From noreply at buildbot.pypy.org Thu Jan 1 13:53:34 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Jan 2015 13:53:34 +0100 (CET) Subject: [pypy-commit] pypy default: simple optimization - dont store unrolling only data when not exporting state Message-ID: <20150101125334.06B951C3354@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r75193:e48107da28ee Date: 2015-01-01 14:53 +0200 http://bitbucket.org/pypy/pypy/changeset/e48107da28ee/ Log: simple optimization - dont store unrolling only data when not exporting state diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -64,16 +64,17 @@ # cancelling its previous effects with no side effect. self._lazy_setfield = None - def value_updated(self, oldvalue, newvalue): + def value_updated(self, oldvalue, newvalue, exporting_state): try: fieldvalue = self._cached_fields[oldvalue] except KeyError: pass else: self._cached_fields[newvalue] = fieldvalue - op = self._cached_fields_getfield_op[oldvalue].clone() - op.setarg(0, newvalue.box) - self._cached_fields_getfield_op[newvalue] = op + if exporting_state: + op = self._cached_fields_getfield_op[oldvalue].clone() + op.setarg(0, newvalue.box) + self._cached_fields_getfield_op[newvalue] = op def possible_aliasing(self, optheap, structvalue): # If lazy_setfield is set and contains a setfield on a different @@ -98,8 +99,9 @@ optimizer=None): assert self._lazy_setfield is None self._cached_fields[structvalue] = fieldvalue - op = optimizer.get_op_replacement(op) - self._cached_fields_getfield_op[structvalue] = op + if optimizer.exporting_state: + op = optimizer.get_op_replacement(op) + self._cached_fields_getfield_op[structvalue] = op def force_lazy_setfield(self, optheap, can_cache=True): op = self._lazy_setfield @@ -133,6 +135,7 @@ self._cached_fields_getfield_op.clear() def 
produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr): + assert optimizer.exporting_state if self._lazy_setfield is not None: return for structvalue in self._cached_fields_getfield_op.keys(): @@ -195,10 +198,11 @@ def value_updated(self, oldvalue, newvalue): # XXXX very unhappy about that for cf in self.cached_fields.itervalues(): - cf.value_updated(oldvalue, newvalue) + cf.value_updated(oldvalue, newvalue, self.optimizer.exporting_state) for submap in self.cached_arrayitems.itervalues(): for cf in submap.itervalues(): - cf.value_updated(oldvalue, newvalue) + cf.value_updated(oldvalue, newvalue, + self.optimizer.exporting_state) def force_at_end_of_preamble(self): self.cached_dict_reads.clear() diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -508,6 +508,8 @@ class Optimizer(Optimization): + exporting_state = False + def __init__(self, metainterp_sd, jitdriver_sd, loop, optimizations=None): self.metainterp_sd = metainterp_sd self.jitdriver_sd = jitdriver_sd diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -115,8 +115,9 @@ return self.pure_operations.get(key, None) def remember_emitting_pure(self, op): - op = self.optimizer.get_op_replacement(op) - self.emitted_pure_operations[op] = True + if self.optimizer.exporting_state: + op = self.optimizer.get_op_replacement(op) + self.emitted_pure_operations[op] = True def produce_potential_short_preamble_ops(self, sb): for op in self.emitted_pure_operations: diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -73,6 +73,7 @@ return 
Snapshot(prev, new_snapshot_args) def propagate_all_forward(self, starting_state, export_state=True): + self.optimizer.exporting_state = export_state loop = self.optimizer.loop self.optimizer.clear_newoperations() From noreply at buildbot.pypy.org Thu Jan 1 16:15:26 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Jan 2015 16:15:26 +0100 (CET) Subject: [pypy-commit] pypy default: update copyright Message-ID: <20150101151526.231B01C02FD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r75194:341ff97ace13 Date: 2015-01-01 17:15 +0200 http://bitbucket.org/pypy/pypy/changeset/341ff97ace13/ Log: update copyright diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at From noreply at buildbot.pypy.org Thu Jan 1 18:48:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Jan 2015 18:48:57 +0100 (CET) Subject: [pypy-commit] pypy default: Implement ffi.from_buffer(). Not really tested with memoryviews, Message-ID: <20150101174857.DF65D1C02FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75195:9bdd174fb4e1 Date: 2015-01-01 18:47 +0100 http://bitbucket.org/pypy/pypy/changeset/9bdd174fb4e1/ Log: Implement ffi.from_buffer(). Not really tested with memoryviews, only buffers, just like in CPython for the same reasons. 
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -34,6 +34,7 @@ 'newp_handle': 'handle.newp_handle', 'from_handle': 'handle.from_handle', '_get_types': 'func._get_types', + 'from_buffer': 'func.from_buffer', 'string': 'func.string', 'buffer': 'cbuffer.buffer', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -440,6 +440,25 @@ return "handle to %s" % (self.space.str_w(w_repr),) +class W_CDataFromBuffer(W_CData): + _attrs_ = ['buf', 'length', 'w_keepalive'] + _immutable_fields_ = ['buf', 'length', 'w_keepalive'] + + def __init__(self, space, cdata, ctype, buf, w_object): + W_CData.__init__(self, space, cdata, ctype) + self.buf = buf + self.length = buf.getlength() + self.w_keepalive = w_object + + def get_array_length(self): + return self.length + + def _repr_extra(self): + w_repr = self.space.repr(self.w_keepalive) + return "buffer len %d from '%s' object" % ( + self.length, self.space.type(self.w_keepalive).name) + + W_CData.typedef = TypeDef( '_cffi_backend.CData', __module__ = '_cffi_backend', diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -76,3 +76,32 @@ def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) + +# ____________________________________________________________ + + at unwrap_spec(w_ctype=ctypeobj.W_CType) +def from_buffer(space, w_ctype, w_x): + from pypy.module._cffi_backend import ctypearray, ctypeprim + # + if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or + not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)): + raise oefmt(space.w_TypeError, + "needs 'char[]', got '%s'", 
w_ctype.name) + # + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? + try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + try: + _cdata = buf.get_raw_address() + except ValueError: + raise oefmt(space.w_TypeError, + "from_buffer() got a '%T' object, which supports the " + "buffer interface but cannot be rendered as a plain " + "raw address on PyPy", w_x) + # + return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3195,6 +3195,20 @@ ('a2', BChar, 5)], None, -1, -1, SF_PACKED) +def test_from_buffer(): + import array + a = array.array('H', [10000, 20000, 30000]) + BChar = new_primitive_type("char") + BCharP = new_pointer_type(BChar) + BCharA = new_array_type(BCharP, None) + c = from_buffer(BCharA, a) + assert typeof(c) is BCharA + assert len(c) == 6 + assert repr(c) == "" + p = new_pointer_type(new_primitive_type("unsigned short")) + cast(p, c)[1] += 500 + assert list(a) == [10000, 20500, 30000] + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8.6" diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -30,7 +30,7 @@ class AppTestC(object): """Populated below, hack hack hack.""" - spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO')) + spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO', 'array')) def setup_class(cls): testfuncs_w = [] From noreply at buildbot.pypy.org Thu Jan 1 19:13:41 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 1 Jan 2015 19:13:41 +0100 
(CET) Subject: [pypy-commit] pypy py3.3: Array module: Implement the LongLong typecodes. Message-ID: <20150101181341.685D61D3955@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75196:ae5a2b000fdb Date: 2014-12-31 16:59 +0100 http://bitbucket.org/pypy/pypy/changeset/ae5a2b000fdb/ Log: Array module: Implement the LongLong typecodes. diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -52,7 +52,7 @@ a.descr_frombytes(space, buf) break else: - msg = 'bad typecode (must be b, B, u, h, H, i, I, l, L, f or d)' + msg = 'bad typecode (must be b, B, u, h, H, i, I, l, L, q, Q, f or d)' raise OperationError(space.w_ValueError, space.wrap(msg)) return a @@ -620,6 +620,8 @@ # rbigint.touint() which # corresponds to the # C-type unsigned long + 'q': TypeCode(rffi.LONGLONG, 'bigint_w', True, True), + 'Q': TypeCode(rffi.ULONGLONG, 'bigint_w', True), 'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'), 'd': TypeCode(lltype.Float, 'float_w', method='__float__'), } diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -8,7 +8,7 @@ raises(TypeError, self.array, 'hi') raises(TypeError, self.array, 1) - raises(ValueError, self.array, 'q') + raises(ValueError, self.array, 'x') a = self.array('u') raises(TypeError, a.append, 7) @@ -22,7 +22,7 @@ b = self.array('h', (1, 2, 3)) assert a == b - for tc in 'bhilBHILfd': + for tc in 'bhilBHILQqfd': assert self.array(tc).typecode == tc raises(TypeError, self.array, tc, None) @@ -111,6 +111,8 @@ assert(self.array(t).itemsize >= 4) for t in 'd': assert(self.array(t).itemsize >= 8) + for t in 'Qq': + assert(self.array(t).itemsize >= 8) inttypes = 'bhil' for t in inttypes: @@ -143,7 +145,7 @@ a.fromstring('') assert not len(a) - for t in 'bBhHiIlLfd': + for t in 
'bBhHiIlLfdQq': a = self.array(t) a.fromstring('\x00' * a.itemsize * 2) assert len(a) == 2 and a[0] == 0 and a[1] == 0 @@ -717,13 +719,13 @@ assert repr(a) == "array('i', [4, 3, 2, 1, 0])" def test_type(self): - for t in 'bBhHiIlLfdu': + for t in 'bBhHiIlLfduQq': assert type(self.array(t)) is self.array assert isinstance(self.array(t), self.array) def test_iterable(self): import collections - for t in 'bBhHiIlLfdu': + for t in 'bBhHiIlLfduQq': assert isinstance(self.array(t), collections.Iterable) def test_subclass(self): From noreply at buildbot.pypy.org Thu Jan 1 19:13:42 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 1 Jan 2015 19:13:42 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Implement CPython Issue #12199: Message-ID: <20150101181342.A9CCD1D3955@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75197:41f2d5146ca3 Date: 2015-01-01 16:05 +0100 http://bitbucket.org/pypy/pypy/changeset/41f2d5146ca3/ Log: Implement CPython Issue #12199: The TryExcept and TryFinally and AST nodes have been unified into a Try node. 
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -334,10 +334,8 @@ return With.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Raise): return Raise.from_object(space, w_node) - if space.isinstance_w(w_node, get(space).w_TryExcept): - return TryExcept.from_object(space, w_node) - if space.isinstance_w(w_node, get(space).w_TryFinally): - return TryFinally.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Try): + return Try.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Assert): return Assert.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Import): @@ -1009,16 +1007,17 @@ State.ast_type('Raise', 'stmt', ['exc', 'cause']) -class TryExcept(stmt): - - def __init__(self, body, handlers, orelse, lineno, col_offset): +class Try(stmt): + + def __init__(self, body, handlers, orelse, finalbody, lineno, col_offset): self.body = body self.handlers = handlers self.orelse = orelse + self.finalbody = finalbody stmt.__init__(self, lineno, col_offset) def walkabout(self, visitor): - visitor.visit_TryExcept(self) + visitor.visit_Try(self) def mutate_over(self, visitor): if self.body: @@ -1027,10 +1026,12 @@ visitor._mutate_sequence(self.handlers) if self.orelse: visitor._mutate_sequence(self.orelse) - return visitor.visit_TryExcept(self) + if self.finalbody: + visitor._mutate_sequence(self.finalbody) + return visitor.visit_Try(self) def to_object(self, space): - w_node = space.call_function(get(space).w_TryExcept) + w_node = space.call_function(get(space).w_Try) if self.body is None: body_w = [] else: @@ -1049,57 +1050,6 @@ orelse_w = [node.to_object(space) for node in self.orelse] # stmt w_orelse = space.newlist(orelse_w) space.setattr(w_node, space.wrap('orelse'), w_orelse) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - 
w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) - return w_node - - @staticmethod - def from_object(space, w_node): - w_body = get_field(space, w_node, 'body', False) - w_handlers = get_field(space, w_node, 'handlers', False) - w_orelse = get_field(space, w_node, 'orelse', False) - w_lineno = get_field(space, w_node, 'lineno', False) - w_col_offset = get_field(space, w_node, 'col_offset', False) - body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] - handlers_w = space.unpackiterable(w_handlers) - _handlers = [excepthandler.from_object(space, w_item) for w_item in handlers_w] - orelse_w = space.unpackiterable(w_orelse) - _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] - _lineno = space.int_w(w_lineno) - _col_offset = space.int_w(w_col_offset) - return TryExcept(_body, _handlers, _orelse, _lineno, _col_offset) - -State.ast_type('TryExcept', 'stmt', ['body', 'handlers', 'orelse']) - - -class TryFinally(stmt): - - def __init__(self, body, finalbody, lineno, col_offset): - self.body = body - self.finalbody = finalbody - stmt.__init__(self, lineno, col_offset) - - def walkabout(self, visitor): - visitor.visit_TryFinally(self) - - def mutate_over(self, visitor): - if self.body: - visitor._mutate_sequence(self.body) - if self.finalbody: - visitor._mutate_sequence(self.finalbody) - return visitor.visit_TryFinally(self) - - def to_object(self, space): - w_node = space.call_function(get(space).w_TryFinally) - if self.body is None: - body_w = [] - else: - body_w = [node.to_object(space) for node in self.body] # stmt - w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) if self.finalbody is None: finalbody_w = [] else: @@ -1115,18 +1065,24 @@ @staticmethod def from_object(space, w_node): w_body = get_field(space, w_node, 'body', False) + w_handlers = get_field(space, w_node, 'handlers', False) + w_orelse = 
get_field(space, w_node, 'orelse', False) w_finalbody = get_field(space, w_node, 'finalbody', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] + handlers_w = space.unpackiterable(w_handlers) + _handlers = [excepthandler.from_object(space, w_item) for w_item in handlers_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] finalbody_w = space.unpackiterable(w_finalbody) _finalbody = [stmt.from_object(space, w_item) for w_item in finalbody_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return TryFinally(_body, _finalbody, _lineno, _col_offset) - -State.ast_type('TryFinally', 'stmt', ['body', 'finalbody']) + return Try(_body, _handlers, _orelse, _finalbody, _lineno, _col_offset) + +State.ast_type('Try', 'stmt', ['body', 'handlers', 'orelse', 'finalbody']) class Assert(stmt): @@ -3651,9 +3607,7 @@ return self.default_visitor(node) def visit_Raise(self, node): return self.default_visitor(node) - def visit_TryExcept(self, node): - return self.default_visitor(node) - def visit_TryFinally(self, node): + def visit_Try(self, node): return self.default_visitor(node) def visit_Assert(self, node): return self.default_visitor(node) @@ -3818,13 +3772,10 @@ if node.cause: node.cause.walkabout(self) - def visit_TryExcept(self, node): + def visit_Try(self, node): self.visit_sequence(node.body) self.visit_sequence(node.handlers) self.visit_sequence(node.orelse) - - def visit_TryFinally(self, node): - self.visit_sequence(node.body) self.visit_sequence(node.finalbody) def visit_Assert(self, node): diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -399,20 +399,15 @@ else: 
otherwise = self.handle_suite(try_node.children[-1]) except_count -= 1 + handlers = [] if except_count: - handlers = [] for i in range(except_count): base_offset = i * 3 exc = try_node.children[3 + base_offset] except_body = try_node.children[5 + base_offset] handlers.append(self.handle_except_clause(exc, except_body)) - except_ast = ast.TryExcept(body, handlers, otherwise, - try_node.lineno, try_node.column) - if finally_suite is None: - return except_ast - body = [except_ast] - return ast.TryFinally(body, finally_suite, try_node.lineno, - try_node.column) + return ast.Try(body, handlers, otherwise, finally_suite, + try_node.lineno, try_node.column) def handle_with_stmt(self, with_node): body = self.handle_suite(with_node.children[-1]) diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -599,20 +599,20 @@ self.visit_sequence(wh.orelse) self.use_next_block(end) - def visit_TryExcept(self, te): - self.update_position(te.lineno, True) + def _visit_try_except(self, tr): + self.update_position(tr.lineno, True) exc = self.new_block() otherwise = self.new_block() end = self.new_block() self.emit_jump(ops.SETUP_EXCEPT, exc) body = self.use_next_block() self.push_frame_block(F_BLOCK_EXCEPT, body) - self.visit_sequence(te.body) + self.visit_sequence(tr.body) self.emit_op(ops.POP_BLOCK) self.pop_frame_block(F_BLOCK_EXCEPT, body) self.emit_jump(ops.JUMP_FORWARD, otherwise) self.use_next_block(exc) - for handler in te.handlers: + for handler in tr.handlers: assert isinstance(handler, ast.ExceptHandler) self.update_position(handler.lineno, True) next_except = self.new_block() @@ -672,26 +672,35 @@ self.emit_op(ops.END_FINALLY) # this END_FINALLY will always re-raise self.is_dead_code() self.use_next_block(otherwise) - self.visit_sequence(te.orelse) + self.visit_sequence(tr.orelse) self.use_next_block(end) - def visit_TryFinally(self, tf): - 
self.update_position(tf.lineno, True) + def _visit_try_finally(self, tr): + self.update_position(tr.lineno, True) end = self.new_block() self.emit_jump(ops.SETUP_FINALLY, end) body = self.use_next_block() self.push_frame_block(F_BLOCK_FINALLY, body) - self.visit_sequence(tf.body) + if tr.handlers: + self._visit_try_except(tr) + else: + self.visit_sequence(tr.body) self.emit_op(ops.POP_BLOCK) self.pop_frame_block(F_BLOCK_FINALLY, body) # Indicates there was no exception. self.load_const(self.space.w_None) self.use_next_block(end) self.push_frame_block(F_BLOCK_FINALLY_END, end) - self.visit_sequence(tf.finalbody) + self.visit_sequence(tr.finalbody) self.emit_op(ops.END_FINALLY) self.pop_frame_block(F_BLOCK_FINALLY_END, end) + def visit_Try(self, tr): + if tr.finalbody: + return self._visit_try_finally(tr) + else: + return self._visit_try_except(tr) + def _import_as(self, alias): source_name = alias.name dot = source_name.find(".") diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -557,15 +557,10 @@ role = SYM_ASSIGNED self.note_symbol(name.id, role) - def visit_TryExcept(self, node): + def visit_Try(self, node): self.scope.note_try_start(node) self.visit_sequence(node.body) self.scope.note_try_end(node) self.visit_sequence(node.handlers) self.visit_sequence(node.orelse) - - def visit_TryFinally(self, node): - self.scope.note_try_start(node) - self.visit_sequence(node.body) - self.scope.note_try_end(node) self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -306,14 +306,15 @@ def test_try(self): tr = self.get_first_stmt("try: x" + "\n" + "finally: pass") - assert isinstance(tr, ast.TryFinally) + assert 
isinstance(tr, ast.Try) assert len(tr.body) == 1 assert isinstance(tr.body[0].value, ast.Name) assert len(tr.finalbody) == 1 assert isinstance(tr.finalbody[0], ast.Pass) + assert tr.orelse is None tr = self.get_first_stmt("try: x" + "\n" + "except: pass") - assert isinstance(tr, ast.TryExcept) + assert isinstance(tr, ast.Try) assert len(tr.body) == 1 assert isinstance(tr.body[0].value, ast.Name) assert len(tr.handlers) == 1 @@ -324,6 +325,7 @@ assert len(handler.body) == 1 assert isinstance(handler.body[0], ast.Pass) assert tr.orelse is None + assert tr.finalbody is None tr = self.get_first_stmt("try: x" + "\n" + "except Exception: pass") assert len(tr.handlers) == 1 @@ -375,32 +377,27 @@ tr = self.get_first_stmt("try: x" + "\n" + "except: 4" + "\n" + "finally: pass") - assert isinstance(tr, ast.TryFinally) + assert isinstance(tr, ast.Try) assert len(tr.finalbody) == 1 assert isinstance(tr.finalbody[0], ast.Pass) + assert len(tr.handlers) == 1 + assert len(tr.handlers[0].body) == 1 + assert isinstance(tr.handlers[0].body[0].value, ast.Num) assert len(tr.body) == 1 - exc = tr.body[0] - assert isinstance(exc, ast.TryExcept) - assert len(exc.handlers) == 1 - assert len(exc.handlers[0].body) == 1 - assert isinstance(exc.handlers[0].body[0].value, ast.Num) - assert len(exc.body) == 1 - assert isinstance(exc.body[0].value, ast.Name) + assert isinstance(tr.body[0].value, ast.Name) tr = self.get_first_stmt("try: x" + "\n" + "except: 4" + "\n" + "else: 'hi'" + "\n" + "finally: pass") - assert isinstance(tr, ast.TryFinally) + assert isinstance(tr, ast.Try) assert len(tr.finalbody) == 1 assert isinstance(tr.finalbody[0], ast.Pass) assert len(tr.body) == 1 - exc = tr.body[0] - assert isinstance(exc, ast.TryExcept) - assert len(exc.orelse) == 1 - assert isinstance(exc.orelse[0].value, ast.Str) - assert len(exc.body) == 1 - assert isinstance(exc.body[0].value, ast.Name) - assert len(exc.handlers) == 1 + assert len(tr.orelse) == 1 + assert isinstance(tr.orelse[0].value, ast.Str) 
+ assert len(tr.body) == 1 + assert isinstance(tr.body[0].value, ast.Name) + assert len(tr.handlers) == 1 def test_with(self): wi = self.get_first_stmt("with x: pass") diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl --- a/pypy/interpreter/astcompiler/tools/Python.asdl +++ b/pypy/interpreter/astcompiler/tools/Python.asdl @@ -31,8 +31,7 @@ | With(withitem* items, stmt* body) | Raise(expr? exc, expr? cause) - | TryExcept(stmt* body, excepthandler* handlers, stmt* orelse) - | TryFinally(stmt* body, stmt* finalbody) + | Try(stmt* body, excepthandler* handlers, stmt* orelse, stmt* finalbody) | Assert(expr test, expr? msg) | Import(alias* names) diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -240,15 +240,12 @@ elif node.cause: raise ValidationError("Raise with cause but no exception") - def visit_TryExcept(self, node): - self._validate_body(node.body, "TryExcept") + def visit_Try(self, node): + self._validate_body(node.body, "Try") for handler in node.handlers: handler.walkabout(self) self._validate_stmts(node.orelse) - - def visit_TryFinally(self, node): - self._validate_body(node.body, "TryFinally") - self._validate_body(node.finalbody, "TryFinally") + self._validate_stmts(node.finalbody) def visit_ExceptHandler(self, node): if node.type: diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -359,11 +359,12 @@ def test_issue793(self): import _ast as ast body = ast.Module([ - ast.TryExcept([ast.Pass(lineno=2, col_offset=4)], + ast.Try([ast.Pass(lineno=2, col_offset=4)], [ast.ExceptHandler(ast.Name('Exception', ast.Load(), lineno=3, col_offset=0), - None, [], lineno=4, col_offset=0)], - [], lineno=1, col_offset=0) + None, [ast.Pass(lineno=4, col_offset=0)], 
+ lineno=4, col_offset=0)], + [], [], lineno=1, col_offset=0) ]) exec(compile(body, '', 'exec')) From noreply at buildbot.pypy.org Thu Jan 1 19:13:43 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 1 Jan 2015 19:13:43 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Skip c_bisect tests Message-ID: <20150101181343.DC4AA1D3955@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75198:078576d9d66c Date: 2015-01-01 16:10 +0100 http://bitbucket.org/pypy/pypy/changeset/078576d9d66c/ Log: Skip c_bisect tests diff --git a/lib-python/3/test/test_bisect.py b/lib-python/3/test/test_bisect.py --- a/lib-python/3/test/test_bisect.py +++ b/lib-python/3/test/test_bisect.py @@ -202,6 +202,7 @@ class TestBisectPython(TestBisect, unittest.TestCase): module = py_bisect + at unittest.skipUnless(c_bisect, 'requires _bisect') class TestBisectC(TestBisect, unittest.TestCase): module = c_bisect @@ -237,6 +238,7 @@ class TestInsortPython(TestInsort, unittest.TestCase): module = py_bisect + at unittest.skipUnless(c_bisect, 'requires _bisect') class TestInsortC(TestInsort, unittest.TestCase): module = c_bisect @@ -292,6 +294,7 @@ class TestErrorHandlingPython(TestErrorHandling, unittest.TestCase): module = py_bisect + at unittest.skipUnless(c_bisect, 'requires _bisect') class TestErrorHandlingC(TestErrorHandling, unittest.TestCase): module = c_bisect @@ -319,6 +322,7 @@ class TestDocExamplePython(TestDocExample, unittest.TestCase): module = py_bisect + at unittest.skipUnless(c_bisect, 'requires _bisect') class TestDocExampleC(TestDocExample, unittest.TestCase): module = c_bisect From noreply at buildbot.pypy.org Thu Jan 1 19:13:45 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 1 Jan 2015 19:13:45 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Fix tests to pass with -A and cpython3.3. 
Message-ID: <20150101181345.10CC51D3955@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75199:5494a2d79290 Date: 2015-01-01 18:57 +0100 http://bitbucket.org/pypy/pypy/changeset/5494a2d79290/ Log: Fix tests to pass with -A and cpython3.3. + Allow some bytes functions to search for integers instead of single chars. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -66,7 +66,16 @@ return False @staticmethod - def _op_val(space, w_other): + def _op_val(space, w_other, allow_char=False): + # Some functions (contains, find) allow a number to specify a + # single char. + if allow_char and space.isinstance_w(w_other, space.w_int): + return StringMethods._single_char(space, w_other) + try: + return space.bytes_w(w_other) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise return space.buffer_w(w_other, space.BUF_SIMPLE).as_str() def _chr(self, char): @@ -394,14 +403,6 @@ except ValueError: raise oefmt(space.w_ValueError, "value not found in bytearray") - _StringMethods_descr_contains = descr_contains - def descr_contains(self, space, w_sub): - if space.isinstance_w(w_sub, space.w_int): - char = space.int_w(w_sub) - return _descr_contains_bytearray(self.data, space, char) - - return self._StringMethods_descr_contains(space, w_sub) - def descr_add(self, space, w_other): if isinstance(w_other, W_BytearrayObject): return self._new(self.data + w_other.data) @@ -432,15 +433,6 @@ def _make_data(s): return [s[i] for i in range(len(s))] - -def _descr_contains_bytearray(data, space, char): - if not 0 <= char < 256: - raise oefmt(space.w_ValueError, "byte must be in range(0, 256)") - for c in data: - if ord(c) == char: - return space.w_True - return space.w_False - # ____________________________________________________________ diff --git a/pypy/objspace/std/bytesobject.py 
b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -431,7 +431,11 @@ return True @staticmethod - def _op_val(space, w_other): + def _op_val(space, w_other, allow_char=False): + # Some functions (contains, find) allow a number to specify a + # single char. + if allow_char and space.isinstance_w(w_other, space.w_int): + return StringMethods._single_char(space, w_other) try: return space.bytes_w(w_other) except OperationError, e: @@ -588,11 +592,6 @@ _StringMethods_descr_add = descr_add def descr_add(self, space, w_other): - if space.isinstance_w(w_other, space.w_bytearray): - # XXX: eliminate double-copy - from .bytearrayobject import W_BytearrayObject, _make_data - self_as_bytearray = W_BytearrayObject(_make_data(self._value)) - return space.add(self_as_bytearray, w_other) if space.config.objspace.std.withstrbuf: from pypy.objspace.std.strbufobject import W_StringBufferObject try: @@ -607,23 +606,6 @@ return W_StringBufferObject(builder) return self._StringMethods_descr_add(space, w_other) - _StringMethods_descr_contains = descr_contains - def descr_contains(self, space, w_sub): - if space.isinstance_w(w_sub, space.w_int): - try: - char = space.int_w(w_sub) - except OperationError as e: - if e.match(space, space.w_OverflowError): - char = 256 # arbitrary value which will trigger the ValueError - # condition below - else: - raise - if not 0 <= char < 256: - raise oefmt(space.w_ValueError, - "character must be in range(256)") - return space.newbool(self._value.find(chr(char)) >= 0) - return self._StringMethods_descr_contains(space, w_sub) - _StringMethods_descr_join = descr_join def descr_join(self, space, w_list): l = space.listview_bytes(w_list) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -58,6 +58,21 @@ def _multi_chr(self, c): return c + @staticmethod + def _single_char(space, 
w_sub): + try: + char = space.int_w(w_sub) + except OperationError as e: + if e.match(space, space.w_OverflowError): + char = 256 # arbitrary value which will trigger the ValueError + # condition below + else: + raise + if not 0 <= char < 256: + raise oefmt(space.w_ValueError, + "byte must be in range(256)") + return chr(char) + def descr_len(self, space): return space.wrap(self._len()) @@ -66,18 +81,11 @@ def descr_contains(self, space, w_sub): value = self._val(space) + other = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): - other = self._op_val(space, w_sub) - return space.newbool(value.find(other) >= 0) - - from pypy.objspace.std.bytesobject import W_BytesObject - if isinstance(w_sub, W_BytesObject): - other = self._op_val(space, w_sub) + res = value.find(other) + else: res = find(value, other, 0, len(value)) - else: - buffer = _get_buffer(space, w_sub) - res = find(value, buffer, 0, len(value)) - return space.newbool(res >= 0) def descr_add(self, space, w_other): @@ -246,55 +254,31 @@ def descr_find(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) + sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): - res = value.find(self._op_val(space, w_sub), start, end) - return space.wrap(res) - - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - from pypy.objspace.std.bytesobject import W_BytesObject - if isinstance(w_sub, W_BytearrayObject): - res = find(value, w_sub.data, start, end) - elif isinstance(w_sub, W_BytesObject): - res = find(value, w_sub._value, start, end) + res = value.find(sub, start, end) else: - buffer = _get_buffer(space, w_sub) - res = find(value, buffer, start, end) - + res = find(value, sub, start, end) return space.wrap(res) def descr_rfind(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) + sub = self._op_val(space, w_sub, 
allow_char=True) if self._use_rstr_ops(space, w_sub): - res = value.rfind(self._op_val(space, w_sub), start, end) - return space.wrap(res) - - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - from pypy.objspace.std.bytesobject import W_BytesObject - if isinstance(w_sub, W_BytearrayObject): - res = rfind(value, w_sub.data, start, end) - elif isinstance(w_sub, W_BytesObject): - res = rfind(value, w_sub._value, start, end) + res = value.rfind(sub, start, end) else: - buffer = _get_buffer(space, w_sub) - res = rfind(value, buffer, start, end) - + res = rfind(value, sub, start, end) return space.wrap(res) def descr_index(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - from pypy.objspace.std.bytesobject import W_BytesObject + sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): - res = value.find(self._op_val(space, w_sub), start, end) - elif isinstance(w_sub, W_BytearrayObject): - res = find(value, w_sub.data, start, end) - elif isinstance(w_sub, W_BytesObject): - res = find(value, w_sub._value, start, end) + res = value.find(sub, start, end) else: - buffer = _get_buffer(space, w_sub) - res = find(value, buffer, start, end) + res = find(value, sub, start, end) if res < 0: raise oefmt(space.w_ValueError, @@ -306,15 +290,11 @@ from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.bytesobject import W_BytesObject + sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): - res = value.rfind(self._op_val(space, w_sub), start, end) - elif isinstance(w_sub, W_BytearrayObject): - res = rfind(value, w_sub.data, start, end) - elif isinstance(w_sub, W_BytesObject): - res = rfind(value, w_sub._value, start, end) + res = value.rfind(sub, start, end) else: - buffer = _get_buffer(space, w_sub) - res = rfind(value, buffer, start, end) + 
res = rfind(value, sub, start, end) if res < 0: raise oefmt(space.w_ValueError, diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -177,12 +177,10 @@ assert bytearray(b'hello').find(b'l', -2) == 3 assert bytearray(b'hello').rfind(b'l') == 3 - - # these checks used to not raise in pypy but they should - raises(TypeError, bytearray(b'hello').index, ord('e')) - raises(TypeError, bytearray(b'hello').rindex, ord('e')) - raises(TypeError, bytearray(b'hello').find, ord('e')) - raises(TypeError, bytearray(b'hello').rfind, ord('e')) + assert bytearray(b'hello').index(ord('e')) == 1 + assert bytearray(b'hello').rindex(ord('l')) == 3 + assert bytearray(b'hello').find(ord('e')) == 1 + assert bytearray(b'hello').rfind(ord('l')) == 3 assert bytearray(b'hello').startswith(b'he') assert bytearray(b'hello').startswith(bytearray(b'he')) @@ -356,7 +354,7 @@ def check(a, b, expected): result = a + b assert result == expected - assert isinstance(result, bytearray) + assert isinstance(result, type(a)) check(b1, b2, b"abcdef") check(b1, b"def", b"abcdef") @@ -454,8 +452,8 @@ def test_buffer(self): b = bytearray(b'abcdefghi') buf = memoryview(b) - assert buf[2] == b'c' - buf[3] = b'D' + assert buf[2] == ord('c') + buf[3] = ord('D') assert b == b'abcDefghi' buf[4:6] = b'EF' assert b == b'abcDEFghi' diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -706,7 +706,7 @@ x += b"llo" b = memoryview(x) assert len(b) == 5 - assert b[-1] == b"o" + assert b[-1] == ord("o") assert b[:] == b"hello" assert b[1:0] == b"" raises(TypeError, "b[3] = 'x'") diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ 
b/pypy/objspace/std/unicodeobject.py @@ -103,7 +103,7 @@ return True @staticmethod - def _op_val(space, w_other): + def _op_val(space, w_other, allow_char=False): if isinstance(w_other, W_UnicodeObject): return w_other._value raise oefmt(space.w_TypeError, From noreply at buildbot.pypy.org Thu Jan 1 19:13:46 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 1 Jan 2015 19:13:46 +0100 (CET) Subject: [pypy-commit] pypy py3.3: array: Correctly convert bigint object to longlong numbers. Message-ID: <20150101181346.332301D3955@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75200:235ef201d238 Date: 2015-01-01 18:58 +0100 http://bitbucket.org/pypy/pypy/changeset/235ef201d238/ Log: array: Correctly convert bigint object to longlong numbers. diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -585,7 +585,7 @@ self.itemtype = itemtype self.bytes = rffi.sizeof(itemtype) self.arraytype = lltype.Array(itemtype, hints={'nolength': True}) - self.unwrap = unwrap + self.unwrap, _, self.convert = unwrap.partition('.') self.signed = signed self.canoverflow = canoverflow self.w_class = None @@ -616,12 +616,9 @@ 'i': TypeCode(rffi.INT, 'int_w', True, True), 'I': TypeCode(rffi.UINT, 'int_w', True), 'l': TypeCode(rffi.LONG, 'int_w', True, True), - 'L': TypeCode(rffi.ULONG, 'bigint_w'), # Overflow handled by - # rbigint.touint() which - # corresponds to the - # C-type unsigned long - 'q': TypeCode(rffi.LONGLONG, 'bigint_w', True, True), - 'Q': TypeCode(rffi.ULONGLONG, 'bigint_w', True), + 'L': TypeCode(rffi.ULONG, 'bigint_w.touint'), + 'q': TypeCode(rffi.LONGLONG, 'bigint_w.tolonglong', True, True), + 'Q': TypeCode(rffi.ULONGLONG, 'bigint_w.toulonglong', True), 'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'), 'd': TypeCode(lltype.Float, 'float_w', method='__float__'), } @@ -701,9 +698,9 @@ "array item must be " + 
mytype.unwrap[:-2]) else: raise - if mytype.unwrap == 'bigint_w': + if mytype.convert: try: - item = item.touint() + item = getattr(item, mytype.convert)() except (ValueError, OverflowError): msg = 'unsigned %d-byte integer out of range' % \ mytype.bytes diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -413,7 +413,7 @@ def test_buffer(self): a = self.array('h', b'Hi') buf = memoryview(a) - assert buf[1] == b'i' + assert buf[1] == ord('i') def test_buffer_write(self): a = self.array('b', b'hello') @@ -426,7 +426,7 @@ def test_buffer_keepalive(self): buf = memoryview(self.array('b', b'text')) - assert buf[2] == b'x' + assert buf[2] == ord('x') # a = self.array('b', b'foobarbaz') buf = memoryview(a) From noreply at buildbot.pypy.org Thu Jan 1 22:39:10 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Jan 2015 22:39:10 +0100 (CET) Subject: [pypy-commit] pypy ufuncapi: test, fix non-rpython many-output-ufunc, (fijal reviewing) Message-ID: <20150101213910.9A54E1C02FD@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r75201:b4d4ba49c7bb Date: 2015-01-01 23:38 +0200 http://bitbucket.org/pypy/pypy/changeset/b4d4ba49c7bb/ Log: test, fix non-rpython many-output-ufunc, (fijal reviewing) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -126,9 +126,9 @@ res_dtype=res_dtype, nin=nin) for i in range(nin): vals[i] = in_iters[i].getitem(in_states[i]) - arglist = space.newlist(vals) - out_val = space.call_args(func, Arguments.frompacked(space, arglist)) - out_iter.setitem(out_state, res_dtype.coerce(space, out_val)) + w_arglist = space.newlist(vals) + w_out_val = space.call_args(func, Arguments.frompacked(space, w_arglist)) + out_iter.setitem(out_state, res_dtype.coerce(space, w_out_val)) for i in range(nin): 
in_states[i] = in_iters[i].next(in_states[i]) out_state = out_iter.next(out_state) @@ -162,23 +162,22 @@ out_states[i] = out_state shapelen = len(shape) vals = [None] * nin - # what does the function return? while not out_iters[0].done(out_states[0]): call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func, res_dtype=res_dtype, nin=nin, nout=nout) for i in range(nin): vals[i] = in_iters[i].getitem(in_states[i]) - arglist = space.newlist(vals) - out_vals = space.call_args(func, Arguments.frompacked(space, arglist)) - # XXX bad form - out_vals should be a list or tuple of boxes. - # but func can return anything, - if not isinstance(out_vals, list) and not isinstance(out_vals, tuple): - out_iters[0].setitem(out_states[0], res_dtype.coerce(space, out_vals)) + w_arglist = space.newlist(vals) + w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) + # w_outvals should be a tuple, but func can return a single value as well + if space.isinstance_w(w_outvals, space.w_tuple): + batch = space.listview(w_outvals) + for i in range(len(batch)): + out_iters[i].setitem(out_states[i], res_dtype.coerce(space, batch[i])) + out_states[i] = out_iters[i].next(out_states[i]) + else: + out_iters[0].setitem(out_states[0], res_dtype.coerce(space, w_outvals)) out_states[0] = out_iters[0].next(out_states[0]) - else: - for i in range(len(out_vals)): - out_iters[i].setitem(out_states[i], res_dtype.coerce(space, out_vals[i])) - out_states[i] = out_iters[i].next(out_states[i]) for i in range(nin): in_states[i] = in_iters[i].next(in_states[i]) return space.newtuple([convert_to_array(space, o) for o in out_args]) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -149,17 +149,22 @@ from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): return a+b + def sumdiff(a, b): + return a+b, a-b try: adder_ufunc0 = 
frompyfunc(adder, 2, 1) adder_ufunc1 = frompyfunc(adder, 2, 1) int_func22 = frompyfunc(int, 2, 2) int_func12 = frompyfunc(int, 1, 2) + sumdiff = frompyfunc(sumdiff, 2, 2) retype = dtype(object) except NotImplementedError as e: # dtype of returned value is object, which is not supported yet assert 'object' in str(e) # Use pypy specific extension for out_dtype adder_ufunc0 = frompyfunc(adder, 2, 1, dtypes=['match']) + sumdiff = frompyfunc(sumdiff, 2, 2, dtypes=['match'], + signature='(i),(i)->(i),(i)') adder_ufunc1 = frompyfunc([adder, adder], 2, 1, dtypes=[int, int, int, float, float, float]) int_func22 = frompyfunc([int, int], 2, 2, signature='(i),(i)->(i),(i)', @@ -167,19 +172,23 @@ int_func12 = frompyfunc([int], 1, 2, signature='(i)->(i),(i)', dtypes=['match']) retype = dtype(int) + a = arange(10) assert isinstance(adder_ufunc1, ufunc) - res = adder_ufunc0(arange(10), arange(10)) + res = adder_ufunc0(a, a) assert res.dtype == retype - assert all(res == arange(10) + arange(10)) - res = adder_ufunc1(arange(10), arange(10)) + assert all(res == a + a) + res = adder_ufunc1(a, a) assert res.dtype == retype - assert all(res == arange(10) + arange(10)) + assert all(res == a + a) raises(TypeError, frompyfunc, 1, 2, 3) - raises (ValueError, int_func22, arange(10)) - res = int_func12(arange(10)) + raises (ValueError, int_func22, a) + res = int_func12(a) assert len(res) == 2 assert isinstance(res, tuple) - assert (res[0] == arange(10)).all() + assert (res[0] == a).all() + res = sumdiff(2 * a, a) + assert (res[0] == 3 * a).all() + assert (res[1] == a).all() def test_frompyfunc_outerloop(self): def int_times2(in_array, out_array): From noreply at buildbot.pypy.org Fri Jan 2 03:03:57 2015 From: noreply at buildbot.pypy.org (stefanor) Date: Fri, 2 Jan 2015 03:03:57 +0100 (CET) Subject: [pypy-commit] pypy py3k: Remove Range-List optimaziton documentation. 
It isn't available in py3k Message-ID: <20150102020357.0F91D1D23DC@cobra.cs.uni-duesseldorf.de> Author: Stefano Rivera Branch: py3k Changeset: r75202:c6a414eebbc5 Date: 2015-01-01 18:03 -0800 http://bitbucket.org/pypy/pypy/changeset/c6a414eebbc5/ Log: Remove Range-List optimaziton documentation. It isn't available in py3k diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -99,25 +99,6 @@ with the :config:`objspace.std.withmapdict` option. -List Optimizations -~~~~~~~~~~~~~~~~~~ - -Range-Lists -+++++++++++ - -Range-lists solve the same problem that the ``xrange`` builtin solves poorly: -the problem that ``range`` allocates memory even if the resulting list is only -ever used for iterating over it. Range lists are a different implementation for -lists. They are created only as a result of a call to ``range``. As long as the -resulting list is used without being mutated, the list stores only the start, stop -and step of the range. Only when somebody mutates the list the actual list is -created. This gives the memory and speed behaviour of ``xrange`` and the generality -of use of ``range``, and makes ``xrange`` essentially useless. - -You can enable this feature with the :config:`objspace.std.withrangelist` -option. 
- - User Class Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ From noreply at buildbot.pypy.org Fri Jan 2 07:17:29 2015 From: noreply at buildbot.pypy.org (Digenis) Date: Fri, 2 Jan 2015 07:17:29 +0100 (CET) Subject: [pypy-commit] pypy default: don't capture "func" as a positional argument Message-ID: <20150102061729.936C41C06DB@cobra.cs.uni-duesseldorf.de> Author: Nikolaos-Digenis Karagiannis Branch: Changeset: r75203:d4b4902dad5e Date: 2015-01-02 07:22 +0200 http://bitbucket.org/pypy/pypy/changeset/d4b4902dad5e/ Log: don't capture "func" as a positional argument diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -9,7 +9,10 @@ of the given arguments and keywords. """ - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func From noreply at buildbot.pypy.org Fri Jan 2 08:31:09 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Jan 2015 08:31:09 +0100 (CET) Subject: [pypy-commit] pypy default: try harder to filter out NotImplementedError results for functions, None results for attributes Message-ID: <20150102073109.909121C0347@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75204:45f14b80454a Date: 2015-01-02 09:30 +0200 http://bitbucket.org/pypy/pypy/changeset/45f14b80454a/ Log: try harder to filter out NotImplementedError results for functions, None results for attributes also add capability to run alternative cpython for comparison and "usage" help diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -71,6 +71,10 @@ lines = subprocess.check_output(args).splitlines() items = 
SearchableSet() for line in lines: + # since calling a function in "search.py" may have printed side effects, + # make sure the line begins with '[UT] : ' + if not (line[:1] in KINDS.values() and line[1:4] == ' : '): + continue kind, name = line.split(" : ", 1) subitems = [] if kind == KINDS["TYPE"] and name in SPECIAL_NAMES and attr is None: @@ -97,7 +101,15 @@ "nditer"] def main(argv): - cpy_items = find_numpy_items("/usr/bin/python") + if 'help' in argv[1]: + print '\nusage: python', os.path.dirname(__file__), ' [] []' + print ' path-to-cpython-with-numpy defaults to "/usr/bin/python"\n' + return + if len(argv) < 4: + cpython = '/usr/bin/python' + else: + cpython = argv[3] + cpy_items = find_numpy_items(cpython) pypy_items = find_numpy_items(argv[1]) ver = get_version_str(argv[1]) all_items = [] diff --git a/pypy/module/micronumpy/tool/numready/search.py b/pypy/module/micronumpy/tool/numready/search.py --- a/pypy/module/micronumpy/tool/numready/search.py +++ b/pypy/module/micronumpy/tool/numready/search.py @@ -23,6 +23,15 @@ if attr is None and name.startswith("_"): continue subobj = getattr(obj, name) + if subobj is None: + continue + if isinstance(subobj, types.FunctionType): + try: + subobj() + except NotImplementedError: + continue + except: + pass if isinstance(subobj, types.TypeType): kind = KINDS["TYPE"] else: From noreply at buildbot.pypy.org Fri Jan 2 10:04:34 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Jan 2015 10:04:34 +0100 (CET) Subject: [pypy-commit] pypy quieter-translation: do not print errors from Works(), maybe we should have a verbose mode instead? Message-ID: <20150102090434.9A7E61C0347@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: quieter-translation Changeset: r75205:daaece5cf074 Date: 2015-01-02 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/daaece5cf074/ Log: do not print errors from Works(), maybe we should have a verbose mode instead? 
diff --git a/ctypes_configure/cbuild.py b/ctypes_configure/cbuild.py --- a/ctypes_configure/cbuild.py +++ b/ctypes_configure/cbuild.py @@ -183,9 +183,9 @@ gcv['OPT'] = opt -def try_compile(c_files, eci): +def try_compile(c_files, eci, noerr=False): try: - build_executable(c_files, eci) + build_executable(c_files, eci, noerr=noerr) result = True except (distutils.errors.CompileError, distutils.errors.LinkError): diff --git a/ctypes_configure/configure.py b/ctypes_configure/configure.py --- a/ctypes_configure/configure.py +++ b/ctypes_configure/configure.py @@ -143,12 +143,12 @@ print >> f, '}' f.close() - def ask_gcc(self, question): + def ask_gcc(self, question, noerr=False): self.start_main() self.f.write(question + "\n") self.close() eci = self.config._compilation_info_ - return try_compile([self.path], eci) + return try_compile([self.path], eci, noerr=noerr) def configure(CConfig, noerr=False): @@ -457,7 +457,7 @@ class Works(CConfigSingleEntry): def question(self, ask_gcc): - return ask_gcc("") + return ask_gcc("", noerr=True) class SizeOf(CConfigEntry): """An entry in a CConfig class that stands for From noreply at buildbot.pypy.org Fri Jan 2 10:04:35 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Jan 2015 10:04:35 +0100 (CET) Subject: [pypy-commit] pypy quieter-translation: windows has no resource module Message-ID: <20150102090435.CA78B1C0347@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: quieter-translation Changeset: r75206:2e6785bb21f2 Date: 2015-01-02 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/2e6785bb21f2/ Log: windows has no resource module diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py --- a/lib_pypy/ctypes_config_cache/resource.ctc.py +++ b/lib_pypy/ctypes_config_cache/resource.ctc.py @@ -8,55 +8,60 @@ import dumpcache from ctypes_configure.configure import (configure, ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger, - SimpleType) + 
SimpleType, check_eci) -_CONSTANTS = ( - 'RLIM_INFINITY', - 'RLIM_NLIMITS', -) -_OPTIONAL_CONSTANTS = ( - 'RLIMIT_CPU', - 'RLIMIT_FSIZE', - 'RLIMIT_DATA', - 'RLIMIT_STACK', - 'RLIMIT_CORE', - 'RLIMIT_RSS', - 'RLIMIT_NPROC', - 'RLIMIT_NOFILE', - 'RLIMIT_OFILE', - 'RLIMIT_MEMLOCK', - 'RLIMIT_AS', - 'RLIMIT_LOCKS', - 'RLIMIT_SIGPENDING', - 'RLIMIT_MSGQUEUE', - 'RLIMIT_NICE', - 'RLIMIT_RTPRIO', - 'RLIMIT_VMEM', +eci = ExternalCompilationInfo(includes=['sys/resource.h']) +HAS_RESOURCE = check_eci(eci) - 'RUSAGE_BOTH', - 'RUSAGE_SELF', - 'RUSAGE_CHILDREN', -) +if HAS_RESOURCE: -# Setup our configure -class ResourceConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h']) - rlim_t = SimpleType('rlim_t') -for key in _CONSTANTS: - setattr(ResourceConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(ResourceConfigure, key, DefinedConstantInteger(key)) + _CONSTANTS = ( + 'RLIM_INFINITY', + 'RLIM_NLIMITS', + ) + _OPTIONAL_CONSTANTS = ( + 'RLIMIT_CPU', + 'RLIMIT_FSIZE', + 'RLIMIT_DATA', + 'RLIMIT_STACK', + 'RLIMIT_CORE', + 'RLIMIT_RSS', + 'RLIMIT_NPROC', + 'RLIMIT_NOFILE', + 'RLIMIT_OFILE', + 'RLIMIT_MEMLOCK', + 'RLIMIT_AS', + 'RLIMIT_LOCKS', + 'RLIMIT_SIGPENDING', + 'RLIMIT_MSGQUEUE', + 'RLIMIT_NICE', + 'RLIMIT_RTPRIO', + 'RLIMIT_VMEM', -# Configure constants and types -config = configure(ResourceConfigure) -config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1 -optional_constants = [] -for key in _OPTIONAL_CONSTANTS: - if config[key] is not None: - optional_constants.append(key) - else: - del config[key] + 'RUSAGE_BOTH', + 'RUSAGE_SELF', + 'RUSAGE_CHILDREN', + ) -config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants) -dumpcache.dumpcache2('resource', config) + # Setup our configure + class ResourceConfigure: + _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h']) + rlim_t = SimpleType('rlim_t') + for key in _CONSTANTS: + setattr(ResourceConfigure, key, ConstantInteger(key)) + for 
key in _OPTIONAL_CONSTANTS: + setattr(ResourceConfigure, key, DefinedConstantInteger(key)) + + # Configure constants and types + config = configure(ResourceConfigure) + config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1 + optional_constants = [] + for key in _OPTIONAL_CONSTANTS: + if config[key] is not None: + optional_constants.append(key) + else: + del config[key] + + config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants) + dumpcache.dumpcache2('resource', config) From noreply at buildbot.pypy.org Fri Jan 2 10:18:51 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Jan 2015 10:18:51 +0100 (CET) Subject: [pypy-commit] pypy ufuncapi: backed out a7730d9255c3, handle this in quieter-translation branch Message-ID: <20150102091851.E5A911D382F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r75207:70a3a4496b45 Date: 2015-01-02 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/70a3a4496b45/ Log: backed out a7730d9255c3, handle this in quieter-translation branch diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -292,10 +292,9 @@ return PyPyJitPolicy(pypy_hooks) def get_entry_point(self, config): - if sys.platform != 'win32': - from pypy.tool.lib_pypy import import_from_lib_pypy - rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') - rebuild.try_rebuild() + from pypy.tool.lib_pypy import import_from_lib_pypy + rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') + rebuild.try_rebuild() space = make_objspace(config) From noreply at buildbot.pypy.org Fri Jan 2 11:35:00 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Jan 2015 11:35:00 +0100 (CET) Subject: [pypy-commit] pypy vmprof: if we don't set the JIT range, don't rely on random values Message-ID: <20150102103500.77B6F1C02FD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: 
r75208:77e2d84c2e8e Date: 2015-01-02 12:34 +0200 http://bitbucket.org/pypy/pypy/changeset/77e2d84c2e8e/ Log: if we don't set the JIT range, don't rely on random values diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -77,10 +77,9 @@ * ****************************************************** */ -static void* jit_start; -static void* jit_end; +static void* jit_start = NULL; +static void* jit_end = NULL; void vmprof_set_jit_range(void* start, void* end) { - printf("vmprof JIT range: %p-%p\n", start, end); jit_start = start; jit_end = end; } From noreply at buildbot.pypy.org Fri Jan 2 18:30:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Jan 2015 18:30:08 +0100 (CET) Subject: [pypy-commit] cffi default: Issue #141: if compiling with __thread fails, try to compile without Message-ID: <20150102173008.65BC51C0347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1617:9ee657789f0e Date: 2015-01-02 18:30 +0100 http://bitbucket.org/cffi/cffi/changeset/9ee657789f0e/ Log: Issue #141: if compiling with __thread fails, try to compile without __thread, and if that fails too, we know there is some other problem with the compiler. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -41,6 +41,15 @@ # resultlist[:] = res +def no_working_compiler_found(): + sys.stderr.write(""" + No working compiler found, or bogus compiler options + passed to the compiler from Python's distutils module. + See the error messages above. 
+ (If they are about -mno-fused-madd and you are on OS/X 10.8, + see http://stackoverflow.com/questions/22313407/ .)\n""") + sys.exit(1) + def ask_supports_thread(): from distutils.core import Distribution from distutils.sysconfig import get_config_vars @@ -50,6 +59,9 @@ if ok: define_macros.append(('USE__THREAD', None)) else: + ok1 = config.try_compile('int some_regular_variable_42;') + if not ok1: + no_working_compiler_found() sys.stderr.write("Note: will not use '__thread' in the C code\n") sys.stderr.write("The above error message can be safely ignored\n") From noreply at buildbot.pypy.org Fri Jan 2 19:16:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Jan 2015 19:16:28 +0100 (CET) Subject: [pypy-commit] cffi default: improve error message a bit Message-ID: <20150102181628.4A3981C02FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1618:04e9f61eecfc Date: 2015-01-02 18:46 +0100 http://bitbucket.org/cffi/cffi/changeset/04e9f61eecfc/ Log: improve error message a bit diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -480,7 +480,7 @@ try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: - raise NotImplementedError("%r: %s" % (srctype, e)) + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves From noreply at buildbot.pypy.org Fri Jan 2 19:16:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Jan 2015 19:16:29 +0100 (CET) Subject: [pypy-commit] cffi default: Delay reporting NotImplmentedErrors when building function types with Message-ID: <20150102181629.6C78B1C02FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1619:275285b314a7 Date: 2015-01-02 19:16 +0100 http://bitbucket.org/cffi/cffi/changeset/275285b314a7/ Log: Delay reporting NotImplmentedErrors 
when building function types with unsupported arguments. They are now reported only when we actually try to do a call. Issue #127. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4095,24 +4095,26 @@ cf = (CFieldObject *)ct->ct_extra; for (i=0; icf_bitshift >= 0) { - PyErr_SetString(PyExc_NotImplementedError, - "cannot pass as argument or return value " - "a struct with bit fields"); + PyErr_Format(PyExc_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with bit fields)", + ct->ct_name); return NULL; } flat = 1; - ct = cf->cf_type; - while (ct->ct_flags & CT_ARRAY) { - flat *= ct->ct_length; - ct = ct->ct_itemdescr; + ct1 = cf->cf_type; + while (ct1->ct_flags & CT_ARRAY) { + flat *= ct1->ct_length; + ct1 = ct1->ct_itemdescr; } if (flat <= 0) { - PyErr_SetString(PyExc_NotImplementedError, - "cannot pass as argument or return value " - "a struct with a zero-length array"); + PyErr_Format(PyExc_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with a zero-length array)", + ct->ct_name); return NULL; } nflat += flat; @@ -4382,11 +4384,6 @@ &fabi)) return NULL; - if (fresult->ct_flags & CT_UNION) { - PyErr_SetString(PyExc_NotImplementedError, - "function returning a union"); - return NULL; - } if ((fresult->ct_size < 0 && !(fresult->ct_flags & CT_VOID)) || (fresult->ct_flags & CT_ARRAY)) { char *msg; @@ -4410,8 +4407,14 @@ cif_description_t *cif_descr; cif_descr = fb_prepare_cif(fargs, fresult, fabi); - if (cif_descr == NULL) - goto error; + if (cif_descr == NULL) { + if (PyErr_ExceptionMatches(PyExc_NotImplementedError)) { + PyErr_Clear(); /* will get the exception if we see an + actual call */ + } + else + goto error; + } fct->ct_extra = (char *)cif_descr; } @@ -4646,8 +4649,9 @@ cif_descr = (cif_description_t *)ct->ct_extra; if (cif_descr == NULL) { - PyErr_SetString(PyExc_NotImplementedError, - "callbacks with '...'"); 
+ PyErr_Format(PyExc_NotImplementedError, + "%s: callback with unsupported argument or " + "return type or with '...'", ct->ct_name); goto error; } if (ffi_prep_closure(closure, &cif_descr->cif, diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1041,11 +1041,12 @@ BInt = new_primitive_type("int") BArray0 = new_array_type(new_pointer_type(BInt), 0) BStruct = new_struct_type("struct foo") + BStructP = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BArray0)]) - py.test.raises(NotImplementedError, new_function_type, - (BStruct,), BInt, False) - py.test.raises(NotImplementedError, new_function_type, - (BInt,), BStruct, False) + BFunc = new_function_type((BStruct,), BInt, False) + py.test.raises(NotImplementedError, cast(BFunc, 123), cast(BStructP, 123)) + BFunc2 = new_function_type((BInt,), BStruct, False) + py.test.raises(NotImplementedError, cast(BFunc2, 123), 123) def test_call_function_9(): BInt = new_primitive_type("int") @@ -1816,7 +1817,8 @@ new_function_type((), new_pointer_type(BFunc)) BUnion = new_union_type("union foo_u") complete_struct_or_union(BUnion, []) - py.test.raises(NotImplementedError, new_function_type, (), BUnion) + BFunc = new_function_type((), BUnion) + py.test.raises(NotImplementedError, cast(BFunc, 123)) py.test.raises(TypeError, new_function_type, (), BArray) def test_struct_return_in_func(): diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -19,8 +19,8 @@ ffi.cdef("struct foo_s { int a,b,c,d,e; int x:1; };") e = py.test.raises(NotImplementedError, ffi.callback, "struct foo_s foo(void)", lambda: 42) - assert str(e.value) == (": " - "cannot pass as argument or return value a struct with bit fields") + assert str(e.value) == ("struct foo_s(*)(): " + "callback with unsupported argument or return type or with '...'") def test_inspecttype(self): ffi = FFI(backend=self.Backend()) diff --git 
a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -2069,3 +2069,20 @@ ffi2.cdef("int foo;") lib2 = ffi2.verify("int foo;", flags=ffi2.RTLD_GLOBAL | ffi2.RTLD_LAZY) return lib2 + +def test_consider_not_implemented_function_type(): + ffi = FFI() + ffi.cdef("typedef union { int a; float b; } Data;" + "typedef struct { int a:2; } MyStr;" + "typedef void (*foofunc_t)(Data);" + "typedef MyStr (*barfunc_t)(void);") + fooptr = ffi.cast("foofunc_t", 123) + barptr = ffi.cast("barfunc_t", 123) + # assert did not crash so far + e = py.test.raises(NotImplementedError, fooptr, ffi.new("Data *")) + assert str(e.value) == ( + "ctype 'Data' not supported as argument or return value") + e = py.test.raises(NotImplementedError, barptr) + assert str(e.value) == ( + "ctype 'MyStr' not supported as argument or return value " + "(it is a struct with bit fields)") From noreply at buildbot.pypy.org Fri Jan 2 20:47:09 2015 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 Jan 2015 20:47:09 +0100 (CET) Subject: [pypy-commit] pypy default: issue1902 followup: fix another thinko which omitted the rewind Message-ID: <20150102194709.27DD41C02FD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r75209:fe589b2b1ccc Date: 2015-01-02 11:46 -0800 http://bitbucket.org/pypy/pypy/changeset/fe589b2b1ccc/ Log: issue1902 followup: fix another thinko which omitted the rewind diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -565,7 +565,7 @@ # Flush the write buffer if necessary if self.writable: - self._writer_flush_unlocked(space) + self._flush_and_rewind_unlocked(space) self._reader_reset_buf() # Read whole blocks, and don't buffer them diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ 
-362,3 +362,32 @@ f.read(1) f.seek(-1, 1) f.write(b'') + + def test_issue1902_2(self): + import _io + with _io.open(self.tmpfile, 'w+b', 4096) as f: + f.write(b'\xff' * 13569) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(4123) + f.seek(-4123, 1) + + def test_issue1902_3(self): + import _io + buffer_size = 4096 + with _io.open(self.tmpfile, 'w+b', buffer_size) as f: + f.write(b'\xff' * buffer_size * 3) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(buffer_size * 2) + assert f.tell() == 1 + buffer_size * 2 From noreply at buildbot.pypy.org Fri Jan 2 22:29:42 2015 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 Jan 2015 22:29:42 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20150102212942.A1EBE1C1056@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r75210:ace68169b31e Date: 2015-01-02 12:39 -0800 http://bitbucket.org/pypy/pypy/changeset/ace68169b31e/ Log: merge default diff too long, truncating to 2000 out of 15159 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -58,7 +58,7 @@ executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], + 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], diff --git a/lib-python/2.7/sqlite3/test/dbapi.py b/lib-python/2.7/sqlite3/test/dbapi.py --- a/lib-python/2.7/sqlite3/test/dbapi.py +++ b/lib-python/2.7/sqlite3/test/dbapi.py @@ -478,6 +478,29 @@ except TypeError: pass + def CheckCurDescription(self): + self.cu.execute("select * from test") + + actual = self.cu.description + expected = [ + ('id', None, None, None, None, None, None), + ('name', None, None, None, None, None, None), + ('income', None, None, None, None, None, None), + ] + self.assertEqual(expected, actual) + + def CheckCurDescriptionVoidStatement(self): + self.cu.execute("insert into test(name) values (?)", ("foo",)) + self.assertIsNone(self.cu.description) + + def CheckCurDescriptionWithoutStatement(self): + cu = self.cx.cursor() + try: + self.assertIsNone(cu.description) + finally: + cu.close() + + @unittest.skipUnless(threading, 'This test requires threading.') class ThreadTests(unittest.TestCase): def setUp(self): diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1589,7 +1589,7 @@ 'copyfile' in caller.f_globals): dest_dir = sys.pypy_resolvedirof(target_executable) src_dir = sys.pypy_resolvedirof(sys.executable) - for libname in ['libpypy-c.so']: + for libname in ['libpypy-c.so', 'libpypy-c.dylib']: dest_library = 
os.path.join(dest_dir, libname) src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): diff --git a/lib-python/2.7/test/test_collections.py b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -1108,6 +1108,16 @@ od.popitem() self.assertEqual(len(od), 0) + def test_popitem_first(self): + pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] + shuffle(pairs) + od = OrderedDict(pairs) + while pairs: + self.assertEqual(od.popitem(last=False), pairs.pop(0)) + with self.assertRaises(KeyError): + od.popitem(last=False) + self.assertEqual(len(od), 0) + def test_pop(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) @@ -1179,7 +1189,11 @@ od = OrderedDict(pairs) # yaml.dump(od) --> # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n' - self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1])) + + # PyPy bug fix: added [0] at the end of this line, because the + # test is really about the 2-tuples that need to be 2-lists + # inside the list of 6 of them + self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1][0])) def test_reduce_not_too_fat(self): # do not save instance dictionary if not needed @@ -1189,6 +1203,16 @@ od.x = 10 self.assertEqual(len(od.__reduce__()), 3) + def test_reduce_exact_output(self): + # PyPy: test that __reduce__() produces the exact same answer as + # CPython does, even though in the 'all_ordered_dicts' branch we + # have to emulate it. 
+ pairs = [['c', 1], ['b', 2], ['d', 4]] + od = OrderedDict(pairs) + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,))) + od.x = 10 + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,), {'x': 10})) + def test_repr(self): od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py --- a/lib-python/2.7/test/test_xml_etree.py +++ b/lib-python/2.7/test/test_xml_etree.py @@ -225,9 +225,9 @@ >>> element.remove(subelement) >>> serialize(element) # 5 '' - >>> element.remove(subelement) + >>> element.remove(subelement) # doctest: +ELLIPSIS Traceback (most recent call last): - ValueError: list.remove(x): x not in list + ValueError: list.remove(... >>> serialize(element) # 6 '' >>> element[0:0] = [subelement, subelement, subelement] diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -7,7 +7,7 @@ 1. check out the branch vendor/stdlib 2. upgrade the files there -3. update stdlib-versions.txt with the output of hg -id from the cpython repo +3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit 5. update to default/py3k 6. create a integration branch for the new stdlib diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -33,7 +33,10 @@ of the given arguments and keywords. 
""" - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1187,8 +1187,9 @@ try: return self.__description except AttributeError: - self.__description = self.__statement._get_description() - return self.__description + if self.__statement: + self.__description = self.__statement._get_description() + return self.__description description = property(__get_description) def __get_lastrowid(self): diff --git a/lib_pypy/readline.py b/lib_pypy/readline.py --- a/lib_pypy/readline.py +++ b/lib_pypy/readline.py @@ -8,5 +8,9 @@ try: from pyrepl.readline import * -except SyntaxError: - raise ImportError +except ImportError: + import sys + if sys.platform == 'win32': + raise ImportError("the 'readline' module is not available on Windows" + " (on either PyPy or CPython)") + raise diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -6,6 +6,10 @@ C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API. +**NOTE**: As of 1st of December, PyPy comes with ``--shared`` by default +on linux, linux64 and windows. We will make it the default on all platforms +by the time of the next release. + The first thing that you need is to compile PyPy yourself with the option ``--shared``. We plan to make ``--shared`` the default in the future. Consult the `how to compile PyPy`_ doc for details. 
This will result in ``libpypy.so`` @@ -93,12 +97,18 @@ return res; } -If we save it as ``x.c`` now, compile it and run it with:: +If we save it as ``x.c`` now, compile it and run it (on linux) with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy +on OSX it is necessary to set the rpath of the binary if one wants to link to it:: + + gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl, at executable_path + ./x + hello from pypy + Worked! .. note:: If the compilation fails because of missing PyPy.h header file, diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -35,6 +35,13 @@ PyPy's bytearray type is very inefficient. It would be an interesting task to look into possible optimizations on this. +Implement AF_XXX packet types for PyPy +-------------------------------------- + +PyPy is missing AF_XXX types of sockets. Implementing it is easy-to-medium +task. `bug report`_ + +.. 
_`bug report`: https://bitbucket.org/pypy/pypy/issue/1942/support-for-af_xxx-sockets#more Implement copy-on-write list slicing ------------------------------------ diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -241,8 +241,7 @@ enable_translationmodules(config) config.translation.suggest(check_str_without_nul=True) - if sys.platform.startswith('linux'): - config.translation.suggest(shared=True) + config.translation.suggest(shared=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -89,17 +89,16 @@ class __extend__(ast.BoolOp): - def _accept_jump_if_any_is(self, gen, condition, target): - self.values[0].accept_jump_if(gen, condition, target) - for i in range(1, len(self.values)): + def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0): + for i in range(len(self.values) - skip_last): self.values[i].accept_jump_if(gen, condition, target) def accept_jump_if(self, gen, condition, target): if condition and self.op == ast.And or \ (not condition and self.op == ast.Or): end = gen.new_block() - self._accept_jump_if_any_is(gen, not condition, end) - gen.emit_jump(ops.JUMP_FORWARD, target) + self._accept_jump_if_any_is(gen, not condition, end, skip_last=1) + self.values[-1].accept_jump_if(gen, condition, target) gen.use_next_block(end) else: self._accept_jump_if_any_is(gen, condition, target) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -125,6 +125,8 @@ else: return self.space.builtin + _NO_CELLS = [] + @jit.unroll_safe def initialize_frame_scopes(self, outer_func, code): # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. 
@@ -143,7 +145,7 @@ nfreevars = len(code.co_freevars) if not nfreevars: if not ncellvars: - self.cells = [] + self.cells = self._NO_CELLS return # no self.cells needed - fast path elif outer_func is None: space = self.space diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -34,6 +34,7 @@ 'newp_handle': 'handle.newp_handle', 'from_handle': 'handle.from_handle', '_get_types': 'func._get_types', + 'from_buffer': 'func.from_buffer', 'string': 'func.string', 'buffer': 'cbuffer.buffer', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -440,6 +440,25 @@ return "handle to %s" % (self.space.str_w(w_repr),) +class W_CDataFromBuffer(W_CData): + _attrs_ = ['buf', 'length', 'w_keepalive'] + _immutable_fields_ = ['buf', 'length', 'w_keepalive'] + + def __init__(self, space, cdata, ctype, buf, w_object): + W_CData.__init__(self, space, cdata, ctype) + self.buf = buf + self.length = buf.getlength() + self.w_keepalive = w_object + + def get_array_length(self): + return self.length + + def _repr_extra(self): + w_repr = self.space.repr(self.w_keepalive) + return "buffer len %d from '%s' object" % ( + self.length, self.space.type(self.w_keepalive).name) + + W_CData.typedef = TypeDef( '_cffi_backend.CData', __module__ = '_cffi_backend', diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -76,3 +76,32 @@ def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) + +# ____________________________________________________________ + + at unwrap_spec(w_ctype=ctypeobj.W_CType) +def from_buffer(space, w_ctype, w_x): + from pypy.module._cffi_backend import 
ctypearray, ctypeprim + # + if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or + not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)): + raise oefmt(space.w_TypeError, + "needs 'char[]', got '%s'", w_ctype.name) + # + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? + try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + try: + _cdata = buf.get_raw_address() + except ValueError: + raise oefmt(space.w_TypeError, + "from_buffer() got a '%T' object, which supports the " + "buffer interface but cannot be rendered as a plain " + "raw address on PyPy", w_x) + # + return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -62,10 +62,54 @@ eptype("intptr_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) eptype("uintptr_t", rffi.UINTPTR_T, ctypeprim.W_CTypePrimitiveUnsigned) -eptype("ptrdiff_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) # <-xxx eptype("size_t", rffi.SIZE_T, ctypeprim.W_CTypePrimitiveUnsigned) eptype("ssize_t", rffi.SSIZE_T, ctypeprim.W_CTypePrimitiveSigned) +_WCTSigned = ctypeprim.W_CTypePrimitiveSigned +_WCTUnsign = ctypeprim.W_CTypePrimitiveUnsigned + +eptype("ptrdiff_t", getattr(rffi, 'PTRDIFF_T', rffi.INTPTR_T), _WCTSigned) +eptype("intmax_t", getattr(rffi, 'INTMAX_T', rffi.LONGLONG), _WCTSigned) +eptype("uintmax_t", getattr(rffi, 'UINTMAX_T', rffi.LONGLONG), _WCTUnsign) + +if hasattr(rffi, 'INT_LEAST8_T'): + eptype("int_least8_t", rffi.INT_LEAST8_T, _WCTSigned) + eptype("int_least16_t", rffi.INT_LEAST16_T, _WCTSigned) + eptype("int_least32_t", rffi.INT_LEAST32_T, _WCTSigned) + eptype("int_least64_t", rffi.INT_LEAST64_T, _WCTSigned) + eptype("uint_least8_t", 
rffi.UINT_LEAST8_T, _WCTUnsign) + eptype("uint_least16_t",rffi.UINT_LEAST16_T, _WCTUnsign) + eptype("uint_least32_t",rffi.UINT_LEAST32_T, _WCTUnsign) + eptype("uint_least64_t",rffi.UINT_LEAST64_T, _WCTUnsign) +else: + eptypesize("int_least8_t", 1, _WCTSigned) + eptypesize("uint_least8_t", 1, _WCTUnsign) + eptypesize("int_least16_t", 2, _WCTSigned) + eptypesize("uint_least16_t", 2, _WCTUnsign) + eptypesize("int_least32_t", 4, _WCTSigned) + eptypesize("uint_least32_t", 4, _WCTUnsign) + eptypesize("int_least64_t", 8, _WCTSigned) + eptypesize("uint_least64_t", 8, _WCTUnsign) + +if hasattr(rffi, 'INT_FAST8_T'): + eptype("int_fast8_t", rffi.INT_FAST8_T, _WCTSigned) + eptype("int_fast16_t", rffi.INT_FAST16_T, _WCTSigned) + eptype("int_fast32_t", rffi.INT_FAST32_T, _WCTSigned) + eptype("int_fast64_t", rffi.INT_FAST64_T, _WCTSigned) + eptype("uint_fast8_t", rffi.UINT_FAST8_T, _WCTUnsign) + eptype("uint_fast16_t",rffi.UINT_FAST16_T, _WCTUnsign) + eptype("uint_fast32_t",rffi.UINT_FAST32_T, _WCTUnsign) + eptype("uint_fast64_t",rffi.UINT_FAST64_T, _WCTUnsign) +else: + eptypesize("int_fast8_t", 1, _WCTSigned) + eptypesize("uint_fast8_t", 1, _WCTUnsign) + eptypesize("int_fast16_t", 2, _WCTSigned) + eptypesize("uint_fast16_t", 2, _WCTUnsign) + eptypesize("int_fast32_t", 4, _WCTSigned) + eptypesize("uint_fast32_t", 4, _WCTUnsign) + eptypesize("int_fast64_t", 8, _WCTSigned) + eptypesize("uint_fast64_t", 8, _WCTUnsign) + @unwrap_spec(name=str) def new_primitive_type(space, name): try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -397,7 +397,7 @@ def test_invalid_indexing(): p = new_primitive_type("int") x = cast(p, 42) - py.test.raises(TypeError, "p[0]") + py.test.raises(TypeError, "x[0]") def test_default_str(): BChar = new_primitive_type("char") @@ -2718,7 +2718,16 @@ def 
test_nonstandard_integer_types(): for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t', 'uint32_t', 'int64_t', 'uint64_t', 'intptr_t', - 'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t']: + 'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t', + 'int_least8_t', 'uint_least8_t', + 'int_least16_t', 'uint_least16_t', + 'int_least32_t', 'uint_least32_t', + 'int_least64_t', 'uint_least64_t', + 'int_fast8_t', 'uint_fast8_t', + 'int_fast16_t', 'uint_fast16_t', + 'int_fast32_t', 'uint_fast32_t', + 'int_fast64_t', 'uint_fast64_t', + 'intmax_t', 'uintmax_t']: new_primitive_type(typename) # works def test_cannot_convert_unicode_to_charp(): @@ -3186,6 +3195,20 @@ ('a2', BChar, 5)], None, -1, -1, SF_PACKED) +def test_from_buffer(): + import array + a = array.array('H', [10000, 20000, 30000]) + BChar = new_primitive_type("char") + BCharP = new_pointer_type(BChar) + BCharA = new_array_type(BCharP, None) + c = from_buffer(BCharA, a) + assert typeof(c) is BCharA + assert len(c) == 6 + assert repr(c) == "<cdata 'char[]' buffer len 6 from 'array.array' object>" + p = new_pointer_type(new_primitive_type("unsigned short")) + cast(p, c)[1] += 500 + assert list(a) == [10000, 20500, 30000] + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8.6" diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -31,7 +31,7 @@ class AppTestC(object): """Populated below, hack hack hack.""" - spaceconfig = dict(usemodules=('_cffi_backend', '_io')) + spaceconfig = dict(usemodules=('_cffi_backend', '_io', 'array')) def setup_class(cls): testfuncs_w = [] diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -564,7 +564,7 @@ # Flush the write buffer if necessary if self.writable: - self._writer_flush_unlocked(space) + self._flush_and_rewind_unlocked(space) 
self._reader_reset_buf() # Read whole blocks, and don't buffer them diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -400,3 +400,32 @@ f.read(1) f.seek(-1, 1) f.write(b'') + + def test_issue1902_2(self): + import _io + with _io.open(self.tmpfile, 'w+b', 4096) as f: + f.write(b'\xff' * 13569) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(4123) + f.seek(-4123, 1) + + def test_issue1902_3(self): + import _io + buffer_size = 4096 + with _io.open(self.tmpfile, 'w+b', buffer_size) as f: + f.write(b'\xff' * buffer_size * 3) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(buffer_size * 2) + assert f.tell() == 1 + buffer_size * 2 diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_rawffi/alt/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -14,6 +14,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror +from pypy.module._rawffi import lasterror import os if os.name == 'nt': @@ -202,11 +203,23 @@ self.func = func self.argchain = argchain + def before(self): + lasterror.restore_last_error(self.space) + + def after(self): + lasterror.save_last_error(self.space) + def get_longlong(self, w_ffitype): - return self.func.call(self.argchain, rffi.LONGLONG) + self.before() + x = self.func.call(self.argchain, rffi.LONGLONG) + self.after() + return x def get_ulonglong(self, w_ffitype): - return self.func.call(self.argchain, rffi.ULONGLONG) + self.before() + x = self.func.call(self.argchain, rffi.ULONGLONG) + self.after() + return x def get_signed(self, w_ffitype): # if the declared return type of the function is 
smaller than LONG, @@ -217,64 +230,94 @@ # to space.wrap in order to get a nice applevel . # restype = w_ffitype.get_ffitype() + self.before() call = self.func.call if restype is libffi.types.slong: - return call(self.argchain, rffi.LONG) + x = call(self.argchain, rffi.LONG) elif restype is libffi.types.sint: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.INT)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.INT)) elif restype is libffi.types.sshort: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.SHORT)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.SHORT)) elif restype is libffi.types.schar: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.SIGNEDCHAR)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.SIGNEDCHAR)) else: - self.error(w_ffitype) + raise self.error(w_ffitype) + self.after() + return x def get_unsigned(self, w_ffitype): - return self.func.call(self.argchain, rffi.ULONG) + self.before() + x = self.func.call(self.argchain, rffi.ULONG) + self.after() + return x def get_unsigned_which_fits_into_a_signed(self, w_ffitype): # the same comment as get_signed apply restype = w_ffitype.get_ffitype() + self.before() call = self.func.call if restype is libffi.types.uint: assert not libffi.IS_32_BIT # on 32bit machines, we should never get here, because it's a case # which has already been handled by get_unsigned above. 
- return rffi.cast(rffi.LONG, call(self.argchain, rffi.UINT)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.UINT)) elif restype is libffi.types.ushort: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.USHORT)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.USHORT)) elif restype is libffi.types.uchar: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.UCHAR)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.UCHAR)) else: - self.error(w_ffitype) + raise self.error(w_ffitype) + self.after() + return x def get_pointer(self, w_ffitype): + self.before() ptrres = self.func.call(self.argchain, rffi.VOIDP) + self.after() return rffi.cast(rffi.ULONG, ptrres) def get_char(self, w_ffitype): - return self.func.call(self.argchain, rffi.UCHAR) + self.before() + x = self.func.call(self.argchain, rffi.UCHAR) + self.after() + return x def get_unichar(self, w_ffitype): - return self.func.call(self.argchain, rffi.WCHAR_T) + self.before() + x = self.func.call(self.argchain, rffi.WCHAR_T) + self.after() + return x def get_float(self, w_ffitype): - return self.func.call(self.argchain, rffi.DOUBLE) + self.before() + x = self.func.call(self.argchain, rffi.DOUBLE) + self.after() + return x def get_singlefloat(self, w_ffitype): - return self.func.call(self.argchain, rffi.FLOAT) + self.before() + x = self.func.call(self.argchain, rffi.FLOAT) + self.after() + return x def get_struct(self, w_ffitype, w_structdescr): + self.before() addr = self.func.call(self.argchain, rffi.LONG, is_struct=True) + self.after() return w_structdescr.fromaddress(self.space, addr) def get_struct_rawffi(self, w_ffitype, w_structdescr): + self.before() uintval = self.func.call(self.argchain, rffi.ULONG, is_struct=True) + self.after() return w_structdescr.fromaddress(self.space, uintval) def get_void(self, w_ffitype): - return self.func.call(self.argchain, lltype.Void) + self.before() + x = self.func.call(self.argchain, lltype.Void) + self.after() + return x def unpack_argtypes(space, 
w_argtypes, w_restype): diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,4 +1,5 @@ from rpython.rlib.buffer import Buffer +from rpython.rtyper.lltypesystem import rffi # XXX not the most efficient implementation @@ -20,3 +21,7 @@ def setitem(self, index, char): ll_buffer = self.datainstance.ll_buffer ll_buffer[index] = char + + def get_raw_address(self): + ll_buffer = self.datainstance.ll_buffer + return rffi.cast(rffi.CCHARP, ll_buffer) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -20,6 +20,7 @@ from rpython.rlib.rarithmetic import intmask, r_uint from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +from pypy.module._rawffi import lasterror TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ @@ -498,10 +499,14 @@ try: if self.resshape is not None: result = self.resshape.allocate(space, 1, autofree=True) + lasterror.restore_last_error(space) self.ptr.call(args_ll, result.ll_buffer) + lasterror.save_last_error(space) return space.wrap(result) else: + lasterror.restore_last_error(space) self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) + lasterror.save_last_error(space) return space.w_None except StackCheckError, e: raise OperationError(space.w_ValueError, space.wrap(e.message)) @@ -618,12 +623,10 @@ if sys.platform == 'win32': def get_last_error(space): - from rpython.rlib.rwin32 import GetLastError - return space.wrap(GetLastError()) + return space.wrap(lasterror.fetch_last_error(space)) @unwrap_spec(error=int) def set_last_error(space, error): - from rpython.rlib.rwin32 import SetLastError - SetLastError(error) + lasterror.store_last_error(space, error) else: # always have at least a dummy version of these functions # (https://bugs.pypy.org/issue1242) diff --git 
a/pypy/module/_rawffi/lasterror.py b/pypy/module/_rawffi/lasterror.py new file mode 100644 --- /dev/null +++ b/pypy/module/_rawffi/lasterror.py @@ -0,0 +1,40 @@ +# For Windows only. +# https://bitbucket.org/pypy/pypy/issue/1944/ctypes-on-windows-getlasterror + +import os + +_MS_WINDOWS = os.name == "nt" + + +if _MS_WINDOWS: + from rpython.rlib import rwin32 + from pypy.interpreter.executioncontext import ExecutionContext + + + ExecutionContext._rawffi_last_error = 0 + + def fetch_last_error(space): + ec = space.getexecutioncontext() + return ec._rawffi_last_error + + def store_last_error(space, last_error): + ec = space.getexecutioncontext() + ec._rawffi_last_error = last_error + + def restore_last_error(space): + ec = space.getexecutioncontext() + lasterror = ec._rawffi_last_error + rwin32.SetLastError(lasterror) + + def save_last_error(space): + lasterror = rwin32.GetLastError() + ec = space.getexecutioncontext() + ec._rawffi_last_error = lasterror + +else: + + def restore_last_error(space): + pass + + def save_last_error(space): + pass diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -16,6 +16,7 @@ #include "src/precommondefs.h" #include #include + #include struct x { @@ -204,6 +205,24 @@ return inp; } + RPY_EXPORTED + int check_errno(int incoming) + { + int old_errno = errno; + errno = incoming; + return old_errno; + } + + #ifdef _WIN32 + #include + RPY_EXPORTED + int check_last_error(int incoming) + { + int old_errno = GetLastError(); + SetLastError(incoming); + return old_errno; + } + #endif ''')) eci = ExternalCompilationInfo(include_dirs=[cdir]) return str(platform.compile([c_file], eci, 'x', standalone=False)) @@ -1118,6 +1137,15 @@ b[3] = b'x' assert b[3] == b'x' + def test_pypy_raw_address(self): + import _rawffi + S = _rawffi.Structure((40, 1)) + s = S(autofree=True) + addr = buffer(s)._pypy_raw_address() + 
assert type(addr) is int + assert buffer(s)._pypy_raw_address() == addr + assert buffer(s, 10)._pypy_raw_address() == addr + 10 + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') @@ -1143,6 +1171,37 @@ raises(OverflowError, "arg1[0] = 10**900") arg1.free() + def test_errno(self): + import _rawffi + lib = _rawffi.CDLL(self.lib_name) + A = _rawffi.Array('i') + f = lib.ptr('check_errno', ['i'], 'i') + _rawffi.set_errno(42) + arg = A(1) + arg[0] = 43 + res = f(arg) + assert res[0] == 42 + z = _rawffi.get_errno() + assert z == 43 + arg.free() + + def test_last_error(self): + import sys + if sys.platform != 'win32': + skip("Windows test") + import _rawffi + lib = _rawffi.CDLL(self.lib_name) + A = _rawffi.Array('i') + f = lib.ptr('check_last_error', ['i'], 'i') + _rawffi.set_last_error(42) + arg = A(1) + arg[0] = 43 + res = f(arg) + assert res[0] == 42 + z = _rawffi.get_last_error() + assert z == 43 + arg.free() + def test_char_array_int(self): import _rawffi A = _rawffi.Array('c') diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -55,7 +55,8 @@ if not OPENSSL_NO_SSL2: constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2 -constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3 +if not OPENSSL_NO_SSL3: + constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3 constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23 constants["PROTOCOL_TLSv1"] = PY_SSL_VERSION_TLS1 @@ -95,7 +96,7 @@ def __init__(self, space, protocol): if protocol == PY_SSL_VERSION_TLS1: method = libssl_TLSv1_method() - elif protocol == PY_SSL_VERSION_SSL3: + elif protocol == PY_SSL_VERSION_SSL3 and not OPENSSL_NO_SSL3: method = libssl_SSLv3_method() elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2: method = libssl_SSLv2_method() diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ 
-58,7 +58,7 @@ w_globals = from_ref(space, py_frame.c_f_globals) frame = space.FrameClass(space, code, w_globals, outer_func=None) - frame.f_lineno = py_frame.c_f_lineno + frame.f_lineno = rffi.getintfield(py_frame, 'c_f_lineno') w_obj = space.wrap(frame) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -244,6 +244,9 @@ def getitem(self, index): return self.ptr[index] + def get_raw_address(self): + return rffi.cast(rffi.CCHARP, self.ptr) + def wrap_getbuffer(space, w_self, w_args, func): func_target = rffi.cast(getbufferproc, func) with lltype.scoped_alloc(Py_buffer) as view: diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py --- a/pypy/module/gc/__init__.py +++ b/pypy/module/gc/__init__.py @@ -30,6 +30,7 @@ 'get_referrers': 'referents.get_referrers', '_dump_rpy_heap': 'referents._dump_rpy_heap', 'get_typeids_z': 'referents.get_typeids_z', + 'get_typeids_list': 'referents.get_typeids_list', 'GcRef': 'referents.W_GcRef', }) MixedModule.__init__(self, space, w_name) diff --git a/pypy/module/gc/app_referents.py b/pypy/module/gc/app_referents.py --- a/pypy/module/gc/app_referents.py +++ b/pypy/module/gc/app_referents.py @@ -16,7 +16,8 @@ [0][0][0][-1] inserted after all GC roots, before all non-roots. If the argument is a filename and the 'zlib' module is available, - we also write a 'typeids.txt' in the same directory, if none exists. + we also write 'typeids.txt' and 'typeids.lst' in the same directory, + if they don't already exist. 
""" if isinstance(file, str): f = open(file, 'wb') @@ -30,7 +31,13 @@ filename2 = os.path.join(os.path.dirname(file), 'typeids.txt') if not os.path.exists(filename2): data = zlib.decompress(gc.get_typeids_z()) - f = open(filename2, 'wb') + f = open(filename2, 'w') + f.write(data) + f.close() + filename2 = os.path.join(os.path.dirname(file), 'typeids.lst') + if not os.path.exists(filename2): + data = ''.join(['%d\n' % n for n in gc.get_typeids_list()]) + f = open(filename2, 'w') f.write(data) f.close() else: diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py --- a/pypy/module/gc/referents.py +++ b/pypy/module/gc/referents.py @@ -228,3 +228,8 @@ a = rgc.get_typeids_z() s = ''.join([a[i] for i in range(len(a))]) return space.wrap(s) + +def get_typeids_list(space): + l = rgc.get_typeids_list() + list_w = [space.wrap(l[i]) for i in range(len(l))] + return space.newlist(list_w) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,7 +1,7 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.tool.pairtype import extendabletype - +from pypy.module.micronumpy import support def wrap_impl(space, w_cls, w_instance, impl): if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): @@ -44,11 +44,32 @@ return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, - w_subtype=None, w_base=None, writable=True): + def from_shape_and_storage(space, shape, storage, dtype, storage_bytes=-1, + order='C', owning=False, w_subtype=None, + w_base=None, writable=True, strides=None): from pypy.module.micronumpy import concrete - from pypy.module.micronumpy.strides import calc_strides - strides, backstrides = calc_strides(shape, dtype, order) + from pypy.module.micronumpy.strides 
import (calc_strides, + calc_backstrides) + isize = dtype.elsize + if storage_bytes > 0 : + totalsize = support.product(shape) * isize + if totalsize > storage_bytes: + raise OperationError(space.w_TypeError, space.wrap( + "buffer is too small for requested array")) + else: + storage_bytes = support.product(shape) * isize + if strides is None: + strides, backstrides = calc_strides(shape, dtype, order) + else: + if len(strides) != len(shape): + raise oefmt(space.w_ValueError, + 'strides, if given, must be the same length as shape') + for i in range(len(strides)): + if strides[i] < 0 or strides[i]*shape[i] > storage_bytes: + raise oefmt(space.w_ValueError, + 'strides is incompatible with shape of requested ' + 'array and size of buffer') + backstrides = calc_backstrides(strides, shape) if w_base is not None: if owning: raise OperationError(space.w_ValueError, diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -59,9 +59,9 @@ _mixin_ = True def reduce(self, space): - numpypy = space.getbuiltinmodule("_numpypy") - assert isinstance(numpypy, MixedModule) - multiarray = numpypy.get("multiarray") + _numpypy = space.getbuiltinmodule("_numpypy") + assert isinstance(_numpypy, MixedModule) + multiarray = _numpypy.get("multiarray") assert isinstance(multiarray, MixedModule) scalar = multiarray.get("scalar") @@ -167,7 +167,7 @@ if len(args_w) >= 1: for w_arg in args_w: try: - idx = support.index_w(space, w_arg) + support.index_w(space, w_arg) except OperationError: raise oefmt(space.w_TypeError, "an integer is required") raise oefmt(space.w_ValueError, "axes don't match array") diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -34,8 +34,8 @@ SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", - "unegative", "flat", "tostring","count_nonzero", - 
"argsort"] + "unegative", "flat", "tostring", "count_nonzero", + "argsort", "cumsum", "logical_xor_reduce"] TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted'] TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] @@ -559,6 +559,11 @@ w_res = arr.descr_any(interp.space) elif self.name == "all": w_res = arr.descr_all(interp.space) + elif self.name == "cumsum": + w_res = arr.descr_cumsum(interp.space) + elif self.name == "logical_xor_reduce": + logical_xor = ufuncs.get(interp.space).logical_xor + w_res = logical_xor.reduce(interp.space, arr, None) elif self.name == "unegative": neg = ufuncs.get(interp.space).negative w_res = neg.call(interp.space, [arr]) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -11,7 +11,7 @@ from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, RecordChunk, calc_strides, calc_new_strides, shape_agreement, - calculate_broadcast_strides) + calculate_broadcast_strides, calc_backstrides) class BaseConcreteArray(object): @@ -79,10 +79,7 @@ self.get_strides(), self.order) if new_strides is not None: # We can create a view, strides somehow match up. 
- ndims = len(new_shape) - new_backstrides = [0] * ndims - for nd in range(ndims): - new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + new_backstrides = calc_backstrides(new_strides, new_shape) assert isinstance(orig_array, W_NDimArray) or orig_array is None return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -302,5 +302,5 @@ return a else: writable = not buf.readonly - return W_NDimArray.from_shape_and_storage(space, [n], storage, dtype=dtype, - w_base=w_buffer, writable=writable) + return W_NDimArray.from_shape_and_storage(space, [n], storage, storage_bytes=s, + dtype=dtype, w_base=w_buffer, writable=writable) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -286,7 +286,6 @@ def descr_hash(self, space): return space.wrap(self._compute_hash(space, 0x345678)) - def descr_str(self, space): if self.fields: return space.str(self.descr_get_descr(space)) @@ -394,7 +393,7 @@ alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == space.w_None): - raise oefmt(space.w_ValueError, "inconsistent fields and names") + raise oefmt(space.w_ValueError, "inconsistent fields and names in Numpy dtype unpickling") self.byteorder = endian self.shape = [] diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -1,47 +1,30 @@ +from rpython.rlib import jit + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import 
constants as NPY - +from pypy.module.micronumpy.strides import is_c_contiguous, is_f_contiguous def enable_flags(arr, flags): arr.flags |= flags - def clear_flags(arr, flags): arr.flags &= ~flags - def _update_contiguous_flags(arr): - shape = arr.shape - strides = arr.strides - - is_c_contig = True - sd = arr.dtype.elsize - for i in range(len(shape) - 1, -1, -1): - dim = shape[i] - if strides[i] != sd: - is_c_contig = False - break - if dim == 0: - break - sd *= dim + is_c_contig = is_c_contiguous(arr) if is_c_contig: enable_flags(arr, NPY.ARRAY_C_CONTIGUOUS) else: clear_flags(arr, NPY.ARRAY_C_CONTIGUOUS) - sd = arr.dtype.elsize - for i in range(len(shape)): - dim = shape[i] - if strides[i] != sd: - clear_flags(arr, NPY.ARRAY_F_CONTIGUOUS) - return - if dim == 0: - break - sd *= dim - enable_flags(arr, NPY.ARRAY_F_CONTIGUOUS) + is_f_contig = is_f_contiguous(arr) + if is_f_contig: + enable_flags(arr, NPY.ARRAY_F_CONTIGUOUS) + else: + clear_flags(arr, NPY.ARRAY_F_CONTIGUOUS) class W_FlagsObject(W_Root): diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -22,6 +22,9 @@ def get_shape(self): return self.shape + def get_size(self): + return self.base().get_size() + def create_iter(self, shape=None, backward_broadcast=False): assert isinstance(self.base(), W_NDimArray) return self.base().create_iter() @@ -41,8 +44,8 @@ return space.wrap(self.state.index) def descr_coords(self, space): - self.state = self.iter.update(self.state) - return space.newtuple([space.wrap(c) for c in self.state.indices]) + coords = self.iter.indices(self.state) + return space.newtuple([space.wrap(c) for c in coords]) def descr_iter(self): return self @@ -54,7 +57,7 @@ if self.iter.done(self.state): raise OperationError(space.w_StopIteration, space.w_None) w_res = self.iter.getitem(self.state) - self.state = self.iter.next(self.state) + self.iter.next(self.state, mutate=True) 
return w_res def descr_getitem(self, space, w_idx): @@ -71,7 +74,7 @@ base.get_order(), w_instance=base) return loop.flatiter_getitem(res, self.iter, state, step) finally: - self.state = self.iter.reset(self.state) + self.iter.reset(self.state, mutate=True) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or @@ -91,7 +94,7 @@ arr = convert_to_array(space, w_value) loop.flatiter_setitem(space, dtype, arr, self.iter, state, step, length) finally: - self.state = self.iter.reset(self.state) + self.iter.reset(self.state, mutate=True) W_FlatIterator.typedef = TypeDef("numpy.flatiter", diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -41,16 +41,6 @@ from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.flagsobj import _update_contiguous_flags -class OpFlag(object): - def __init__(self): - self.rw = '' - self.broadcast = True - self.force_contig = False - self.force_align = False - self.native_byte_order = False - self.tmp_copy = '' - self.allocate = False - class PureShapeIter(object): def __init__(self, shape, idx_w): @@ -87,25 +77,24 @@ class IterState(object): - _immutable_fields_ = ['iterator', 'index', 'indices', 'offset'] + _immutable_fields_ = ['iterator', '_indices'] def __init__(self, iterator, index, indices, offset): self.iterator = iterator self.index = index - self.indices = indices + self._indices = indices self.offset = offset class ArrayIter(object): _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', 'strides[*]', 'backstrides[*]', 'factors[*]', - 'slice_shape', 'slice_stride', 'slice_backstride', - 'track_index', 'operand_type', 'slice_operand_type'] + 'track_index'] track_index = True - def __init__(self, array, size, shape, strides, backstrides, op_flags=OpFlag()): - from pypy.module.micronumpy import concrete + @jit.unroll_safe + 
def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) _update_contiguous_flags(array) self.contiguous = (array.flags & NPY.ARRAY_C_CONTIGUOUS and @@ -117,12 +106,6 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides - self.slice_shape = 1 - self.slice_stride = -1 - if strides: - self.slice_stride = strides[-1] - self.slice_backstride = 1 - self.slice_operand_type = concrete.SliceArray ndim = len(shape) factors = [0] * ndim @@ -132,32 +115,35 @@ else: factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i] self.factors = factors - if op_flags.rw == 'r': - self.operand_type = concrete.ConcreteNonWritableArrayWithBase - else: - self.operand_type = concrete.ConcreteArrayWithBase @jit.unroll_safe - def reset(self, state=None): + def reset(self, state=None, mutate=False): + index = 0 if state is None: indices = [0] * len(self.shape_m1) else: assert state.iterator is self - indices = state.indices + indices = state._indices for i in xrange(self.ndim_m1, -1, -1): indices[i] = 0 - return IterState(self, 0, indices, self.array.start) + offset = self.array.start + if not mutate: + return IterState(self, index, indices, offset) + state.index = index + state.offset = offset @jit.unroll_safe - def next(self, state): + def next(self, state, mutate=False): assert state.iterator is self index = state.index if self.track_index: index += 1 - indices = state.indices[:] + indices = state._indices offset = state.offset if self.contiguous: offset += self.array.dtype.elsize + elif self.ndim_m1 == 0: + offset += self.strides[0] else: for i in xrange(self.ndim_m1, -1, -1): idx = indices[i] @@ -168,13 +154,18 @@ else: indices[i] = 0 offset -= self.backstrides[i] - return IterState(self, index, indices, offset) + if not mutate: + return IterState(self, index, indices, offset) + state.index = index + state.offset = offset @jit.unroll_safe def goto(self, index): offset = self.array.start if 
self.contiguous: offset += index * self.array.dtype.elsize + elif self.ndim_m1 == 0: + offset += index * self.strides[0] else: current = index for i in xrange(len(self.shape_m1)): @@ -183,20 +174,20 @@ return IterState(self, index, None, offset) @jit.unroll_safe - def update(self, state): + def indices(self, state): assert state.iterator is self assert self.track_index - if not self.contiguous: - return state + indices = state._indices + if not (self.contiguous or self.ndim_m1 == 0): + return indices current = state.index - indices = state.indices for i in xrange(len(self.shape_m1)): if self.factors[i] != 0: indices[i] = current / self.factors[i] current %= self.factors[i] else: indices[i] = 0 - return IterState(self, state.index, indices, state.offset) + return indices def done(self, state): assert state.iterator is self @@ -215,12 +206,6 @@ assert state.iterator is self self.array.setitem(state.offset, elem) - def getoperand(self, st, base): - impl = self.operand_type - res = impl([], self.array.dtype, self.array.order, [], [], - self.array.storage, base) - res.start = st.offset - return res def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() @@ -244,42 +229,3 @@ size /= shape[axis] shape[axis] = backstrides[axis] = 0 return ArrayIter(array, size, shape, array.strides, backstrides) - -class SliceIter(ArrayIter): - ''' - used with external loops, getitem and setitem return a SliceArray - view into the original array - ''' - _immutable_fields_ = ['base', 'slice_shape[*]', 'slice_stride[*]', 'slice_backstride[*]'] - - def __init__(self, array, size, shape, strides, backstrides, slice_shape, - slice_stride, slice_backstride, op_flags, base): - from pypy.module.micronumpy import concrete - ArrayIter.__init__(self, array, size, shape, strides, backstrides, op_flags) - self.slice_shape = slice_shape - self.slice_stride = slice_stride - self.slice_backstride = slice_backstride - self.base = base - if op_flags.rw == 'r': - self.slice_operand_type = 
concrete.NonWritableSliceArray - else: - self.slice_operand_type = concrete.SliceArray - - def getitem(self, state): - # XXX cannot be called - must return a boxed value - assert False - - def getitem_bool(self, state): - # XXX cannot be called - must return a boxed value - assert False - - def setitem(self, state, elem): - # XXX cannot be called - must return a boxed value - assert False - - def getoperand(self, state, base): - assert state.iterator is self - impl = self.slice_operand_type - arr = impl(state.offset, [self.slice_stride], [self.slice_backstride], - [self.slice_shape], self.array, self.base) - return arr diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -42,23 +42,38 @@ # TODO handle __array_priorities__ and maybe flip the order + if w_lhs.get_size() == 1: + w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype) + left_iter = left_state = None + else: + w_left = None + left_iter, left_state = w_lhs.create_iter(shape) + left_iter.track_index = False + + if w_rhs.get_size() == 1: + w_right = w_rhs.get_scalar_value().convert_to(space, calc_dtype) + right_iter = right_state = None + else: + w_right = None + right_iter, right_state = w_rhs.create_iter(shape) + right_iter.track_index = False + if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=lhs_for_subtype) - left_iter, left_state = w_lhs.create_iter(shape) - right_iter, right_state = w_rhs.create_iter(shape) out_iter, out_state = out.create_iter(shape) - left_iter.track_index = right_iter.track_index = False shapelen = len(shape) while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, calc_dtype=calc_dtype, res_dtype=res_dtype) - w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) - w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) + if left_iter: + w_left = 
left_iter.getitem(left_state).convert_to(space, calc_dtype) + left_state = left_iter.next(left_state) + if right_iter: + w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) + right_state = right_iter.next(right_state) out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( space, res_dtype)) - left_state = left_iter.next(left_state) - right_state = right_iter.next(right_state) out_state = out_iter.next(out_state) return out @@ -68,11 +83,12 @@ reds='auto') def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): + obj_iter, obj_state = w_obj.create_iter(shape) + obj_iter.track_index = False + if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) - obj_iter, obj_state = w_obj.create_iter(shape) out_iter, out_state = out.create_iter(shape) - obj_iter.track_index = False shapelen = len(shape) while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, @@ -89,17 +105,14 @@ def setslice(space, shape, target, source): if not shape: - # XXX - simplify - target_iter, target_state = target.create_iter(shape) - source_iter, source_state = source.create_iter(shape) dtype = target.dtype - val = source_iter.getitem(source_state) + val = source.getitem(source.start) if dtype.is_str_or_unicode(): val = dtype.coerce(space, val) else: val = val.convert_to(space, dtype) - target_iter.setitem(target_state, val) - return target + target.setitem(target.start, val) + return target return _setslice(space, shape, target, source) def _setslice(space, shape, target, source): @@ -107,6 +120,7 @@ # array implementations, not arrays target_iter, target_state = target.create_iter(shape) source_iter, source_state = source.create_iter(shape) + source_iter.track_index = False dtype = target.dtype shapelen = len(shape) while not target_iter.done(target_state): @@ -152,6 +166,7 @@ def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): obj_iter, obj_state = 
obj.create_iter() out_iter, out_state = out.create_iter() + out_iter.track_index = False if identity is None: cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) out_iter.setitem(out_state, cur_value) @@ -225,10 +240,9 @@ state = x_state return out -axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', - greens=['shapelen', - 'func', 'dtype'], - reds='auto') +axis_reduce_driver = jit.JitDriver(name='numpy_axis_reduce', + greens=['shapelen', 'func', 'dtype'], + reds='auto') def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): @@ -241,21 +255,24 @@ temp_iter = out_iter # hack temp_state = out_state arr_iter, arr_state = arr.create_iter() + arr_iter.track_index = False if identity is not None: identity = identity.convert_to(space, dtype) shapelen = len(shape) while not out_iter.done(out_state): - axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=dtype) - assert not arr_iter.done(arr_state) + axis_reduce_driver.jit_merge_point(shapelen=shapelen, func=func, + dtype=dtype) w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) - out_state = out_iter.update(out_state) - if out_state.indices[axis] == 0: + arr_state = arr_iter.next(arr_state) + + out_indices = out_iter.indices(out_state) + if out_indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) else: cur = temp_iter.getitem(temp_state) w_val = func(dtype, cur, w_val) + out_iter.setitem(out_state, w_val) out_state = out_iter.next(out_state) if cumulative: @@ -263,7 +280,6 @@ temp_state = temp_iter.next(temp_state) else: temp_state = out_state - arr_state = arr_iter.next(arr_state) return out @@ -382,9 +398,9 @@ while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) if arr_iter.getitem_bool(arr_state): - arr_state = arr_iter.update(arr_state) + arr_indices = arr_iter.indices(arr_state) for d in dims: - res_iter.setitem(res_state, 
box(arr_state.indices[d])) + res_iter.setitem(res_state, box(arr_indices[d])) res_state = res_iter.next(res_state) arr_state = arr_iter.next(arr_state) return res diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -19,7 +19,7 @@ order_converter, shape_converter, searchside_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import get_shape_from_iterable, \ - shape_agreement, shape_agreement_multiple + shape_agreement, shape_agreement_multiple, is_c_contiguous, is_f_contiguous def _match_dot_shapes(space, left, right): @@ -529,9 +529,10 @@ "__array__(dtype) not implemented")) if type(self) is W_NDimArray: return self + sz = support.product(self.get_shape()) * self.get_dtype().elsize return W_NDimArray.from_shape_and_storage( space, self.get_shape(), self.implementation.storage, - self.get_dtype(), w_base=self) + self.get_dtype(), storage_bytes=sz, w_base=self) def descr_array_iface(self, space): addr = self.implementation.get_storage_as_int(space) @@ -827,7 +828,15 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) else: - if dims == 1 or impl.get_strides()[0] < impl.get_strides()[-1]: + if not is_c_contiguous(impl) and not is_f_contiguous(impl): + if old_itemsize != new_itemsize: + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + # Strides, shape does not change + v = impl.astype(space, dtype) + return wrap_impl(space, w_type, self, v) + strides = impl.get_strides() + if dims == 1 or strides[0] buf.getlength(): - raise OperationError(space.w_TypeError, space.wrap( - "buffer is too small for requested array")) storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) storage = rffi.ptradd(storage, offset) - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + return 
W_NDimArray.from_shape_and_storage(space, shape, storage, + dtype, w_base=w_buffer, + storage_bytes=buf.getlength()-offset, w_subtype=w_subtype, - w_base=w_buffer, - writable=not buf.readonly) + writable=not buf.readonly, + strides=strides) order = order_converter(space, w_order, NPY.CORDER) if order == NPY.CORDER: @@ -1236,8 +1245,9 @@ return w_ret - at unwrap_spec(addr=int) -def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): + at unwrap_spec(addr=int, buf_len=int) +def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, + buf_len=-1, w_subtype=None, w_strides=None): """ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. @@ -1246,14 +1256,22 @@ dtype = space.interp_w(descriptor.W_Dtype, space.call_function( space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = shape_converter(space, w_shape, dtype) + if not space.is_none(w_strides): + strides = [space.int_w(w_i) for w_i in + space.unpackiterable(w_strides)] + else: + strides = None if w_subtype: if not space.isinstance_w(w_subtype, space.w_type): raise OperationError(space.w_ValueError, space.wrap( "subtype must be a subtype of ndarray, not a class instance")) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - 'C', False, w_subtype) + buf_len, 'C', False, w_subtype, + strides=strides) else: - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + storage_bytes=buf_len, + strides=strides) app_take = applevel(r""" def take(a, indices, axis, out, mode): diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -6,7 +6,7 @@ from pypy.module.micronumpy import ufuncs, support, concrete from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.descriptor import 
decode_w_dtype -from pypy.module.micronumpy.iterators import ArrayIter, SliceIter, OpFlag +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (calculate_broadcast_strides, shape_agreement, shape_agreement_multiple) @@ -36,6 +36,16 @@ return ret +class OpFlag(object): + def __init__(self): + self.rw = '' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + def parse_op_flag(space, lst): op_flag = OpFlag() for w_item in lst: @@ -142,11 +152,73 @@ raise NotImplementedError('not implemented yet') -def get_iter(space, order, arr, shape, dtype, op_flags): +class OperandIter(ArrayIter): + _immutable_fields_ = ['slice_shape', 'slice_stride', 'slice_backstride', + 'operand_type', 'base'] + + def getitem(self, state): + # cannot be called - must return a boxed value + assert False + + def getitem_bool(self, state): + # cannot be called - must return a boxed value + assert False + + def setitem(self, state, elem): + # cannot be called - must return a boxed value + assert False + + +class ConcreteIter(OperandIter): + def __init__(self, array, size, shape, strides, backstrides, + op_flags, base): + OperandIter.__init__(self, array, size, shape, strides, backstrides) + self.slice_shape = 1 + self.slice_stride = -1 + if strides: + self.slice_stride = strides[-1] + self.slice_backstride = 1 + if op_flags.rw == 'r': + self.operand_type = concrete.ConcreteNonWritableArrayWithBase + else: + self.operand_type = concrete.ConcreteArrayWithBase + self.base = base + + def getoperand(self, state): + assert state.iterator is self + impl = self.operand_type + res = impl([], self.array.dtype, self.array.order, [], [], + self.array.storage, self.base) + res.start = state.offset + return res + + +class SliceIter(OperandIter): + def __init__(self, array, size, shape, strides, backstrides, slice_shape, + slice_stride, slice_backstride, op_flags, 
base): + OperandIter.__init__(self, array, size, shape, strides, backstrides) + self.slice_shape = slice_shape + self.slice_stride = slice_stride + self.slice_backstride = slice_backstride + if op_flags.rw == 'r': + self.operand_type = concrete.NonWritableSliceArray + else: + self.operand_type = concrete.SliceArray + self.base = base + + def getoperand(self, state): + assert state.iterator is self + impl = self.operand_type + arr = impl(state.offset, [self.slice_stride], [self.slice_backstride], + [self.slice_shape], self.array, self.base) + return arr + + +def get_iter(space, order, arr, shape, dtype, op_flags, base): imp = arr.implementation backward = is_backward(imp, order) if arr.is_scalar(): - return ArrayIter(imp, 1, [], [], [], op_flags=op_flags) + return ConcreteIter(imp, 1, [], [], [], op_flags, base) if (imp.strides[0] < imp.strides[-1] and not backward) or \ (imp.strides[0] > imp.strides[-1] and backward): # flip the strides. Is this always true for multidimension? @@ -161,7 +233,7 @@ backstrides = imp.backstrides r = calculate_broadcast_strides(strides, backstrides, imp.shape, shape, backward) - return ArrayIter(imp, imp.get_size(), shape, r[0], r[1], op_flags=op_flags) + return ConcreteIter(imp, imp.get_size(), shape, r[0], r[1], op_flags, base) def calculate_ndim(op_in, oa_ndim): if oa_ndim >=0: @@ -398,7 +470,7 @@ self.iters = [] for i in range(len(self.seq)): it = get_iter(space, self.order, self.seq[i], self.shape, - self.dtypes[i], self.op_flags[i]) + self.dtypes[i], self.op_flags[i], self) it.contiguous = False self.iters.append((it, it.reset())) @@ -437,7 +509,7 @@ return space.wrap(self) def getitem(self, it, st): - res = it.getoperand(st, self) + res = it.getoperand(st) return W_NDimArray(res) def descr_getitem(self, space, w_idx): @@ -455,6 +527,7 @@ def descr_len(self, space): space.wrap(len(self.iters)) + @jit.unroll_safe def descr_next(self, space): for it, st in self.iters: if not it.done(st): diff --git 
a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -270,7 +270,7 @@ shape = shape_agreement(space, shape, arr) return shape - + at jit.unroll_safe def _shape_agreement(shape1, shape2): """ Checks agreement about two shapes with respect to broadcasting. Returns the resulting shape. @@ -362,6 +362,13 @@ backstrides.reverse() return strides, backstrides + at jit.unroll_safe +def calc_backstrides(strides, shape): + ndims = len(shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (shape[nd] - 1) * strides[nd] + return new_backstrides # Recalculating strides. Find the steps that the iteration does for each # dimension, given the stride and shape. Then try to create a new stride that @@ -422,3 +429,35 @@ n_old_elems_to_use *= old_shape[oldI] assert len(new_strides) == len(new_shape) return new_strides[:] + + at jit.unroll_safe +def is_c_contiguous(arr): + shape = arr.get_shape() + strides = arr.get_strides() + ret = True + sd = arr.dtype.elsize + for i in range(len(shape) - 1, -1, -1): + dim = shape[i] + if strides[i] != sd: + ret = False + break + if dim == 0: + break + sd *= dim + return ret + + at jit.unroll_safe +def is_f_contiguous(arr): + shape = arr.get_shape() + strides = arr.get_strides() + ret = True + sd = arr.dtype.elsize + for i in range(len(shape)): + dim = shape[i] + if strides[i] != sd: + ret = False + break + if dim == 0: + break + sd *= dim + return ret diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -3,13 +3,13 @@ class AppTestNumSupport(BaseNumpyAppTest): def test_zeros(self): - from numpypy import zeros + from numpy import zeros a = zeros(3) assert len(a) == 3 assert a[0] == a[1] == a[2] == 0 def test_empty(self): - from numpypy import empty + from numpy 
import empty import gc for i in range(1000): a = empty(3) @@ -26,26 +26,26 @@ "empty() returned a zeroed out array every time") def test_where(self): - from numpypy import where, ones, zeros, array + from numpy import where, ones, zeros, array a = [1, 2, 3, 0, -3] a = where(array(a) > 0, ones(5), zeros(5)) assert (a == [1, 1, 1, 0, 0]).all() def test_where_differing_dtypes(self): - from numpypy import array, ones, zeros, where + from numpy import array, ones, zeros, where a = [1, 2, 3, 0, -3] From noreply at buildbot.pypy.org Fri Jan 2 22:29:44 2015 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 Jan 2015 22:29:44 +0100 (CET) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20150102212944.DD5E81C1056@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r75211:81a7ee6adad6 Date: 2015-01-02 13:16 -0800 http://bitbucket.org/pypy/pypy/changeset/81a7ee6adad6/ Log: merge py3k diff too long, truncating to 2000 out of 15188 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -58,7 +58,7 @@ executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], + 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], diff --git a/lib-python/2.7/sqlite3/test/dbapi.py b/lib-python/2.7/sqlite3/test/dbapi.py --- a/lib-python/2.7/sqlite3/test/dbapi.py +++ b/lib-python/2.7/sqlite3/test/dbapi.py @@ -478,6 +478,29 @@ except TypeError: pass + def CheckCurDescription(self): + self.cu.execute("select * from test") + + actual = self.cu.description + expected = [ + ('id', None, None, None, None, None, None), + ('name', None, None, None, None, None, None), + ('income', None, None, None, None, None, None), + ] + self.assertEqual(expected, actual) + + def CheckCurDescriptionVoidStatement(self): + self.cu.execute("insert into test(name) values (?)", ("foo",)) + self.assertIsNone(self.cu.description) + + def CheckCurDescriptionWithoutStatement(self): + cu = self.cx.cursor() + try: + self.assertIsNone(cu.description) + finally: + cu.close() + + @unittest.skipUnless(threading, 'This test requires threading.') class ThreadTests(unittest.TestCase): def setUp(self): diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1589,7 +1589,7 @@ 'copyfile' in caller.f_globals): dest_dir = sys.pypy_resolvedirof(target_executable) src_dir = sys.pypy_resolvedirof(sys.executable) - for libname in ['libpypy-c.so']: + for libname in ['libpypy-c.so', 'libpypy-c.dylib']: dest_library = 
os.path.join(dest_dir, libname) src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): diff --git a/lib-python/2.7/test/test_collections.py b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -1108,6 +1108,16 @@ od.popitem() self.assertEqual(len(od), 0) + def test_popitem_first(self): + pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] + shuffle(pairs) + od = OrderedDict(pairs) + while pairs: + self.assertEqual(od.popitem(last=False), pairs.pop(0)) + with self.assertRaises(KeyError): + od.popitem(last=False) + self.assertEqual(len(od), 0) + def test_pop(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) @@ -1179,7 +1189,11 @@ od = OrderedDict(pairs) # yaml.dump(od) --> # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n' - self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1])) + + # PyPy bug fix: added [0] at the end of this line, because the + # test is really about the 2-tuples that need to be 2-lists + # inside the list of 6 of them + self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1][0])) def test_reduce_not_too_fat(self): # do not save instance dictionary if not needed @@ -1189,6 +1203,16 @@ od.x = 10 self.assertEqual(len(od.__reduce__()), 3) + def test_reduce_exact_output(self): + # PyPy: test that __reduce__() produces the exact same answer as + # CPython does, even though in the 'all_ordered_dicts' branch we + # have to emulate it. 
+ pairs = [['c', 1], ['b', 2], ['d', 4]] + od = OrderedDict(pairs) + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,))) + od.x = 10 + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,), {'x': 10})) + def test_repr(self): od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py --- a/lib-python/2.7/test/test_xml_etree.py +++ b/lib-python/2.7/test/test_xml_etree.py @@ -225,9 +225,9 @@ >>> element.remove(subelement) >>> serialize(element) # 5 '' - >>> element.remove(subelement) + >>> element.remove(subelement) # doctest: +ELLIPSIS Traceback (most recent call last): - ValueError: list.remove(x): x not in list + ValueError: list.remove(... >>> serialize(element) # 6 '' >>> element[0:0] = [subelement, subelement, subelement] diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -7,7 +7,7 @@ 1. check out the branch vendor/stdlib 2. upgrade the files there -3. update stdlib-versions.txt with the output of hg -id from the cpython repo +3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit 5. update to default/py3k 6. create a integration branch for the new stdlib diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -33,7 +33,10 @@ of the given arguments and keywords. 
""" - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1188,8 +1188,9 @@ try: return self.__description except AttributeError: - self.__description = self.__statement._get_description() - return self.__description + if self.__statement: + self.__description = self.__statement._get_description() + return self.__description description = property(__get_description) def __get_lastrowid(self): diff --git a/lib_pypy/readline.py b/lib_pypy/readline.py --- a/lib_pypy/readline.py +++ b/lib_pypy/readline.py @@ -8,5 +8,9 @@ try: from pyrepl.readline import * -except SyntaxError: - raise ImportError +except ImportError: + import sys + if sys.platform == 'win32': + raise ImportError("the 'readline' module is not available on Windows" + " (on either PyPy or CPython)") + raise diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -6,6 +6,10 @@ C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API. +**NOTE**: As of 1st of December, PyPy comes with ``--shared`` by default +on linux, linux64 and windows. We will make it the default on all platforms +by the time of the next release. + The first thing that you need is to compile PyPy yourself with the option ``--shared``. We plan to make ``--shared`` the default in the future. Consult the `how to compile PyPy`_ doc for details. 
This will result in ``libpypy.so`` @@ -93,12 +97,18 @@ return res; } -If we save it as ``x.c`` now, compile it and run it with:: +If we save it as ``x.c`` now, compile it and run it (on linux) with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy +on OSX it is necessary to set the rpath of the binary if one wants to link to it:: + + gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl, at executable_path + ./x + hello from pypy + Worked! .. note:: If the compilation fails because of missing PyPy.h header file, diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -99,25 +99,6 @@ with the :config:`objspace.std.withmapdict` option. -List Optimizations -~~~~~~~~~~~~~~~~~~ - -Range-Lists -+++++++++++ - -Range-lists solve the same problem that the ``xrange`` builtin solves poorly: -the problem that ``range`` allocates memory even if the resulting list is only -ever used for iterating over it. Range lists are a different implementation for -lists. They are created only as a result of a call to ``range``. As long as the -resulting list is used without being mutated, the list stores only the start, stop -and step of the range. Only when somebody mutates the list the actual list is -created. This gives the memory and speed behaviour of ``xrange`` and the generality -of use of ``range``, and makes ``xrange`` essentially useless. - -You can enable this feature with the :config:`objspace.std.withrangelist` -option. - - User Class Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -35,6 +35,13 @@ PyPy's bytearray type is very inefficient. It would be an interesting task to look into possible optimizations on this. 
+Implement AF_XXX packet types for PyPy +-------------------------------------- + +PyPy is missing AF_XXX types of sockets. Implementing it is easy-to-medium +task. `bug report`_ + +.. _`bug report`: https://bitbucket.org/pypy/pypy/issue/1942/support-for-af_xxx-sockets#more Implement copy-on-write list slicing ------------------------------------ diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -241,8 +241,7 @@ enable_translationmodules(config) config.translation.suggest(check_str_without_nul=True) - if sys.platform.startswith('linux'): - config.translation.suggest(shared=True) + config.translation.suggest(shared=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -89,17 +89,16 @@ class __extend__(ast.BoolOp): - def _accept_jump_if_any_is(self, gen, condition, target): - self.values[0].accept_jump_if(gen, condition, target) - for i in range(1, len(self.values)): + def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0): + for i in range(len(self.values) - skip_last): self.values[i].accept_jump_if(gen, condition, target) def accept_jump_if(self, gen, condition, target): if condition and self.op == ast.And or \ (not condition and self.op == ast.Or): end = gen.new_block() - self._accept_jump_if_any_is(gen, not condition, end) - gen.emit_jump(ops.JUMP_FORWARD, target) + self._accept_jump_if_any_is(gen, not condition, end, skip_last=1) + self.values[-1].accept_jump_if(gen, condition, target) gen.use_next_block(end) else: self._accept_jump_if_any_is(gen, condition, target) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -125,6 +125,8 
@@ else: return self.space.builtin + _NO_CELLS = [] + @jit.unroll_safe def initialize_frame_scopes(self, outer_func, code): # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. @@ -143,7 +145,7 @@ nfreevars = len(code.co_freevars) if not nfreevars: if not ncellvars: - self.cells = [] + self.cells = self._NO_CELLS return # no self.cells needed - fast path elif outer_func is None: space = self.space diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -34,6 +34,7 @@ 'newp_handle': 'handle.newp_handle', 'from_handle': 'handle.from_handle', '_get_types': 'func._get_types', + 'from_buffer': 'func.from_buffer', 'string': 'func.string', 'buffer': 'cbuffer.buffer', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -440,6 +440,25 @@ return "handle to %s" % (self.space.str_w(w_repr),) +class W_CDataFromBuffer(W_CData): + _attrs_ = ['buf', 'length', 'w_keepalive'] + _immutable_fields_ = ['buf', 'length', 'w_keepalive'] + + def __init__(self, space, cdata, ctype, buf, w_object): + W_CData.__init__(self, space, cdata, ctype) + self.buf = buf + self.length = buf.getlength() + self.w_keepalive = w_object + + def get_array_length(self): + return self.length + + def _repr_extra(self): + w_repr = self.space.repr(self.w_keepalive) + return "buffer len %d from '%s' object" % ( + self.length, self.space.type(self.w_keepalive).name) + + W_CData.typedef = TypeDef( '_cffi_backend.CData', __module__ = '_cffi_backend', diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -76,3 +76,32 @@ def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) + +# 
____________________________________________________________ + + at unwrap_spec(w_ctype=ctypeobj.W_CType) +def from_buffer(space, w_ctype, w_x): + from pypy.module._cffi_backend import ctypearray, ctypeprim + # + if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or + not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)): + raise oefmt(space.w_TypeError, + "needs 'char[]', got '%s'", w_ctype.name) + # + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? + try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + try: + _cdata = buf.get_raw_address() + except ValueError: + raise oefmt(space.w_TypeError, + "from_buffer() got a '%T' object, which supports the " + "buffer interface but cannot be rendered as a plain " + "raw address on PyPy", w_x) + # + return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -62,10 +62,54 @@ eptype("intptr_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) eptype("uintptr_t", rffi.UINTPTR_T, ctypeprim.W_CTypePrimitiveUnsigned) -eptype("ptrdiff_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) # <-xxx eptype("size_t", rffi.SIZE_T, ctypeprim.W_CTypePrimitiveUnsigned) eptype("ssize_t", rffi.SSIZE_T, ctypeprim.W_CTypePrimitiveSigned) +_WCTSigned = ctypeprim.W_CTypePrimitiveSigned +_WCTUnsign = ctypeprim.W_CTypePrimitiveUnsigned + +eptype("ptrdiff_t", getattr(rffi, 'PTRDIFF_T', rffi.INTPTR_T), _WCTSigned) +eptype("intmax_t", getattr(rffi, 'INTMAX_T', rffi.LONGLONG), _WCTSigned) +eptype("uintmax_t", getattr(rffi, 'UINTMAX_T', rffi.LONGLONG), _WCTUnsign) + +if hasattr(rffi, 'INT_LEAST8_T'): + eptype("int_least8_t", rffi.INT_LEAST8_T, _WCTSigned) + eptype("int_least16_t", 
rffi.INT_LEAST16_T, _WCTSigned) + eptype("int_least32_t", rffi.INT_LEAST32_T, _WCTSigned) + eptype("int_least64_t", rffi.INT_LEAST64_T, _WCTSigned) + eptype("uint_least8_t", rffi.UINT_LEAST8_T, _WCTUnsign) + eptype("uint_least16_t",rffi.UINT_LEAST16_T, _WCTUnsign) + eptype("uint_least32_t",rffi.UINT_LEAST32_T, _WCTUnsign) + eptype("uint_least64_t",rffi.UINT_LEAST64_T, _WCTUnsign) +else: + eptypesize("int_least8_t", 1, _WCTSigned) + eptypesize("uint_least8_t", 1, _WCTUnsign) + eptypesize("int_least16_t", 2, _WCTSigned) + eptypesize("uint_least16_t", 2, _WCTUnsign) + eptypesize("int_least32_t", 4, _WCTSigned) + eptypesize("uint_least32_t", 4, _WCTUnsign) + eptypesize("int_least64_t", 8, _WCTSigned) + eptypesize("uint_least64_t", 8, _WCTUnsign) + +if hasattr(rffi, 'INT_FAST8_T'): + eptype("int_fast8_t", rffi.INT_FAST8_T, _WCTSigned) + eptype("int_fast16_t", rffi.INT_FAST16_T, _WCTSigned) + eptype("int_fast32_t", rffi.INT_FAST32_T, _WCTSigned) + eptype("int_fast64_t", rffi.INT_FAST64_T, _WCTSigned) + eptype("uint_fast8_t", rffi.UINT_FAST8_T, _WCTUnsign) + eptype("uint_fast16_t",rffi.UINT_FAST16_T, _WCTUnsign) + eptype("uint_fast32_t",rffi.UINT_FAST32_T, _WCTUnsign) + eptype("uint_fast64_t",rffi.UINT_FAST64_T, _WCTUnsign) +else: + eptypesize("int_fast8_t", 1, _WCTSigned) + eptypesize("uint_fast8_t", 1, _WCTUnsign) + eptypesize("int_fast16_t", 2, _WCTSigned) + eptypesize("uint_fast16_t", 2, _WCTUnsign) + eptypesize("int_fast32_t", 4, _WCTSigned) + eptypesize("uint_fast32_t", 4, _WCTUnsign) + eptypesize("int_fast64_t", 8, _WCTSigned) + eptypesize("uint_fast64_t", 8, _WCTUnsign) + @unwrap_spec(name=str) def new_primitive_type(space, name): try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -397,7 +397,7 @@ def test_invalid_indexing(): p = new_primitive_type("int") x = cast(p, 42) - 
py.test.raises(TypeError, "p[0]") + py.test.raises(TypeError, "x[0]") def test_default_str(): BChar = new_primitive_type("char") @@ -2718,7 +2718,16 @@ def test_nonstandard_integer_types(): for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t', 'uint32_t', 'int64_t', 'uint64_t', 'intptr_t', - 'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t']: + 'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t', + 'int_least8_t', 'uint_least8_t', + 'int_least16_t', 'uint_least16_t', + 'int_least32_t', 'uint_least32_t', + 'int_least64_t', 'uint_least64_t', + 'int_fast8_t', 'uint_fast8_t', + 'int_fast16_t', 'uint_fast16_t', + 'int_fast32_t', 'uint_fast32_t', + 'int_fast64_t', 'uint_fast64_t', + 'intmax_t', 'uintmax_t']: new_primitive_type(typename) # works def test_cannot_convert_unicode_to_charp(): @@ -3186,6 +3195,20 @@ ('a2', BChar, 5)], None, -1, -1, SF_PACKED) +def test_from_buffer(): + import array + a = array.array('H', [10000, 20000, 30000]) + BChar = new_primitive_type("char") + BCharP = new_pointer_type(BChar) + BCharA = new_array_type(BCharP, None) + c = from_buffer(BCharA, a) + assert typeof(c) is BCharA + assert len(c) == 6 + assert repr(c) == "" + p = new_pointer_type(new_primitive_type("unsigned short")) + cast(p, c)[1] += 500 + assert list(a) == [10000, 20500, 30000] + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8.6" diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -31,7 +31,7 @@ class AppTestC(object): """Populated below, hack hack hack.""" - spaceconfig = dict(usemodules=('_cffi_backend', '_io')) + spaceconfig = dict(usemodules=('_cffi_backend', '_io', 'array')) def setup_class(cls): testfuncs_w = [] diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ 
-562,7 +562,7 @@ # Flush the write buffer if necessary if self.writable: - self._writer_flush_unlocked(space) + self._flush_and_rewind_unlocked(space) self._reader_reset_buf() # Read whole blocks, and don't buffer them diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -410,3 +410,32 @@ f.read(1) f.seek(-1, 1) f.write(b'') + + def test_issue1902_2(self): + import _io + with _io.open(self.tmpfile, 'w+b', 4096) as f: + f.write(b'\xff' * 13569) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(4123) + f.seek(-4123, 1) + + def test_issue1902_3(self): + import _io + buffer_size = 4096 + with _io.open(self.tmpfile, 'w+b', buffer_size) as f: + f.write(b'\xff' * buffer_size * 3) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(buffer_size * 2) + assert f.tell() == 1 + buffer_size * 2 diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_rawffi/alt/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -14,6 +14,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror +from pypy.module._rawffi import lasterror import os if os.name == 'nt': @@ -202,11 +203,23 @@ self.func = func self.argchain = argchain + def before(self): + lasterror.restore_last_error(self.space) + + def after(self): + lasterror.save_last_error(self.space) + def get_longlong(self, w_ffitype): - return self.func.call(self.argchain, rffi.LONGLONG) + self.before() + x = self.func.call(self.argchain, rffi.LONGLONG) + self.after() + return x def get_ulonglong(self, w_ffitype): - return self.func.call(self.argchain, rffi.ULONGLONG) + self.before() + x = 
self.func.call(self.argchain, rffi.ULONGLONG) + self.after() + return x def get_signed(self, w_ffitype): # if the declared return type of the function is smaller than LONG, @@ -217,64 +230,94 @@ # to space.wrap in order to get a nice applevel . # restype = w_ffitype.get_ffitype() + self.before() call = self.func.call if restype is libffi.types.slong: - return call(self.argchain, rffi.LONG) + x = call(self.argchain, rffi.LONG) elif restype is libffi.types.sint: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.INT)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.INT)) elif restype is libffi.types.sshort: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.SHORT)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.SHORT)) elif restype is libffi.types.schar: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.SIGNEDCHAR)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.SIGNEDCHAR)) else: - self.error(w_ffitype) + raise self.error(w_ffitype) + self.after() + return x def get_unsigned(self, w_ffitype): - return self.func.call(self.argchain, rffi.ULONG) + self.before() + x = self.func.call(self.argchain, rffi.ULONG) + self.after() + return x def get_unsigned_which_fits_into_a_signed(self, w_ffitype): # the same comment as get_signed apply restype = w_ffitype.get_ffitype() + self.before() call = self.func.call if restype is libffi.types.uint: assert not libffi.IS_32_BIT # on 32bit machines, we should never get here, because it's a case # which has already been handled by get_unsigned above. 
- return rffi.cast(rffi.LONG, call(self.argchain, rffi.UINT)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.UINT)) elif restype is libffi.types.ushort: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.USHORT)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.USHORT)) elif restype is libffi.types.uchar: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.UCHAR)) + x = rffi.cast(rffi.LONG, call(self.argchain, rffi.UCHAR)) else: - self.error(w_ffitype) + raise self.error(w_ffitype) + self.after() + return x def get_pointer(self, w_ffitype): + self.before() ptrres = self.func.call(self.argchain, rffi.VOIDP) + self.after() return rffi.cast(rffi.ULONG, ptrres) def get_char(self, w_ffitype): - return self.func.call(self.argchain, rffi.UCHAR) + self.before() + x = self.func.call(self.argchain, rffi.UCHAR) + self.after() + return x def get_unichar(self, w_ffitype): - return self.func.call(self.argchain, rffi.WCHAR_T) + self.before() + x = self.func.call(self.argchain, rffi.WCHAR_T) + self.after() + return x def get_float(self, w_ffitype): - return self.func.call(self.argchain, rffi.DOUBLE) + self.before() + x = self.func.call(self.argchain, rffi.DOUBLE) + self.after() + return x def get_singlefloat(self, w_ffitype): - return self.func.call(self.argchain, rffi.FLOAT) + self.before() + x = self.func.call(self.argchain, rffi.FLOAT) + self.after() + return x def get_struct(self, w_ffitype, w_structdescr): + self.before() addr = self.func.call(self.argchain, rffi.LONG, is_struct=True) + self.after() return w_structdescr.fromaddress(self.space, addr) def get_struct_rawffi(self, w_ffitype, w_structdescr): + self.before() uintval = self.func.call(self.argchain, rffi.ULONG, is_struct=True) + self.after() return w_structdescr.fromaddress(self.space, uintval) def get_void(self, w_ffitype): - return self.func.call(self.argchain, lltype.Void) + self.before() + x = self.func.call(self.argchain, lltype.Void) + self.after() + return x def unpack_argtypes(space, 
w_argtypes, w_restype): diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,4 +1,5 @@ from rpython.rlib.buffer import Buffer +from rpython.rtyper.lltypesystem import rffi # XXX not the most efficient implementation @@ -20,3 +21,7 @@ def setitem(self, index, char): ll_buffer = self.datainstance.ll_buffer ll_buffer[index] = char + + def get_raw_address(self): + ll_buffer = self.datainstance.ll_buffer + return rffi.cast(rffi.CCHARP, ll_buffer) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -20,6 +20,7 @@ from rpython.rlib.rarithmetic import intmask, r_uint from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +from pypy.module._rawffi import lasterror TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ @@ -498,10 +499,14 @@ try: if self.resshape is not None: result = self.resshape.allocate(space, 1, autofree=True) + lasterror.restore_last_error(space) self.ptr.call(args_ll, result.ll_buffer) + lasterror.save_last_error(space) return space.wrap(result) else: + lasterror.restore_last_error(space) self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) + lasterror.save_last_error(space) return space.w_None except StackCheckError, e: raise OperationError(space.w_ValueError, space.wrap(e.message)) @@ -618,12 +623,10 @@ if sys.platform == 'win32': def get_last_error(space): - from rpython.rlib.rwin32 import GetLastError - return space.wrap(GetLastError()) + return space.wrap(lasterror.fetch_last_error(space)) @unwrap_spec(error=int) def set_last_error(space, error): - from rpython.rlib.rwin32 import SetLastError - SetLastError(error) + lasterror.store_last_error(space, error) else: # always have at least a dummy version of these functions # (https://bugs.pypy.org/issue1242) diff --git 
a/pypy/module/_rawffi/lasterror.py b/pypy/module/_rawffi/lasterror.py new file mode 100644 --- /dev/null +++ b/pypy/module/_rawffi/lasterror.py @@ -0,0 +1,40 @@ +# For Windows only. +# https://bitbucket.org/pypy/pypy/issue/1944/ctypes-on-windows-getlasterror + +import os + +_MS_WINDOWS = os.name == "nt" + + +if _MS_WINDOWS: + from rpython.rlib import rwin32 + from pypy.interpreter.executioncontext import ExecutionContext + + + ExecutionContext._rawffi_last_error = 0 + + def fetch_last_error(space): + ec = space.getexecutioncontext() + return ec._rawffi_last_error + + def store_last_error(space, last_error): + ec = space.getexecutioncontext() + ec._rawffi_last_error = last_error + + def restore_last_error(space): + ec = space.getexecutioncontext() + lasterror = ec._rawffi_last_error + rwin32.SetLastError(lasterror) + + def save_last_error(space): + lasterror = rwin32.GetLastError() + ec = space.getexecutioncontext() + ec._rawffi_last_error = lasterror + +else: + + def restore_last_error(space): + pass + + def save_last_error(space): + pass diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -16,6 +16,7 @@ #include "src/precommondefs.h" #include #include + #include struct x { @@ -204,6 +205,24 @@ return inp; } + RPY_EXPORTED + int check_errno(int incoming) + { + int old_errno = errno; + errno = incoming; + return old_errno; + } + + #ifdef _WIN32 + #include + RPY_EXPORTED + int check_last_error(int incoming) + { + int old_errno = GetLastError(); + SetLastError(incoming); + return old_errno; + } + #endif ''')) eci = ExternalCompilationInfo(include_dirs=[cdir]) return str(platform.compile([c_file], eci, 'x', standalone=False)) @@ -1118,6 +1137,15 @@ b[3] = b'x' assert b[3] == b'x' + def test_pypy_raw_address(self): + import _rawffi + S = _rawffi.Structure((40, 1)) + s = S(autofree=True) + addr = buffer(s)._pypy_raw_address() + 
assert type(addr) is int + assert buffer(s)._pypy_raw_address() == addr + assert buffer(s, 10)._pypy_raw_address() == addr + 10 + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') @@ -1143,6 +1171,37 @@ raises(OverflowError, "arg1[0] = 10**900") arg1.free() + def test_errno(self): + import _rawffi + lib = _rawffi.CDLL(self.lib_name) + A = _rawffi.Array('i') + f = lib.ptr('check_errno', ['i'], 'i') + _rawffi.set_errno(42) + arg = A(1) + arg[0] = 43 + res = f(arg) + assert res[0] == 42 + z = _rawffi.get_errno() + assert z == 43 + arg.free() + + def test_last_error(self): + import sys + if sys.platform != 'win32': + skip("Windows test") + import _rawffi + lib = _rawffi.CDLL(self.lib_name) + A = _rawffi.Array('i') + f = lib.ptr('check_last_error', ['i'], 'i') + _rawffi.set_last_error(42) + arg = A(1) + arg[0] = 43 + res = f(arg) + assert res[0] == 42 + z = _rawffi.get_last_error() + assert z == 43 + arg.free() + def test_char_array_int(self): import _rawffi A = _rawffi.Array('c') diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -57,7 +57,8 @@ if not OPENSSL_NO_SSL2: constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2 -constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3 +if not OPENSSL_NO_SSL3: + constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3 constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23 constants["PROTOCOL_TLSv1"] = PY_SSL_VERSION_TLS1 @@ -150,7 +151,7 @@ def __init__(self, space, protocol): if protocol == PY_SSL_VERSION_TLS1: method = libssl_TLSv1_method() - elif protocol == PY_SSL_VERSION_SSL3: + elif protocol == PY_SSL_VERSION_SSL3 and not OPENSSL_NO_SSL3: method = libssl_SSLv3_method() elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2: method = libssl_SSLv2_method() diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ 
-58,7 +58,7 @@ w_globals = from_ref(space, py_frame.c_f_globals) frame = space.FrameClass(space, code, w_globals, outer_func=None) - frame.f_lineno = py_frame.c_f_lineno + frame.f_lineno = rffi.getintfield(py_frame, 'c_f_lineno') w_obj = space.wrap(frame) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -244,6 +244,9 @@ def getitem(self, index): return self.ptr[index] + def get_raw_address(self): + return rffi.cast(rffi.CCHARP, self.ptr) + def wrap_getbuffer(space, w_self, w_args, func): func_target = rffi.cast(getbufferproc, func) with lltype.scoped_alloc(Py_buffer) as view: diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py --- a/pypy/module/gc/__init__.py +++ b/pypy/module/gc/__init__.py @@ -30,6 +30,7 @@ 'get_referrers': 'referents.get_referrers', '_dump_rpy_heap': 'referents._dump_rpy_heap', 'get_typeids_z': 'referents.get_typeids_z', + 'get_typeids_list': 'referents.get_typeids_list', 'GcRef': 'referents.W_GcRef', }) MixedModule.__init__(self, space, w_name) diff --git a/pypy/module/gc/app_referents.py b/pypy/module/gc/app_referents.py --- a/pypy/module/gc/app_referents.py +++ b/pypy/module/gc/app_referents.py @@ -16,7 +16,8 @@ [0][0][0][-1] inserted after all GC roots, before all non-roots. If the argument is a filename and the 'zlib' module is available, - we also write a 'typeids.txt' in the same directory, if none exists. + we also write 'typeids.txt' and 'typeids.lst' in the same directory, + if they don't already exist. 
""" if isinstance(file, str): f = open(file, 'wb') @@ -30,7 +31,13 @@ filename2 = os.path.join(os.path.dirname(file), 'typeids.txt') if not os.path.exists(filename2): data = zlib.decompress(gc.get_typeids_z()) - f = open(filename2, 'wb') + f = open(filename2, 'w') + f.write(data) + f.close() + filename2 = os.path.join(os.path.dirname(file), 'typeids.lst') + if not os.path.exists(filename2): + data = ''.join(['%d\n' % n for n in gc.get_typeids_list()]) + f = open(filename2, 'w') f.write(data) f.close() else: diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py --- a/pypy/module/gc/referents.py +++ b/pypy/module/gc/referents.py @@ -228,3 +228,8 @@ a = rgc.get_typeids_z() s = ''.join([a[i] for i in range(len(a))]) return space.wrap(s) + +def get_typeids_list(space): + l = rgc.get_typeids_list() + list_w = [space.wrap(l[i]) for i in range(len(l))] + return space.newlist(list_w) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,7 +1,7 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.tool.pairtype import extendabletype - +from pypy.module.micronumpy import support def wrap_impl(space, w_cls, w_instance, impl): if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): @@ -44,11 +44,32 @@ return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, - w_subtype=None, w_base=None, writable=True): + def from_shape_and_storage(space, shape, storage, dtype, storage_bytes=-1, + order='C', owning=False, w_subtype=None, + w_base=None, writable=True, strides=None): from pypy.module.micronumpy import concrete - from pypy.module.micronumpy.strides import calc_strides - strides, backstrides = calc_strides(shape, dtype, order) + from pypy.module.micronumpy.strides 
import (calc_strides, + calc_backstrides) + isize = dtype.elsize + if storage_bytes > 0 : + totalsize = support.product(shape) * isize + if totalsize > storage_bytes: + raise OperationError(space.w_TypeError, space.wrap( + "buffer is too small for requested array")) + else: + storage_bytes = support.product(shape) * isize + if strides is None: + strides, backstrides = calc_strides(shape, dtype, order) + else: + if len(strides) != len(shape): + raise oefmt(space.w_ValueError, + 'strides, if given, must be the same length as shape') + for i in range(len(strides)): + if strides[i] < 0 or strides[i]*shape[i] > storage_bytes: + raise oefmt(space.w_ValueError, + 'strides is incompatible with shape of requested ' + 'array and size of buffer') + backstrides = calc_backstrides(strides, shape) if w_base is not None: if owning: raise OperationError(space.w_ValueError, diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -59,9 +59,9 @@ _mixin_ = True def reduce(self, space): - numpypy = space.getbuiltinmodule("_numpypy") - assert isinstance(numpypy, MixedModule) - multiarray = numpypy.get("multiarray") + _numpypy = space.getbuiltinmodule("_numpypy") + assert isinstance(_numpypy, MixedModule) + multiarray = _numpypy.get("multiarray") assert isinstance(multiarray, MixedModule) scalar = multiarray.get("scalar") @@ -167,7 +167,7 @@ if len(args_w) >= 1: for w_arg in args_w: try: - idx = support.index_w(space, w_arg) + support.index_w(space, w_arg) except OperationError: raise oefmt(space.w_TypeError, "an integer is required") raise oefmt(space.w_ValueError, "axes don't match array") diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -34,8 +34,8 @@ SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", - "unegative", "flat", "tostring","count_nonzero", - 
"argsort"] + "unegative", "flat", "tostring", "count_nonzero", + "argsort", "cumsum", "logical_xor_reduce"] TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted'] TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] @@ -559,6 +559,11 @@ w_res = arr.descr_any(interp.space) elif self.name == "all": w_res = arr.descr_all(interp.space) + elif self.name == "cumsum": + w_res = arr.descr_cumsum(interp.space) + elif self.name == "logical_xor_reduce": + logical_xor = ufuncs.get(interp.space).logical_xor + w_res = logical_xor.reduce(interp.space, arr, None) elif self.name == "unegative": neg = ufuncs.get(interp.space).negative w_res = neg.call(interp.space, [arr]) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -11,7 +11,7 @@ from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, RecordChunk, calc_strides, calc_new_strides, shape_agreement, - calculate_broadcast_strides) + calculate_broadcast_strides, calc_backstrides) class BaseConcreteArray(object): @@ -79,10 +79,7 @@ self.get_strides(), self.order) if new_strides is not None: # We can create a view, strides somehow match up. 
- ndims = len(new_shape) - new_backstrides = [0] * ndims - for nd in range(ndims): - new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] + new_backstrides = calc_backstrides(new_strides, new_shape) assert isinstance(orig_array, W_NDimArray) or orig_array is None return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -302,5 +302,5 @@ return a else: writable = not buf.readonly - return W_NDimArray.from_shape_and_storage(space, [n], storage, dtype=dtype, - w_base=w_buffer, writable=writable) + return W_NDimArray.from_shape_and_storage(space, [n], storage, storage_bytes=s, + dtype=dtype, w_base=w_buffer, writable=writable) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -286,7 +286,6 @@ def descr_hash(self, space): return space.wrap(self._compute_hash(space, 0x345678)) - def descr_str(self, space): if self.fields: return space.str(self.descr_get_descr(space)) @@ -394,7 +393,7 @@ alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == space.w_None): - raise oefmt(space.w_ValueError, "inconsistent fields and names") + raise oefmt(space.w_ValueError, "inconsistent fields and names in Numpy dtype unpickling") self.byteorder = endian self.shape = [] diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -1,47 +1,30 @@ +from rpython.rlib import jit + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import 
constants as NPY - +from pypy.module.micronumpy.strides import is_c_contiguous, is_f_contiguous def enable_flags(arr, flags): arr.flags |= flags - def clear_flags(arr, flags): arr.flags &= ~flags - def _update_contiguous_flags(arr): - shape = arr.shape - strides = arr.strides - - is_c_contig = True - sd = arr.dtype.elsize - for i in range(len(shape) - 1, -1, -1): - dim = shape[i] - if strides[i] != sd: - is_c_contig = False - break - if dim == 0: - break - sd *= dim + is_c_contig = is_c_contiguous(arr) if is_c_contig: enable_flags(arr, NPY.ARRAY_C_CONTIGUOUS) else: clear_flags(arr, NPY.ARRAY_C_CONTIGUOUS) - sd = arr.dtype.elsize - for i in range(len(shape)): - dim = shape[i] - if strides[i] != sd: - clear_flags(arr, NPY.ARRAY_F_CONTIGUOUS) - return - if dim == 0: - break - sd *= dim - enable_flags(arr, NPY.ARRAY_F_CONTIGUOUS) + is_f_contig = is_f_contiguous(arr) + if is_f_contig: + enable_flags(arr, NPY.ARRAY_F_CONTIGUOUS) + else: + clear_flags(arr, NPY.ARRAY_F_CONTIGUOUS) class W_FlagsObject(W_Root): diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -22,6 +22,9 @@ def get_shape(self): return self.shape + def get_size(self): + return self.base().get_size() + def create_iter(self, shape=None, backward_broadcast=False): assert isinstance(self.base(), W_NDimArray) return self.base().create_iter() @@ -41,8 +44,8 @@ return space.wrap(self.state.index) def descr_coords(self, space): - self.state = self.iter.update(self.state) - return space.newtuple([space.wrap(c) for c in self.state.indices]) + coords = self.iter.indices(self.state) + return space.newtuple([space.wrap(c) for c in coords]) def descr_iter(self): return self @@ -54,7 +57,7 @@ if self.iter.done(self.state): raise OperationError(space.w_StopIteration, space.w_None) w_res = self.iter.getitem(self.state) - self.state = self.iter.next(self.state) + self.iter.next(self.state, mutate=True) 
return w_res def descr_getitem(self, space, w_idx): @@ -71,7 +74,7 @@ base.get_order(), w_instance=base) return loop.flatiter_getitem(res, self.iter, state, step) finally: - self.state = self.iter.reset(self.state) + self.iter.reset(self.state, mutate=True) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or @@ -91,7 +94,7 @@ arr = convert_to_array(space, w_value) loop.flatiter_setitem(space, dtype, arr, self.iter, state, step, length) finally: - self.state = self.iter.reset(self.state) + self.iter.reset(self.state, mutate=True) W_FlatIterator.typedef = TypeDef("numpy.flatiter", diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -41,16 +41,6 @@ from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.flagsobj import _update_contiguous_flags -class OpFlag(object): - def __init__(self): - self.rw = '' - self.broadcast = True - self.force_contig = False - self.force_align = False - self.native_byte_order = False - self.tmp_copy = '' - self.allocate = False - class PureShapeIter(object): def __init__(self, shape, idx_w): @@ -87,25 +77,24 @@ class IterState(object): - _immutable_fields_ = ['iterator', 'index', 'indices', 'offset'] + _immutable_fields_ = ['iterator', '_indices'] def __init__(self, iterator, index, indices, offset): self.iterator = iterator self.index = index - self.indices = indices + self._indices = indices self.offset = offset class ArrayIter(object): _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', 'strides[*]', 'backstrides[*]', 'factors[*]', - 'slice_shape', 'slice_stride', 'slice_backstride', - 'track_index', 'operand_type', 'slice_operand_type'] + 'track_index'] track_index = True - def __init__(self, array, size, shape, strides, backstrides, op_flags=OpFlag()): - from pypy.module.micronumpy import concrete + @jit.unroll_safe + 
def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) _update_contiguous_flags(array) self.contiguous = (array.flags & NPY.ARRAY_C_CONTIGUOUS and @@ -117,12 +106,6 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides - self.slice_shape = 1 - self.slice_stride = -1 - if strides: - self.slice_stride = strides[-1] - self.slice_backstride = 1 - self.slice_operand_type = concrete.SliceArray ndim = len(shape) factors = [0] * ndim @@ -132,32 +115,35 @@ else: factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i] self.factors = factors - if op_flags.rw == 'r': - self.operand_type = concrete.ConcreteNonWritableArrayWithBase - else: - self.operand_type = concrete.ConcreteArrayWithBase @jit.unroll_safe - def reset(self, state=None): + def reset(self, state=None, mutate=False): + index = 0 if state is None: indices = [0] * len(self.shape_m1) else: assert state.iterator is self - indices = state.indices + indices = state._indices for i in xrange(self.ndim_m1, -1, -1): indices[i] = 0 - return IterState(self, 0, indices, self.array.start) + offset = self.array.start + if not mutate: + return IterState(self, index, indices, offset) + state.index = index + state.offset = offset @jit.unroll_safe - def next(self, state): + def next(self, state, mutate=False): assert state.iterator is self index = state.index if self.track_index: index += 1 - indices = state.indices[:] + indices = state._indices offset = state.offset if self.contiguous: offset += self.array.dtype.elsize + elif self.ndim_m1 == 0: + offset += self.strides[0] else: for i in xrange(self.ndim_m1, -1, -1): idx = indices[i] @@ -168,13 +154,18 @@ else: indices[i] = 0 offset -= self.backstrides[i] - return IterState(self, index, indices, offset) + if not mutate: + return IterState(self, index, indices, offset) + state.index = index + state.offset = offset @jit.unroll_safe def goto(self, index): offset = self.array.start if 
self.contiguous: offset += index * self.array.dtype.elsize + elif self.ndim_m1 == 0: + offset += index * self.strides[0] else: current = index for i in xrange(len(self.shape_m1)): @@ -183,20 +174,20 @@ return IterState(self, index, None, offset) @jit.unroll_safe - def update(self, state): + def indices(self, state): assert state.iterator is self assert self.track_index - if not self.contiguous: - return state + indices = state._indices + if not (self.contiguous or self.ndim_m1 == 0): + return indices current = state.index - indices = state.indices for i in xrange(len(self.shape_m1)): if self.factors[i] != 0: indices[i] = current / self.factors[i] current %= self.factors[i] else: indices[i] = 0 - return IterState(self, state.index, indices, state.offset) + return indices def done(self, state): assert state.iterator is self @@ -215,12 +206,6 @@ assert state.iterator is self self.array.setitem(state.offset, elem) - def getoperand(self, st, base): - impl = self.operand_type - res = impl([], self.array.dtype, self.array.order, [], [], - self.array.storage, base) - res.start = st.offset - return res def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() @@ -244,42 +229,3 @@ size /= shape[axis] shape[axis] = backstrides[axis] = 0 return ArrayIter(array, size, shape, array.strides, backstrides) - -class SliceIter(ArrayIter): - ''' - used with external loops, getitem and setitem return a SliceArray - view into the original array - ''' - _immutable_fields_ = ['base', 'slice_shape[*]', 'slice_stride[*]', 'slice_backstride[*]'] - - def __init__(self, array, size, shape, strides, backstrides, slice_shape, - slice_stride, slice_backstride, op_flags, base): - from pypy.module.micronumpy import concrete - ArrayIter.__init__(self, array, size, shape, strides, backstrides, op_flags) - self.slice_shape = slice_shape - self.slice_stride = slice_stride - self.slice_backstride = slice_backstride - self.base = base - if op_flags.rw == 'r': - self.slice_operand_type = 
concrete.NonWritableSliceArray - else: - self.slice_operand_type = concrete.SliceArray - - def getitem(self, state): - # XXX cannot be called - must return a boxed value - assert False - - def getitem_bool(self, state): - # XXX cannot be called - must return a boxed value - assert False - - def setitem(self, state, elem): - # XXX cannot be called - must return a boxed value - assert False - - def getoperand(self, state, base): - assert state.iterator is self - impl = self.slice_operand_type - arr = impl(state.offset, [self.slice_stride], [self.slice_backstride], - [self.slice_shape], self.array, self.base) - return arr diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -42,23 +42,38 @@ # TODO handle __array_priorities__ and maybe flip the order + if w_lhs.get_size() == 1: + w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype) + left_iter = left_state = None + else: + w_left = None + left_iter, left_state = w_lhs.create_iter(shape) + left_iter.track_index = False + + if w_rhs.get_size() == 1: + w_right = w_rhs.get_scalar_value().convert_to(space, calc_dtype) + right_iter = right_state = None + else: + w_right = None + right_iter, right_state = w_rhs.create_iter(shape) + right_iter.track_index = False + if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=lhs_for_subtype) - left_iter, left_state = w_lhs.create_iter(shape) - right_iter, right_state = w_rhs.create_iter(shape) out_iter, out_state = out.create_iter(shape) - left_iter.track_index = right_iter.track_index = False shapelen = len(shape) while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, calc_dtype=calc_dtype, res_dtype=res_dtype) - w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) - w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) + if left_iter: + w_left = 
left_iter.getitem(left_state).convert_to(space, calc_dtype) + left_state = left_iter.next(left_state) + if right_iter: + w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) + right_state = right_iter.next(right_state) out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( space, res_dtype)) - left_state = left_iter.next(left_state) - right_state = right_iter.next(right_state) out_state = out_iter.next(out_state) return out @@ -68,11 +83,12 @@ reds='auto') def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): + obj_iter, obj_state = w_obj.create_iter(shape) + obj_iter.track_index = False + if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) - obj_iter, obj_state = w_obj.create_iter(shape) out_iter, out_state = out.create_iter(shape) - obj_iter.track_index = False shapelen = len(shape) while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, @@ -89,17 +105,14 @@ def setslice(space, shape, target, source): if not shape: - # XXX - simplify - target_iter, target_state = target.create_iter(shape) - source_iter, source_state = source.create_iter(shape) dtype = target.dtype - val = source_iter.getitem(source_state) + val = source.getitem(source.start) if dtype.is_str_or_unicode(): val = dtype.coerce(space, val) else: val = val.convert_to(space, dtype) - target_iter.setitem(target_state, val) - return target + target.setitem(target.start, val) + return target return _setslice(space, shape, target, source) def _setslice(space, shape, target, source): @@ -107,6 +120,7 @@ # array implementations, not arrays target_iter, target_state = target.create_iter(shape) source_iter, source_state = source.create_iter(shape) + source_iter.track_index = False dtype = target.dtype shapelen = len(shape) while not target_iter.done(target_state): @@ -152,6 +166,7 @@ def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): obj_iter, obj_state = 
obj.create_iter() out_iter, out_state = out.create_iter() + out_iter.track_index = False if identity is None: cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) out_iter.setitem(out_state, cur_value) @@ -225,10 +240,9 @@ state = x_state return out -axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', - greens=['shapelen', - 'func', 'dtype'], - reds='auto') +axis_reduce_driver = jit.JitDriver(name='numpy_axis_reduce', + greens=['shapelen', 'func', 'dtype'], + reds='auto') def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): @@ -241,21 +255,24 @@ temp_iter = out_iter # hack temp_state = out_state arr_iter, arr_state = arr.create_iter() + arr_iter.track_index = False if identity is not None: identity = identity.convert_to(space, dtype) shapelen = len(shape) while not out_iter.done(out_state): - axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=dtype) - assert not arr_iter.done(arr_state) + axis_reduce_driver.jit_merge_point(shapelen=shapelen, func=func, + dtype=dtype) w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) - out_state = out_iter.update(out_state) - if out_state.indices[axis] == 0: + arr_state = arr_iter.next(arr_state) + + out_indices = out_iter.indices(out_state) + if out_indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) else: cur = temp_iter.getitem(temp_state) w_val = func(dtype, cur, w_val) + out_iter.setitem(out_state, w_val) out_state = out_iter.next(out_state) if cumulative: @@ -263,7 +280,6 @@ temp_state = temp_iter.next(temp_state) else: temp_state = out_state - arr_state = arr_iter.next(arr_state) return out @@ -382,9 +398,9 @@ while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) if arr_iter.getitem_bool(arr_state): - arr_state = arr_iter.update(arr_state) + arr_indices = arr_iter.indices(arr_state) for d in dims: - res_iter.setitem(res_state, 
box(arr_state.indices[d])) + res_iter.setitem(res_state, box(arr_indices[d])) res_state = res_iter.next(res_state) arr_state = arr_iter.next(arr_state) return res diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -19,7 +19,7 @@ order_converter, shape_converter, searchside_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import get_shape_from_iterable, \ - shape_agreement, shape_agreement_multiple + shape_agreement, shape_agreement_multiple, is_c_contiguous, is_f_contiguous def _match_dot_shapes(space, left, right): @@ -529,9 +529,10 @@ "__array__(dtype) not implemented")) if type(self) is W_NDimArray: return self + sz = support.product(self.get_shape()) * self.get_dtype().elsize return W_NDimArray.from_shape_and_storage( space, self.get_shape(), self.implementation.storage, - self.get_dtype(), w_base=self) + self.get_dtype(), storage_bytes=sz, w_base=self) def descr_array_iface(self, space): addr = self.implementation.get_storage_as_int(space) @@ -827,7 +828,15 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) else: - if dims == 1 or impl.get_strides()[0] < impl.get_strides()[-1]: + if not is_c_contiguous(impl) and not is_f_contiguous(impl): + if old_itemsize != new_itemsize: + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + # Strides, shape does not change + v = impl.astype(space, dtype) + return wrap_impl(space, w_type, self, v) + strides = impl.get_strides() + if dims == 1 or strides[0] buf.getlength(): - raise OperationError(space.w_TypeError, space.wrap( - "buffer is too small for requested array")) storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) storage = rffi.ptradd(storage, offset) - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + return 
W_NDimArray.from_shape_and_storage(space, shape, storage, + dtype, w_base=w_buffer, + storage_bytes=buf.getlength()-offset, w_subtype=w_subtype, - w_base=w_buffer, - writable=not buf.readonly) + writable=not buf.readonly, + strides=strides) order = order_converter(space, w_order, NPY.CORDER) if order == NPY.CORDER: @@ -1236,8 +1245,9 @@ return w_ret - at unwrap_spec(addr=int) -def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): + at unwrap_spec(addr=int, buf_len=int) +def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, + buf_len=-1, w_subtype=None, w_strides=None): """ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. @@ -1246,14 +1256,22 @@ dtype = space.interp_w(descriptor.W_Dtype, space.call_function( space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = shape_converter(space, w_shape, dtype) + if not space.is_none(w_strides): + strides = [space.int_w(w_i) for w_i in + space.unpackiterable(w_strides)] + else: + strides = None if w_subtype: if not space.isinstance_w(w_subtype, space.w_type): raise OperationError(space.w_ValueError, space.wrap( "subtype must be a subtype of ndarray, not a class instance")) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - 'C', False, w_subtype) + buf_len, 'C', False, w_subtype, + strides=strides) else: - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + storage_bytes=buf_len, + strides=strides) app_take = applevel(r""" def take(a, indices, axis, out, mode): diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -6,7 +6,7 @@ from pypy.module.micronumpy import ufuncs, support, concrete from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.descriptor import 
decode_w_dtype -from pypy.module.micronumpy.iterators import ArrayIter, SliceIter, OpFlag +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (calculate_broadcast_strides, shape_agreement, shape_agreement_multiple) @@ -36,6 +36,16 @@ return ret +class OpFlag(object): + def __init__(self): + self.rw = '' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + def parse_op_flag(space, lst): op_flag = OpFlag() for w_item in lst: @@ -142,11 +152,73 @@ raise NotImplementedError('not implemented yet') -def get_iter(space, order, arr, shape, dtype, op_flags): +class OperandIter(ArrayIter): + _immutable_fields_ = ['slice_shape', 'slice_stride', 'slice_backstride', + 'operand_type', 'base'] + + def getitem(self, state): + # cannot be called - must return a boxed value + assert False + + def getitem_bool(self, state): + # cannot be called - must return a boxed value + assert False + + def setitem(self, state, elem): + # cannot be called - must return a boxed value + assert False + + +class ConcreteIter(OperandIter): + def __init__(self, array, size, shape, strides, backstrides, + op_flags, base): + OperandIter.__init__(self, array, size, shape, strides, backstrides) + self.slice_shape = 1 + self.slice_stride = -1 + if strides: + self.slice_stride = strides[-1] + self.slice_backstride = 1 + if op_flags.rw == 'r': + self.operand_type = concrete.ConcreteNonWritableArrayWithBase + else: + self.operand_type = concrete.ConcreteArrayWithBase + self.base = base + + def getoperand(self, state): + assert state.iterator is self + impl = self.operand_type + res = impl([], self.array.dtype, self.array.order, [], [], + self.array.storage, self.base) + res.start = state.offset + return res + + +class SliceIter(OperandIter): + def __init__(self, array, size, shape, strides, backstrides, slice_shape, + slice_stride, slice_backstride, op_flags, 
base): + OperandIter.__init__(self, array, size, shape, strides, backstrides) + self.slice_shape = slice_shape + self.slice_stride = slice_stride + self.slice_backstride = slice_backstride + if op_flags.rw == 'r': + self.operand_type = concrete.NonWritableSliceArray + else: + self.operand_type = concrete.SliceArray + self.base = base + + def getoperand(self, state): + assert state.iterator is self + impl = self.operand_type + arr = impl(state.offset, [self.slice_stride], [self.slice_backstride], + [self.slice_shape], self.array, self.base) + return arr + + +def get_iter(space, order, arr, shape, dtype, op_flags, base): imp = arr.implementation backward = is_backward(imp, order) if arr.is_scalar(): - return ArrayIter(imp, 1, [], [], [], op_flags=op_flags) + return ConcreteIter(imp, 1, [], [], [], op_flags, base) if (imp.strides[0] < imp.strides[-1] and not backward) or \ (imp.strides[0] > imp.strides[-1] and backward): # flip the strides. Is this always true for multidimension? @@ -161,7 +233,7 @@ backstrides = imp.backstrides r = calculate_broadcast_strides(strides, backstrides, imp.shape, shape, backward) - return ArrayIter(imp, imp.get_size(), shape, r[0], r[1], op_flags=op_flags) + return ConcreteIter(imp, imp.get_size(), shape, r[0], r[1], op_flags, base) def calculate_ndim(op_in, oa_ndim): if oa_ndim >=0: @@ -398,7 +470,7 @@ self.iters = [] for i in range(len(self.seq)): it = get_iter(space, self.order, self.seq[i], self.shape, - self.dtypes[i], self.op_flags[i]) + self.dtypes[i], self.op_flags[i], self) it.contiguous = False self.iters.append((it, it.reset())) @@ -437,7 +509,7 @@ return space.wrap(self) def getitem(self, it, st): - res = it.getoperand(st, self) + res = it.getoperand(st) return W_NDimArray(res) def descr_getitem(self, space, w_idx): @@ -455,6 +527,7 @@ def descr_len(self, space): space.wrap(len(self.iters)) + @jit.unroll_safe def descr_next(self, space): for it, st in self.iters: if not it.done(st): diff --git 
a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -270,7 +270,7 @@ shape = shape_agreement(space, shape, arr) return shape - + at jit.unroll_safe def _shape_agreement(shape1, shape2): """ Checks agreement about two shapes with respect to broadcasting. Returns the resulting shape. @@ -362,6 +362,13 @@ backstrides.reverse() return strides, backstrides + at jit.unroll_safe +def calc_backstrides(strides, shape): + ndims = len(shape) + new_backstrides = [0] * ndims + for nd in range(ndims): + new_backstrides[nd] = (shape[nd] - 1) * strides[nd] + return new_backstrides # Recalculating strides. Find the steps that the iteration does for each # dimension, given the stride and shape. Then try to create a new stride that @@ -422,3 +429,35 @@ n_old_elems_to_use *= old_shape[oldI] assert len(new_strides) == len(new_shape) return new_strides[:] + + at jit.unroll_safe +def is_c_contiguous(arr): + shape = arr.get_shape() + strides = arr.get_strides() + ret = True + sd = arr.dtype.elsize + for i in range(len(shape) - 1, -1, -1): + dim = shape[i] + if strides[i] != sd: + ret = False + break + if dim == 0: + break + sd *= dim + return ret + + at jit.unroll_safe +def is_f_contiguous(arr): + shape = arr.get_shape() + strides = arr.get_strides() + ret = True + sd = arr.dtype.elsize + for i in range(len(shape)): + dim = shape[i] + if strides[i] != sd: + ret = False + break + if dim == 0: + break + sd *= dim + return ret diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -3,13 +3,13 @@ From noreply at buildbot.pypy.org Fri Jan 2 22:29:46 2015 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 Jan 2015 22:29:46 +0100 (CET) Subject: [pypy-commit] pypy py3.3: add ZLIB_RUNTIME_VERSION Message-ID: 
<20150102212946.13EFC1C1056@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r75212:f7ed501c6405 Date: 2015-01-02 13:29 -0800 http://bitbucket.org/pypy/pypy/changeset/f7ed501c6405/ Log: add ZLIB_RUNTIME_VERSION diff --git a/pypy/module/zlib/__init__.py b/pypy/module/zlib/__init__.py --- a/pypy/module/zlib/__init__.py +++ b/pypy/module/zlib/__init__.py @@ -37,6 +37,12 @@ appleveldefs = { } + def setup_after_space_initialization(self): + space = self.space + space.setattr(self, space.wrap('ZLIB_RUNTIME_VERSION'), + space.wrap(rzlib.zlibVersion())) + + for _name in """ MAX_WBITS DEFLATED DEF_MEM_LEVEL diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -309,3 +309,7 @@ assert do.decompress(d0) == piece do.decompress(d1) == piece[100:] do.decompress(d2) == piece[:-100] + + def test_version(self): + zlib = self.zlib + assert zlib.ZLIB_VERSION[0] == zlib.ZLIB_RUNTIME_VERSION[0] From noreply at buildbot.pypy.org Fri Jan 2 23:41:57 2015 From: noreply at buildbot.pypy.org (squeaky) Date: Fri, 2 Jan 2015 23:41:57 +0100 (CET) Subject: [pypy-commit] pypy squeaky/use-cflags-for-compiling-asm: Use CFLAGS Message-ID: <20150102224157.5F4F11C0976@cobra.cs.uni-duesseldorf.de> Author: squeaky Branch: squeaky/use-cflags-for-compiling-asm Changeset: r75213:e27b54a9d700 Date: 2015-01-02 22:42 +0000 http://bitbucket.org/pypy/pypy/changeset/e27b54a9d700/ Log: Use CFLAGS diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -449,7 +449,7 @@ '-o $*.s -S $< $(INCLUDEDIRS)', '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' '-t $*.s > $*.gctmp', - '$(CC) -o $*.o -c $*.lbl.s', + '$(CC) $(CFLAGS) -o $*.o -c $*.lbl.s', 'mv $*.gctmp $*.gcmap', 'rm $*.s $*.lbl.s']) From noreply at buildbot.pypy.org Sat Jan 3 01:53:07 2015 From: noreply at buildbot.pypy.org 
(pjenvey) Date: Sat, 3 Jan 2015 01:53:07 +0100 (CET) Subject: [pypy-commit] pypy default: (squeaky) we need @executable_path, not @rpath, for the shared lib location Message-ID: <20150103005307.D48AD1C0347@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r75214:8caae57cec39 Date: 2015-01-02 16:53 -0800 http://bitbucket.org/pypy/pypy/changeset/8caae57cec39/ Log: (squeaky) we need @executable_path, not @rpath, for the shared lib location diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -14,7 +14,9 @@ def _args_for_shared(self, args): return (list(self.shared_only) - + ['-dynamiclib', '-install_name', '@rpath/$(TARGET)', '-undefined', 'dynamic_lookup'] + + ['-dynamiclib', '-install_name', + '@executable_path/$(TARGET)', '-undefined', + 'dynamic_lookup'] + args) def _include_dirs_for_libffi(self): From noreply at buildbot.pypy.org Sat Jan 3 02:42:51 2015 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 3 Jan 2015 02:42:51 +0100 (CET) Subject: [pypy-commit] pypy default: attempt to fix lldebug and the like w/ the now default of shared Message-ID: <20150103014251.8B9CF1C1056@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r75215:3b999828c913 Date: 2015-01-02 16:53 -0800 http://bitbucket.org/pypy/pypy/changeset/3b999828c913/ Log: attempt to fix lldebug and the like w/ the now default of shared diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -400,15 +400,15 @@ mk.definition('PROFOPT', profopt) rules = [ - ('clean', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES) $(ASMFILES) *.gc?? ../module_cache/*.gc??'), - ('clean_noprof', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES) $(ASMFILES)'), + ('clean', '', 'rm -f $(OBJECTS) $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES) *.gc?? 
../module_cache/*.gc??'), + ('clean_noprof', '', 'rm -f $(OBJECTS) $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES)'), ('debug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT" debug_target'), ('debug_exc', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DDO_LOG_EXC" debug_target'), ('debug_mem', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_TRIVIAL_MALLOC" debug_target'), - ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), + ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(DEFAULT_TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('lldebug0','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -O0 -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), - ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'), + ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(DEFAULT_TARGET)'), ] if self.has_profopt(): rules.append( @@ -471,7 +471,7 @@ if self.translator.platform.name == 'msvc': mk.rule('debug_target', 'debugmode_$(DEFAULT_TARGET)', 'rem') else: - mk.rule('debug_target', '$(TARGET)', '#') + mk.rule('debug_target', '$(DEFAULT_TARGET)', '#') mk.write() #self.translator.platform, # , From noreply at buildbot.pypy.org Sat Jan 3 03:15:27 2015 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 3 Jan 2015 03:15:27 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20150103021527.BE1271C0347@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r75216:b85a8b82049d Date: 2015-01-02 18:14 -0800 http://bitbucket.org/pypy/pypy/changeset/b85a8b82049d/ Log: merge default diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -400,15 +400,15 @@ mk.definition('PROFOPT', profopt) rules = [ - ('clean', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES) $(ASMFILES) 
*.gc?? ../module_cache/*.gc??'), - ('clean_noprof', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES) $(ASMFILES)'), + ('clean', '', 'rm -f $(OBJECTS) $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES) *.gc?? ../module_cache/*.gc??'), + ('clean_noprof', '', 'rm -f $(OBJECTS) $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES)'), ('debug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT" debug_target'), ('debug_exc', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DDO_LOG_EXC" debug_target'), ('debug_mem', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_TRIVIAL_MALLOC" debug_target'), - ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), + ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(DEFAULT_TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('lldebug0','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -O0 -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), - ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'), + ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(DEFAULT_TARGET)'), ] if self.has_profopt(): rules.append( @@ -471,7 +471,7 @@ if self.translator.platform.name == 'msvc': mk.rule('debug_target', 'debugmode_$(DEFAULT_TARGET)', 'rem') else: - mk.rule('debug_target', '$(TARGET)', '#') + mk.rule('debug_target', '$(DEFAULT_TARGET)', '#') mk.write() #self.translator.platform, # , diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -14,7 +14,9 @@ def _args_for_shared(self, args): return (list(self.shared_only) - + ['-dynamiclib', '-install_name', '@rpath/$(TARGET)', '-undefined', 'dynamic_lookup'] + + ['-dynamiclib', '-install_name', + '@executable_path/$(TARGET)', '-undefined', + 'dynamic_lookup'] + args) def _include_dirs_for_libffi(self): From 
noreply at buildbot.pypy.org Sat Jan 3 16:46:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Jan 2015 16:46:50 +0100 (CET) Subject: [pypy-commit] pypy squeaky/use-cflags-for-compiling-asm: Close branch squeaky/use-cflags-for-compiling-asm Message-ID: <20150103154650.077171C0347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: squeaky/use-cflags-for-compiling-asm Changeset: r75217:374ba49279d1 Date: 2015-01-03 16:47 +0100 http://bitbucket.org/pypy/pypy/changeset/374ba49279d1/ Log: Close branch squeaky/use-cflags-for-compiling-asm From noreply at buildbot.pypy.org Sat Jan 3 16:46:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Jan 2015 16:46:56 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in squeaky/use-cflags-for-compiling-asm (pull request #295) Message-ID: <20150103154656.C16841C0347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75218:5f188a56b47d Date: 2015-01-03 16:47 +0100 http://bitbucket.org/pypy/pypy/changeset/5f188a56b47d/ Log: Merged in squeaky/use-cflags-for-compiling-asm (pull request #295) Use CFLAGS diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -449,7 +449,7 @@ '-o $*.s -S $< $(INCLUDEDIRS)', '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' '-t $*.s > $*.gctmp', - '$(CC) -o $*.o -c $*.lbl.s', + '$(CC) $(CFLAGS) -o $*.o -c $*.lbl.s', 'mv $*.gctmp $*.gcmap', 'rm $*.s $*.lbl.s']) From noreply at buildbot.pypy.org Sat Jan 3 21:36:36 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Jan 2015 21:36:36 +0100 (CET) Subject: [pypy-commit] pypy optresult: start fixing the merge Message-ID: <20150103203636.7F0511C1356@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optresult Changeset: r75219:2baf29ac51a3 Date: 2015-01-03 22:36 +0200 http://bitbucket.org/pypy/pypy/changeset/2baf29ac51a3/ Log: start fixing the merge diff --git a/rpython/jit/metainterp/history.py 
b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -173,9 +173,6 @@ class Const(AbstractValue): __slots__ = () - is_source_op = True - source_op = None - @staticmethod def _new(x): "NOT_RPYTHON" diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -151,7 +151,7 @@ opnum = OpHelpers.getfield_for_descr(op.getdescr()) getop = ResOperation(opnum, [op.getarg(0)], op.getdescr()) - getop.source_op = result + xxx if isinstance(result, Const): optimizer.make_constant(getop, result) getop.is_source_op = True @@ -165,6 +165,7 @@ getop = ResOperation(opnum, [op.getarg(0), op.getarg(1)], op.getdescr()) getop.source_op = result + xxx if isinstance(result, Const): xxx optimizer.make_constant(getop, result) diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -245,8 +245,7 @@ # Transform into INT_ADD. The following guard will be killed # by optimize_GUARD_NO_OVERFLOW; if we see instead an # optimize_GUARD_OVERFLOW, then InvalidLoop. 
- xxx - op = op.copy_and_change(rop.INT_ADD) + op = self.replace_op_with(op, rop.INT_ADD) self.emit_operation(op) # emit the op r = self.getvalue(op) r.getintbound().intersect(resbound) @@ -527,7 +526,7 @@ def propagate_bounds_INT_ADD(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - r = self.getvalue(op.result) + r = self.getvalue(op) b = r.getintbound().sub_bound(v2.getintbound()) if v1.getintbound().intersect(b): self.propagate_bounds_backward(op.getarg(0), v1) @@ -538,7 +537,7 @@ def propagate_bounds_INT_SUB(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - r = self.getvalue(op.result) + r = self.getvalue(op) b = r.getintbound().add_bound(v2.getintbound()) if v1.getintbound().intersect(b): self.propagate_bounds_backward(op.getarg(0), v1) @@ -549,7 +548,7 @@ def propagate_bounds_INT_MUL(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - r = self.getvalue(op.result) + r = self.getvalue(op) b = r.getintbound().div_bound(v2.getintbound()) if v1.getintbound().intersect(b): self.propagate_bounds_backward(op.getarg(0), v1) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -324,10 +324,7 @@ if intbound: self.intbound = intbound else: - if isinstance(box, BoxInt): - self.intbound = IntBound(MININT, MAXINT) - else: - self.intbound = IntUnbounded() + self.intbound = IntBound(MININT, MAXINT) def copy_from(self, other_value): assert isinstance(other_value, IntOptValue) @@ -595,18 +592,16 @@ @specialize.argtype(0) def getvalue(self, box): - while box.source_op is not None: - box = box.source_op - assert box.is_source_op box = self.getinterned(box) try: value = self.values[box] except KeyError: - if isinstance(box, BoxPtr) or isinstance(box, ConstPtr): + if box.type == "r": value = self.values[box] = PtrOptValue(box) - 
elif isinstance(box, BoxInt) or isinstance(box, ConstInt): + elif box.type == "i": value = self.values[box] = IntOptValue(box) else: + assert box.type == "f" value = self.values[box] = OptValue(box) self.ensure_imported(value) return value @@ -625,8 +620,6 @@ def get_constant_box(self, box): if isinstance(box, Const): return box - while box.source_op is not None: - box = box.source_op try: value = self.values[box] self.ensure_imported(value) @@ -657,11 +650,21 @@ # replacing with a different box cur_value.copy_from(value) return + if not replace: + assert box not in self.values self.values[box] = value + def replace_op_with(self, op, newopnum, args=None, descr=None): + newop = op.copy_and_change(newopnum, args, descr) + if newop.type != 'v': + val = self.getvalue(op) + val.box = newop + self.values[newop] = val + return newop + def make_constant(self, box, constbox): if isinstance(constbox, ConstInt): - self.make_equal_to(box, ConstantIntValue(constbox)) + self.getvalue(box).make_constant(constbox) elif isinstance(constbox, ConstPtr): self.make_equal_to(box, ConstantPtrValue(constbox)) elif isinstance(constbox, ConstFloat): @@ -734,10 +737,6 @@ @specialize.argtype(0) def _emit_operation(self, op): assert not op.is_call_pure() - if op.getopnum() == rop.GUARD_VALUE: - val = self.getvalue(op.getarg(0)) - else: - val = None changed = False orig_op = op for i in range(op.numargs()): @@ -751,7 +750,7 @@ newbox = value.force_box(self) if arg is not newbox: if not changed: - op = op.clone() + op = self.replace_op_with(op, op.getopnum()) changed = True op.setarg(i, newbox) self.metainterp_sd.profiler.count(jitprof.Counters.OPT_OPS) @@ -760,13 +759,12 @@ pendingfields = self.pendingfields self.pendingfields = None if self.replaces_guard and orig_op in self.replaces_guard: - self.replace_op(self.replaces_guard[orig_op], op) + self.replace_guard_op(self.replaces_guard[orig_op], op) del self.replaces_guard[op] return else: - guard_op = op.clone() - op = 
self.store_final_boxes_in_guard(guard_op, pendingfields, - val) + guard_op = self.replace_op_with(op, op.getopnum()) + op = self.store_final_boxes_in_guard(guard_op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True self._last_emitted_op = orig_op @@ -784,11 +782,11 @@ if box is not arg: if not changed: changed = True - op = op.clone() + op = self.replace_op_with(op, op.getopnum()) op.setarg(i, box) return op - def replace_op(self, old_op_pos, new_op): + def replace_guard_op(self, old_op_pos, new_op): old_op = self._newoperations[old_op_pos] assert old_op.is_guard() old_descr = old_op.getdescr() @@ -796,7 +794,7 @@ new_descr.copy_all_attributes_from(old_descr) self._newoperations[old_op_pos] = new_op - def store_final_boxes_in_guard(self, op, pendingfields, val): + def store_final_boxes_in_guard(self, op, pendingfields): assert pendingfields is not None if op.getdescr() is not None: descr = op.getdescr() @@ -818,12 +816,12 @@ descr.store_final_boxes(op, newboxes, self.metainterp_sd) # if op.getopnum() == rop.GUARD_VALUE: + val = self.getvalue(op.getarg(0)) if val in self.bool_boxes: # Hack: turn guard_value(bool) into guard_true/guard_false. # This is done after the operation is emitted to let # store_final_boxes_in_guard set the guard_opnum field of the # descr to the original rop.GUARD_VALUE. - v = self.getvalue(op) constvalue = op.getarg(1).getint() if constvalue == 0: opnum = rop.GUARD_FALSE @@ -831,9 +829,8 @@ opnum = rop.GUARD_TRUE else: raise AssertionError("uh?") - newop = ResOperation(opnum, [op.getarg(0)], descr) + newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) newop.setfailargs(op.getfailargs()) - v.box = newop return newop else: # a real GUARD_VALUE. Make it use one counter per value. 
diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -56,7 +56,7 @@ if nextop: self.emit_operation(nextop) if args is not None: - self.pure_operations[args] = self.getvalue(op.result) + self.pure_operations[args] = self.getvalue(op) if remember: self.remember_emitting_pure(remember) @@ -75,14 +75,13 @@ op.getdescr()) oldval = self.pure_operations.get(args, None) if oldval is not None: - assert oldop.getopnum() == op.getopnum() # this removes a CALL_PURE that has the same (non-constant) # arguments as a previous CALL_PURE. self.make_equal_to(op, oldval) self.last_emitted_operation = REMOVED return else: - self.pure_operations[args] = self.getvalue(op.result) + self.pure_operations[args] = self.getvalue(op) # replace CALL_PURE with just CALL args = op.getarglist() diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -291,7 +291,6 @@ def optimize_GUARD_VALUE(self, op): value = self.getvalue(op.getarg(0)) - opv = self.getvalue(op) if value.is_virtual(): arg = value.get_constant_class(self.optimizer.cpu) if arg: @@ -346,7 +345,6 @@ value.make_constant_class(None, expectedclassbox) def optimize_GUARD_CLASS(self, op): - opv = self.getvalue(op) value = self.getvalue(op.getarg(0)) expectedclassbox = op.getarg(1) assert isinstance(expectedclassbox, Const) diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -54,7 +54,7 @@ def optimize_JUMP(self, op): if not self.unroll: - op = op.clone() + op = op.copy_and_change(op.getopnum()) descr = op.getdescr() assert isinstance(descr, JitCellToken) if not 
descr.target_tokens: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -18,7 +18,7 @@ b0 = InputArgInt() b1 = InputArgInt() opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu), - None) + None, None) op = ResOperation(rop.GUARD_TRUE, ['dummy'], None) # setup rd data fi0 = resume.FrameInfo(None, "code0", 11) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -97,44 +97,6 @@ class OptimizeOptTest(BaseTestWithUnroll): -<<<<<<< local - def setup_method(self, meth=None): - class FailDescr(compile.ResumeGuardDescr): - oparse = None - def _oparser_uses_descr_of_guard(self, oparse, fail_args): - # typically called 3 times: once when parsing 'ops', - # once when parsing 'preamble', once when parsing 'expected'. - self.oparse = oparse - self.rd_frame_info_list, self.rd_snapshot = snapshot(fail_args) - def _clone_if_mutable(self, memo): - assert self is fdescr - return fdescr2 - def __repr__(self): - if self is fdescr: - return 'fdescr' - if self is fdescr2: - return 'fdescr2' - return compile.ResumeGuardDescr.__repr__(self) - # - def snapshot(fail_args, got=[]): - if not got: # only the first time, i.e. 
when parsing 'ops' - rd_frame_info_list = resume.FrameInfo(None, "code", 11) - rd_snapshot = resume.Snapshot(None, fail_args) - got.append(rd_frame_info_list) - got.append(rd_snapshot) - return got - # - fdescr = instantiate(FailDescr) - self.namespace['fdescr'] = fdescr - fdescr2 = instantiate(FailDescr) - self.namespace['fdescr2'] = fdescr2 - - def teardown_method(self, meth): - self.namespace.pop('fdescr', None) - self.namespace.pop('fdescr2', None) - -======= ->>>>>>> other def test_simple(self): ops = """ [] @@ -1184,11 +1146,7 @@ i1 = getfield_gc_i(p0, descr=valuedescr) i2 = int_sub(i1, 1) i3 = int_add(i0, i1) -<<<<<<< local #i4 = same_as_i(i2) # This same_as should be killed by backend -======= - i4 = same_as(i2) # This same_as should be killed by backend ->>>>>>> other jump(i3, i1, i2) """ expected = """ @@ -2448,31 +2406,17 @@ guard_true(i3) [p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) -<<<<<<< local escape_n() #i5 = same_as(i4) jump(p1, i2, i4) -======= - escape() - i5 = same_as(i4) - jump(p1, i2, i4, i5) ->>>>>>> other - """ - expected = """ -<<<<<<< local - [p1, i2, i4] -======= - [p1, i2, i4, i5] ->>>>>>> other + """ + expected = """ + [p1, i2, i4] # i5 guard_true(i4) [p1] setfield_gc(p1, NULL, descr=nextdescr) -<<<<<<< local escape_n() jump(p1, i2, 1) -======= - escape() - jump(p1, i2, i5, i5) ->>>>>>> other + #jump(p1, i2, i5, i5) """ self.optimize_loop(ops, expected, preamble) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -13,7 +13,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.logger import LogOperations from rpython.jit.codewriter.heaptracker import register_known_gctype -from rpython.jit.tool.oparser import parse, pure_parse +from rpython.jit.tool.oparser import OpParser from 
rpython.jit.metainterp.quasiimmut import QuasiImmutDescr from rpython.jit.metainterp import compile, resume, history from rpython.jit.metainterp.jitprof import EmptyProfiler @@ -356,7 +356,7 @@ _kind2count = {history.INT: 1, history.REF: 2, history.FLOAT: 3} return sorted(boxes, key=lambda box: _kind2count[box.type]) -final_descr = BasicFinalDescr() +final_descr = history.BasicFinalDescr() class BaseTest(object): diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -268,9 +268,7 @@ # in case it does, we would prefer to be suboptimal in asm # to a fatal RPython exception. # XXX investigate what is it - source_op = newresult - while source_op.source_op: - source_op = source_op.source_op + xxxx if source_op is not op and \ not self.short_boxes.has_producer(newresult) and \ not newvalue.is_constant(): @@ -396,6 +394,7 @@ target_token = start_label.getdescr() assert isinstance(target_token, TargetToken) + xxx # Turn guards into conditional jumps to the preamble #for i in range(len(short)): # op = short[i] @@ -607,7 +606,6 @@ while i < len(short_preamble): shop = short_preamble[i] newop = shop.clone(memo) - newop.is_source_op = True if newop.is_guard(): if not patchguardop: raise InvalidLoop("would like to have short preamble, but it has a guard and there's no guard_future_condition") diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -638,6 +638,7 @@ return alts def add_to_short(self, box, op): + xxx #if op: # op = op.clone(self.memo) # op.is_source_op = True diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ 
b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -66,7 +66,6 @@ mode.STRLEN, [box]) else: lengthop = ResOperation(mode.STRLEN, [box]) - lengthop.is_source_op = True string_optimizer.emit_operation(lengthop) return lengthop @@ -111,7 +110,7 @@ assert self.source_op is not None lengthbox = self.getstrlen(optforce, self.mode, None) op = ResOperation(self.mode.NEWSTR, [lengthbox]) - op.source_op = self.source_op + xxx self.box = op if not we_are_translated(): op.name = 'FORCE' @@ -360,7 +359,7 @@ if string_optimizer is None: return None op = ResOperation(rop.INT_ADD, [box1, box2]) - op.is_source_op = True + xxx string_optimizer.emit_operation(op) return op @@ -371,7 +370,7 @@ if isinstance(box1, ConstInt): return ConstInt(box1.value - box2.value) op = ResOperation(rop.INT_SUB, [box1, box2]) - op.is_source_op = True + xxx string_optimizer.emit_operation(op) return op diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -292,8 +292,8 @@ def setfailargs(self, fail_args): self._fail_args = fail_args - def copy_and_change(self, opnum, args=None, result=None, descr=None): - newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr) + def copy_and_change(self, opnum, args=None, descr=None): + newop = AbstractResOp.copy_and_change(self, opnum, args, descr) assert isinstance(newop, GuardResOp) newop.setfailargs(self.getfailargs()) newop.rd_snapshot = self.rd_snapshot diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -372,15 +372,8 @@ storage = self.storage # make sure that nobody attached resume data to this guard yet assert not storage.rd_numb -<<<<<<< local - snapshot = storage.rd_snapshot - if snapshot is None: - assert not we_are_translated() - return # for tests in optimizeopt -======= snapshot = 
self.snapshot_storage.rd_snapshot assert snapshot is not None # is that true? ->>>>>>> other numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot) self.liveboxes_from_env = liveboxes_from_env self.liveboxes = {} diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -22,6 +22,16 @@ def getopname(self): return 'escape' + def copy_and_change(self, opnum, args=None, descr=None): + assert opnum == self.OPNUM + op = self.__class__() + if args is not None: + op.initarglist(args) + else: + op.initarglist(self._args) + assert descr is None + return op + def clone(self, memo): op = self.__class__() op.initarglist([memo.get(arg, arg) for arg in self.getarglist()]) @@ -71,7 +81,7 @@ op.initarglist(self.getarglist()[:]) return op - def _copy_and_change(self, opnum, args=None, descr=None): + def copy_and_change(self, opnum, args=None, descr=None): assert opnum == self.OPNUM newop = FORCE_SPILL() newop.initarglist(args or self.getarglist()) From noreply at buildbot.pypy.org Sun Jan 4 19:14:00 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:00 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Fix: 'a' in dictproxy() should not raise KeyError Message-ID: <20150104181400.4A1C71C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75220:a3cb64940229 Date: 2015-01-01 20:25 +0100 http://bitbucket.org/pypy/pypy/changeset/a3cb64940229/ Log: Fix: 'a' in dictproxy() should not raise KeyError diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -145,7 +145,12 @@ unerase = staticmethod(unerase) def getitem(self, w_dict, w_key): - return self.space.getitem(self.unerase(w_dict.dstorage), w_key) + try: + return self.space.getitem(self.unerase(w_dict.dstorage), w_key) + except OperationError, e: + if not 
e.match(self.space, self.space.w_KeyError): + raise + return None def setitem(self, w_dict, w_key, w_value): raise oefmt(self.space.w_TypeError, diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -46,6 +46,8 @@ assert a.__dict__ != b.__dict__ assert a.__dict__ != {'123': '456'} assert {'123': '456'} != a.__dict__ + b.__dict__.pop('__qualname__') + c.__dict__.pop('__qualname__') assert b.__dict__ == c.__dict__ def test_str_repr(self): @@ -69,6 +71,8 @@ mapping = dict(a=1, b=2, c=3) proxy = dictproxy(mapping) assert proxy['a'] == 1 + assert 'a' in proxy + assert 'z' not in proxy assert repr(proxy) == 'mappingproxy(%r)' % mapping assert proxy.keys() == mapping.keys() raises(TypeError, "proxy['a'] = 4") From noreply at buildbot.pypy.org Sun Jan 4 19:14:01 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:01 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Make SimpleNamespace even simpler, Message-ID: <20150104181401.7F1791C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75221:ef09df7491bd Date: 2015-01-01 20:53 +0100 http://bitbucket.org/pypy/pypy/changeset/ef09df7491bd/ Log: Make SimpleNamespace even simpler, because it's now a mutable object. 
diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -111,23 +111,8 @@ class SimpleNamespace: - def __new__(cls, **kwargs): - self = super().__new__(cls) - self._ns = {} - return self - def __init__(self, **kwargs): - self._ns.update(kwargs) - - def __getattr__(self, name): - try: - return self._ns[name] - except KeyError: - raise AttributeError(name) - - @property - def __dict__(self): - return self._ns + self.__dict__.update(kwargs) def __repr__(self, recurse=set()): ident = id(self) @@ -135,7 +120,7 @@ return "namespace(...)" recurse.add(ident) try: - pairs = ('%s=%r' % item for item in sorted(self._ns.items())) + pairs = ('%s=%r' % item for item in sorted(self.__dict__.items())) return "namespace(%s)" % ', '.join(pairs) finally: recurse.remove(ident) diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -554,6 +554,17 @@ ns1 = type(sys.implementation)(x=1, y=2, w=3) assert repr(ns1) == "namespace(w=3, x=1, y=2)" + def test_simplenamespace(self): + import sys + SimpleNamespace = type(sys.implementation) + ns = SimpleNamespace(x=1, y=2, w=3) + # + ns.z = 4 + assert ns.__dict__ == dict(x=1, y=2, w=3, z=4) + # + raises(AttributeError, "del ns.spam") + del ns.y + def test_settrace(self): import sys counts = [] From noreply at buildbot.pypy.org Sun Jan 4 19:14:02 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:02 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Add move validation for ast.Try Message-ID: <20150104181402.AA0771C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75222:22011d79ba65 Date: 2015-01-01 22:38 +0100 http://bitbucket.org/pypy/pypy/changeset/22011d79ba65/ Log: Add move validation for ast.Try diff --git a/pypy/interpreter/astcompiler/validate.py 
b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -242,6 +242,12 @@ def visit_Try(self, node): self._validate_body(node.body, "Try") + if not node.handlers and not node.finalbody: + raise ValidationError( + "Try has neither except handlers nor finalbody") + if not node.handlers and node.orelse: + raise ValidationError( + "Try has orelse but not except handlers") for handler in node.handlers: handler.walkabout(self) self._validate_stmts(node.orelse) From noreply at buildbot.pypy.org Sun Jan 4 19:14:03 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:03 +0100 (CET) Subject: [pypy-commit] pypy py3.3: bytes.count() accepts an int for a character. Message-ID: <20150104181403.D14E41C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75223:1db171bd1fdb Date: 2015-01-01 22:42 +0100 http://bitbucket.org/pypy/pypy/changeset/1db171bd1fdb/ Log: bytes.count() accepts an int for a character. 
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -70,7 +70,7 @@ raise if not 0 <= char < 256: raise oefmt(space.w_ValueError, - "byte must be in range(256)") + "byte must be in range(0, 256)") return chr(char) def descr_len(self, space): @@ -178,21 +178,12 @@ def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) + sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): - return space.newint(value.count(self._op_val(space, w_sub), start, - end)) - - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - from pypy.objspace.std.bytesobject import W_BytesObject - if isinstance(w_sub, W_BytearrayObject): - res = count(value, w_sub.data, start, end) - elif isinstance(w_sub, W_BytesObject): - res = count(value, w_sub._value, start, end) + return space.newint(value.count(sub, start, end)) else: - buffer = _get_buffer(space, w_sub) - res = count(value, buffer, start, end) - - return space.wrap(max(res, 0)) + res = count(value, sub, start, end) + return space.wrap(max(res, 0)) def descr_decode(self, space, w_encoding=None, w_errors=None): from pypy.objspace.std.unicodeobject import ( diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -169,6 +169,7 @@ assert bytearray(b'hello').count(b'l') == 2 assert bytearray(b'hello').count(bytearray(b'l')) == 2 assert bytearray(b'hello').count(memoryview(b'l')) == 2 + assert bytearray(b'hello').count(ord('l')) == 2 assert bytearray(b'hello').index(b'e') == 1 assert bytearray(b'hello').rindex(b'l') == 3 diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- 
a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -307,6 +307,7 @@ assert b'aaa'.count(b'a', 0, -1) == 2 assert b'aaa'.count(b'a', 0, -10) == 0 assert b'ababa'.count(b'aba') == 1 + assert b'ababa'.count(ord('a')) == 3 def test_startswith(self): assert b'ab'.startswith(b'ab') is True From noreply at buildbot.pypy.org Sun Jan 4 19:14:05 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:05 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Add bytearray.copy(), bytearray.clear() Message-ID: <20150104181405.05EFC1C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75224:d8fae768da4c Date: 2015-01-01 22:51 +0100 http://bitbucket.org/pypy/pypy/changeset/d8fae768da4c/ Log: Add bytearray.copy(), bytearray.clear() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -425,6 +425,11 @@ def descr_reverse(self, space): self.data.reverse() + def descr_clear(self, space): + self.data = [] + + def descr_copy(self, space): + return self._new(self.data[:]) # ____________________________________________________________ @@ -594,6 +599,18 @@ done using the specified fill character (default is a space). """ + def clear(): + """B.clear() -> None + + Remove all items from B. + """ + + def copy(): + """B.copy() -> bytearray + + Return a copy of B. 
+ """ + def count(): """B.count(sub[, start[, end]]) -> int @@ -1045,6 +1062,10 @@ doc=BytearrayDocstrings.remove.__doc__), reverse = interp2app(W_BytearrayObject.descr_reverse, doc=BytearrayDocstrings.reverse.__doc__), + clear = interp2app(W_BytearrayObject.descr_clear, + doc=BytearrayDocstrings.clear.__doc__), + copy = interp2app(W_BytearrayObject.descr_copy, + doc=BytearrayDocstrings.copy.__doc__), ) W_BytearrayObject.typedef.flag_sequence_bug_compat = True diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -323,6 +323,13 @@ b.remove(Indexable()) assert b == b'' + def test_clear(self): + b = bytearray(b'hello') + b2 = b.copy() + b.clear() + assert b == bytearray() + assert b2 == bytearray(b'hello') + def test_reverse(self): b = bytearray(b'hello') b.reverse() From noreply at buildbot.pypy.org Sun Jan 4 19:14:06 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:06 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Update _testcapimodule.c with CPython3.3 Message-ID: <20150104181406.38DE71C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75225:08430c91ae5f Date: 2015-01-01 23:12 +0100 http://bitbucket.org/pypy/pypy/changeset/08430c91ae5f/ Log: Update _testcapimodule.c with CPython3.3 diff --git a/lib-python/3/test/test_capi.py b/lib-python/3/test/test_capi.py --- a/lib-python/3/test/test_capi.py +++ b/lib-python/3/test/test_capi.py @@ -360,6 +360,8 @@ for name in dir(_testcapi): if name.startswith('test_'): test = getattr(_testcapi, name) + if support.verbose: + print("_tescapi.%s()" % name) test() if __name__ == "__main__": diff --git a/lib_pypy/_testcapimodule.c b/lib_pypy/_testcapimodule.c --- a/lib_pypy/_testcapimodule.c +++ b/lib_pypy/_testcapimodule.c @@ -1,4 +1,4 @@ -/* Verbatim copy of Modules/_testcapimodule.c from CPython 3.2, 
+/* Verbatim copy of Modules/_testcapimodule.c from CPython 3.3, * except that "run_in_subinterp" is commented out */ /* @@ -25,14 +25,7 @@ static PyObject * raiseTestError(const char* test_name, const char* msg) { - char buf[2048]; - - if (strlen(test_name) + strlen(msg) > sizeof(buf) - 50) - PyErr_SetString(TestError, "internal error msg too large"); - else { - PyOS_snprintf(buf, sizeof(buf), "%s: %s", test_name, msg); - PyErr_SetString(TestError, buf); - } + PyErr_Format(TestError, "%s: %s", test_name, msg); return NULL; } @@ -46,11 +39,9 @@ sizeof_error(const char* fatname, const char* typname, int expected, int got) { - char buf[1024]; - PyOS_snprintf(buf, sizeof(buf), - "%.200s #define == %d but sizeof(%.200s) == %d", + PyErr_Format(TestError, + "%s #define == %d but sizeof(%s) == %d", fatname, expected, typname, got); - PyErr_SetString(TestError, buf); return (PyObject*)NULL; } @@ -132,7 +123,13 @@ for (i = 0; i < count; i++) { v = PyLong_FromLong(i); - PyDict_SetItem(dict, v, v); + if (v == NULL) { + return -1; + } + if (PyDict_SetItem(dict, v, v) < 0) { + Py_DECREF(v); + return -1; + } Py_DECREF(v); } @@ -287,95 +284,6 @@ } -/* Issue #7385: Check that memoryview() does not crash - * when bf_getbuffer returns an error - */ - -static int -broken_buffer_getbuffer(PyObject *self, Py_buffer *view, int flags) -{ - PyErr_SetString( - TestError, - "test_broken_memoryview: expected error in bf_getbuffer"); - return -1; -} - -static PyBufferProcs memoryviewtester_as_buffer = { - (getbufferproc)broken_buffer_getbuffer, /* bf_getbuffer */ - 0, /* bf_releasebuffer */ -}; - -static PyTypeObject _MemoryViewTester_Type = { - PyVarObject_HEAD_INIT(NULL, 0) - "memoryviewtester", /* Name of this type */ - sizeof(PyObject), /* Basic object size */ - 0, /* Item size for varobject */ - (destructor)PyObject_Del, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence 
*/ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - &memoryviewtester_as_buffer, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - PyType_GenericNew, /* tp_new */ -}; - -static PyObject* -test_broken_memoryview(PyObject* self) -{ - PyObject *obj = PyObject_New(PyObject, &_MemoryViewTester_Type); - PyObject *res; - - if (obj == NULL) { - PyErr_Clear(); - PyErr_SetString( - TestError, - "test_broken_memoryview: failed to create object"); - return NULL; - } - - res = PyMemoryView_FromObject(obj); - if (res || !PyErr_Occurred()){ - PyErr_SetString( - TestError, - "test_broken_memoryview: memoryview() didn't raise an Exception"); - Py_XDECREF(res); - Py_DECREF(obj); - return NULL; - } - - PyErr_Clear(); - Py_DECREF(obj); - Py_RETURN_NONE; -} - - /* Tests of PyLong_{As, From}{Unsigned,}Long(), and (#ifdef HAVE_LONG_LONG) PyLong_{As, From}{Unsigned,}LongLong(). @@ -781,6 +689,68 @@ return Py_None; } +/* Test the PyLong_As{Size,Ssize}_t API. At present this just tests that + non-integer arguments are handled correctly. It should be extended to + test overflow handling. 
+ */ + +static PyObject * +test_long_as_size_t(PyObject *self) +{ + size_t out_u; + Py_ssize_t out_s; + + Py_INCREF(Py_None); + + out_u = PyLong_AsSize_t(Py_None); + if (out_u != (size_t)-1 || !PyErr_Occurred()) + return raiseTestError("test_long_as_size_t", + "PyLong_AsSize_t(None) didn't complain"); + if (!PyErr_ExceptionMatches(PyExc_TypeError)) + return raiseTestError("test_long_as_size_t", + "PyLong_AsSize_t(None) raised " + "something other than TypeError"); + PyErr_Clear(); + + out_s = PyLong_AsSsize_t(Py_None); + if (out_s != (Py_ssize_t)-1 || !PyErr_Occurred()) + return raiseTestError("test_long_as_size_t", + "PyLong_AsSsize_t(None) didn't complain"); + if (!PyErr_ExceptionMatches(PyExc_TypeError)) + return raiseTestError("test_long_as_size_t", + "PyLong_AsSsize_t(None) raised " + "something other than TypeError"); + PyErr_Clear(); + + /* Py_INCREF(Py_None) omitted - we already have a reference to it. */ + return Py_None; +} + +/* Test the PyLong_AsDouble API. At present this just tests that + non-integer arguments are handled correctly. + */ + +static PyObject * +test_long_as_double(PyObject *self) +{ + double out; + + Py_INCREF(Py_None); + + out = PyLong_AsDouble(Py_None); + if (out != -1.0 || !PyErr_Occurred()) + return raiseTestError("test_long_as_double", + "PyLong_AsDouble(None) didn't complain"); + if (!PyErr_ExceptionMatches(PyExc_TypeError)) + return raiseTestError("test_long_as_double", + "PyLong_AsDouble(None) raised " + "something other than TypeError"); + PyErr_Clear(); + + /* Py_INCREF(Py_None) omitted - we already have a reference to it. */ + return Py_None; +} + /* Test the L code for PyArg_ParseTuple. This should deliver a PY_LONG_LONG for both long and int arguments. The test may leak a little memory if it fails. 
@@ -840,7 +810,8 @@ } /* test PyArg_ParseTupleAndKeywords */ -static PyObject *getargs_keywords(PyObject *self, PyObject *args, PyObject *kwargs) +static PyObject * +getargs_keywords(PyObject *self, PyObject *args, PyObject *kwargs) { static char *keywords[] = {"arg1","arg2","arg3","arg4","arg5", NULL}; static char *fmt="(ii)i|(i(ii))(iii)i"; @@ -855,6 +826,21 @@ int_args[5], int_args[6], int_args[7], int_args[8], int_args[9]); } +/* test PyArg_ParseTupleAndKeywords keyword-only arguments */ +static PyObject * +getargs_keyword_only(PyObject *self, PyObject *args, PyObject *kwargs) +{ + static char *keywords[] = {"required", "optional", "keyword_only", NULL}; + int required = -1; + int optional = -1; + int keyword_only = -1; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i$i", keywords, + &required, &optional, &keyword_only)) + return NULL; + return Py_BuildValue("iii", required, optional, keyword_only); +} + /* Functions to call PyArg_ParseTuple with integer format codes, and return the result. 
*/ @@ -939,6 +925,15 @@ return PyLong_FromSsize_t(value); } +static PyObject * +getargs_p(PyObject *self, PyObject *args) +{ + int value; + if (!PyArg_ParseTuple(args, "p", &value)) + return NULL; + return PyLong_FromLong(value); +} + #ifdef HAVE_LONG_LONG static PyObject * getargs_L(PyObject *self, PyObject *args) @@ -1015,6 +1010,15 @@ } static PyObject * +getargs_c(PyObject *self, PyObject *args) +{ + char c; + if (!PyArg_ParseTuple(args, "c", &c)) + return NULL; + return PyBytes_FromStringAndSize(&c, 1); +} + +static PyObject * getargs_s(PyObject *self, PyObject *args) { char *str; @@ -1200,51 +1204,73 @@ } static PyObject * -test_bug_7414(PyObject *self) +parse_tuple_and_keywords(PyObject *self, PyObject *args) { - /* Issue #7414: for PyArg_ParseTupleAndKeywords, 'C' code wasn't being - skipped properly in skipitem() */ - int a = 0, b = 0, result; - char *kwlist[] = {"a", "b", NULL}; - PyObject *tuple = NULL, *dict = NULL, *b_str; - - tuple = PyTuple_New(0); - if (tuple == NULL) - goto failure; - dict = PyDict_New(); - if (dict == NULL) - goto failure; - b_str = PyUnicode_FromString("b"); - if (b_str == NULL) - goto failure; - result = PyDict_SetItemString(dict, "b", b_str); - Py_DECREF(b_str); - if (result < 0) - goto failure; - - result = PyArg_ParseTupleAndKeywords(tuple, dict, "|CC", - kwlist, &a, &b); - if (!result) - goto failure; - - if (a != 0) - return raiseTestError("test_bug_7414", - "C format code not skipped properly"); - if (b != 'b') - return raiseTestError("test_bug_7414", - "C format code returned wrong value"); - - Py_DECREF(dict); - Py_DECREF(tuple); - Py_RETURN_NONE; - - failure: - Py_XDECREF(dict); - Py_XDECREF(tuple); - return NULL; + PyObject *sub_args; + PyObject *sub_kwargs; + char *sub_format; + PyObject *sub_keywords; + + Py_ssize_t i, size; + char *keywords[8 + 1]; /* space for NULL at end */ + PyObject *o; + PyObject *converted[8]; + + int result; + PyObject *return_value = NULL; + + double buffers[8][4]; /* double ensures 
alignment where necessary */ + + if (!PyArg_ParseTuple(args, "OOyO:parse_tuple_and_keywords", + &sub_args, &sub_kwargs, + &sub_format, &sub_keywords)) + return NULL; + + if (!(PyList_CheckExact(sub_keywords) || PyTuple_CheckExact(sub_keywords))) { + PyErr_SetString(PyExc_ValueError, + "parse_tuple_and_keywords: sub_keywords must be either list or tuple"); + return NULL; + } + + memset(buffers, 0, sizeof(buffers)); + memset(converted, 0, sizeof(converted)); + memset(keywords, 0, sizeof(keywords)); + + size = PySequence_Fast_GET_SIZE(sub_keywords); + if (size > 8) { + PyErr_SetString(PyExc_ValueError, + "parse_tuple_and_keywords: too many keywords in sub_keywords"); + goto exit; + } + + for (i = 0; i < size; i++) { + o = PySequence_Fast_GET_ITEM(sub_keywords, i); + if (!PyUnicode_FSConverter(o, (void *)(converted + i))) { + PyErr_Format(PyExc_ValueError, + "parse_tuple_and_keywords: could not convert keywords[%zd] to narrow string", i); + goto exit; + } + keywords[i] = PyBytes_AS_STRING(converted[i]); + } + + result = PyArg_ParseTupleAndKeywords(sub_args, sub_kwargs, + sub_format, keywords, + buffers + 0, buffers + 1, buffers + 2, buffers + 3, + buffers + 4, buffers + 5, buffers + 6, buffers + 7); + + if (result) { + return_value = Py_None; + Py_INCREF(Py_None); + } + +exit: + size = sizeof(converted) / sizeof(converted[0]); + for (i = 0; i < size; i++) { + Py_XDECREF(converted[i]); + } + return return_value; } - static volatile int x; /* Test the u and u# codes for PyArg_ParseTuple. 
May leak memory in case @@ -1296,7 +1322,7 @@ test_Z_code(PyObject *self) { PyObject *tuple, *obj; - Py_UNICODE *value1, *value2; + const Py_UNICODE *value1, *value2; Py_ssize_t len1, len2; tuple = PyTuple_New(2); @@ -1304,7 +1330,6 @@ return NULL; obj = PyUnicode_FromString("test"); - Py_INCREF(obj); PyTuple_SET_ITEM(tuple, 0, obj); Py_INCREF(Py_None); PyTuple_SET_ITEM(tuple, 1, Py_None); @@ -1351,6 +1376,7 @@ #if defined(SIZEOF_WCHAR_T) && (SIZEOF_WCHAR_T == 4) const wchar_t wtext[2] = {(wchar_t)0x10ABCDu}; size_t wtextlen = 1; + const wchar_t invalid[1] = {(wchar_t)0x110000u}; #else const wchar_t wtext[3] = {(wchar_t)0xDBEAu, (wchar_t)0xDFCDu}; size_t wtextlen = 2; @@ -1367,7 +1393,7 @@ return NULL; } - if (PyUnicode_GET_SIZE(wide) != PyUnicode_GET_SIZE(utf8)) { + if (PyUnicode_GET_LENGTH(wide) != PyUnicode_GET_LENGTH(utf8)) { Py_DECREF(wide); Py_DECREF(utf8); return raiseTestError("test_widechar", @@ -1386,6 +1412,23 @@ Py_DECREF(wide); Py_DECREF(utf8); + +#if defined(SIZEOF_WCHAR_T) && (SIZEOF_WCHAR_T == 4) + wide = PyUnicode_FromWideChar(invalid, 1); + if (wide == NULL) + PyErr_Clear(); + else + return raiseTestError("test_widechar", + "PyUnicode_FromWideChar(L\"\\U00110000\", 1) didn't fail"); + + wide = PyUnicode_FromUnicode(invalid, 1); + if (wide == NULL) + PyErr_Clear(); + else + return raiseTestError("test_widechar", + "PyUnicode_FromUnicode(L\"\\U00110000\", 1) didn't fail"); +#endif + Py_RETURN_NONE; } @@ -1454,7 +1497,7 @@ if (!PyArg_ParseTuple(args, "u#|s", &unicode, &length, &errors)) return NULL; - decimal_length = length * 10; /* len('􏿿') */ + decimal_length = length * 7; /* len('€') */ decimal = PyBytes_FromStringAndSize(NULL, decimal_length); if (decimal == NULL) return NULL; @@ -1480,13 +1523,36 @@ unicode_transformdecimaltoascii(PyObject *self, PyObject *args) { Py_UNICODE *unicode; - Py_ssize_t length = 0; + Py_ssize_t length; if (!PyArg_ParseTuple(args, "u#|s", &unicode, &length)) return NULL; return 
PyUnicode_TransformDecimalToASCII(unicode, length); } static PyObject * +unicode_legacy_string(PyObject *self, PyObject *args) +{ + Py_UNICODE *data; + Py_ssize_t len; + PyObject *u; + + if (!PyArg_ParseTuple(args, "u#", &data, &len)) + return NULL; + + u = PyUnicode_FromUnicode(NULL, len); + if (u == NULL) + return NULL; + + memcpy(PyUnicode_AS_UNICODE(u), data, len * sizeof(Py_UNICODE)); + + if (len > 0) { /* The empty string is always ready. */ + assert(!PyUnicode_IS_READY(u)); + } + + return u; +} + +static PyObject * getargs_w_star(PyObject *self, PyObject *args) { Py_buffer buffer; @@ -1572,7 +1638,7 @@ {-3L, 2, -1}, {4L, 3, 1}, {-4L, 3, -1}, - {0x7fffL, 15, 1}, /* one Python long digit */ + {0x7fffL, 15, 1}, /* one Python int digit */ {-0x7fffL, 15, -1}, {0xffffL, 16, 1}, {-0xffffL, 16, -1}, @@ -1580,10 +1646,16 @@ {-0xfffffffL, 28, -1}}; int i; - for (i = 0; i < sizeof(testcases) / sizeof(struct triple); ++i) { - PyObject *plong = PyLong_FromLong(testcases[i].input); - size_t nbits = _PyLong_NumBits(plong); - int sign = _PyLong_Sign(plong); + for (i = 0; i < Py_ARRAY_LENGTH(testcases); ++i) { + size_t nbits; + int sign; + PyObject *plong; + + plong = PyLong_FromLong(testcases[i].input); + if (plong == NULL) + return NULL; + nbits = _PyLong_NumBits(plong); + sign = _PyLong_Sign(plong); Py_DECREF(plong); if (nbits != testcases[i].nbits) @@ -1636,6 +1708,29 @@ return NULL; } +static PyObject * +test_set_exc_info(PyObject *self, PyObject *args) +{ + PyObject *orig_exc; + PyObject *new_type, *new_value, *new_tb; + PyObject *type, *value, *tb; + if (!PyArg_ParseTuple(args, "OOO:test_set_exc_info", + &new_type, &new_value, &new_tb)) + return NULL; + + PyErr_GetExcInfo(&type, &value, &tb); + + Py_INCREF(new_type); + Py_INCREF(new_value); + Py_INCREF(new_tb); + PyErr_SetExcInfo(new_type, new_value, new_tb); + + orig_exc = PyTuple_Pack(3, type ? type : Py_None, value ? value : Py_None, tb ? 
tb : Py_None); + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(tb); + return orig_exc; +} static int test_run_counter = 0; @@ -1790,13 +1885,12 @@ { PyObject *result; char *msg; - static const Py_UNICODE one[] = {'1', 0}; #define CHECK_1_FORMAT(FORMAT, TYPE) \ result = PyUnicode_FromFormat(FORMAT, (TYPE)1); \ if (result == NULL) \ return NULL; \ - if (Py_UNICODE_strcmp(PyUnicode_AS_UNICODE(result), one)) { \ + if (PyUnicode_CompareWithASCIIString(result, "1")) { \ msg = FORMAT " failed at 1"; \ goto Fail; \ } \ @@ -2095,6 +2189,8 @@ /* Test 3: Allocate a few integers, then release them all simultaneously. */ multiple = malloc(sizeof(PyObject*) * 1000); + if (multiple == NULL) + return PyErr_NoMemory(); gettimeofday(&start, NULL); for(k=0; k < 20000; k++) { for(i=0; i < 1000; i++) { @@ -2106,10 +2202,13 @@ } gettimeofday(&stop, NULL); print_delta(3, &start, &stop); + free(multiple); /* Test 4: Allocate many integers, then release them all simultaneously. */ multiple = malloc(sizeof(PyObject*) * 1000000); + if (multiple == NULL) + return PyErr_NoMemory(); gettimeofday(&start, NULL); for(k=0; k < 20; k++) { for(i=0; i < 1000000; i++) { @@ -2121,9 +2220,12 @@ } gettimeofday(&stop, NULL); print_delta(4, &start, &stop); + free(multiple); /* Test 5: Allocate many integers < 32000 */ multiple = malloc(sizeof(PyObject*) * 1000000); + if (multiple == NULL) + return PyErr_NoMemory(); gettimeofday(&start, NULL); for(k=0; k < 10; k++) { for(i=0; i < 1000000; i++) { @@ -2135,6 +2237,7 @@ } gettimeofday(&stop, NULL); print_delta(5, &start, &stop); + free(multiple); /* Test 6: Perform small int addition */ op1 = PyLong_FromLong(1); @@ -2149,10 +2252,12 @@ /* Test 7: Perform medium int addition */ op1 = PyLong_FromLong(1000); + if (op1 == NULL) + return NULL; gettimeofday(&start, NULL); for(i=0; i < 10000000; i++) { result = PyNumber_Add(op1, op1); - Py_DECREF(result); + Py_XDECREF(result); } gettimeofday(&stop, NULL); Py_DECREF(op1); @@ -2323,6 +2428,14 @@ 
PyThreadState_Swap(NULL); substate = Py_NewInterpreter(); + if (substate == NULL) { + /* Since no new thread state was created, there is no exception to + propagate; raise a fresh one after swapping in the old thread + state. */ + PyThreadState_Swap(mainstate); + PyErr_SetString(PyExc_RuntimeError, "sub-interpreter creation failed"); + return NULL; + } r = PyRun_SimpleString(code); Py_EndInterpreter(substate); @@ -2333,6 +2446,133 @@ #endif /* PYPY_VERSION */ +static PyObject * +test_pytime_object_to_time_t(PyObject *self, PyObject *args) +{ + PyObject *obj; + time_t sec; + if (!PyArg_ParseTuple(args, "O:pytime_object_to_time_t", &obj)) + return NULL; + if (_PyTime_ObjectToTime_t(obj, &sec) == -1) + return NULL; + return _PyLong_FromTime_t(sec); +} + +static PyObject * +test_pytime_object_to_timeval(PyObject *self, PyObject *args) +{ + PyObject *obj; + time_t sec; + long usec; + if (!PyArg_ParseTuple(args, "O:pytime_object_to_timeval", &obj)) + return NULL; + if (_PyTime_ObjectToTimeval(obj, &sec, &usec) == -1) + return NULL; + return Py_BuildValue("Nl", _PyLong_FromTime_t(sec), usec); +} + +static PyObject * +test_pytime_object_to_timespec(PyObject *self, PyObject *args) +{ + PyObject *obj; + time_t sec; + long nsec; + if (!PyArg_ParseTuple(args, "O:pytime_object_to_timespec", &obj)) + return NULL; + if (_PyTime_ObjectToTimespec(obj, &sec, &nsec) == -1) + return NULL; + return Py_BuildValue("Nl", _PyLong_FromTime_t(sec), nsec); +} + +#ifdef WITH_THREAD +typedef struct { + PyThread_type_lock start_event; + PyThread_type_lock exit_event; + PyObject *callback; +} test_c_thread_t; + +static void +temporary_c_thread(void *data) +{ + test_c_thread_t *test_c_thread = data; + PyGILState_STATE state; + PyObject *res; + + PyThread_release_lock(test_c_thread->start_event); + + /* Allocate a Python thread state for this thread */ + state = PyGILState_Ensure(); + + res = PyObject_CallFunction(test_c_thread->callback, "", NULL); + Py_CLEAR(test_c_thread->callback); + + if (res 
== NULL) { + PyErr_Print(); + } + else { + Py_DECREF(res); + } + + /* Destroy the Python thread state for this thread */ + PyGILState_Release(state); + + PyThread_release_lock(test_c_thread->exit_event); + + PyThread_exit_thread(); +} + +static PyObject * +call_in_temporary_c_thread(PyObject *self, PyObject *callback) +{ + PyObject *res = NULL; + test_c_thread_t test_c_thread; + long thread; + + PyEval_InitThreads(); + + test_c_thread.start_event = PyThread_allocate_lock(); + test_c_thread.exit_event = PyThread_allocate_lock(); + test_c_thread.callback = NULL; + if (!test_c_thread.start_event || !test_c_thread.exit_event) { + PyErr_SetString(PyExc_RuntimeError, "could not allocate lock"); + goto exit; + } + + Py_INCREF(callback); + test_c_thread.callback = callback; + + PyThread_acquire_lock(test_c_thread.start_event, 1); + PyThread_acquire_lock(test_c_thread.exit_event, 1); + + thread = PyThread_start_new_thread(temporary_c_thread, &test_c_thread); + if (thread == -1) { + PyErr_SetString(PyExc_RuntimeError, "unable to start the thread"); + PyThread_release_lock(test_c_thread.start_event); + PyThread_release_lock(test_c_thread.exit_event); + goto exit; + } + + PyThread_acquire_lock(test_c_thread.start_event, 1); + PyThread_release_lock(test_c_thread.start_event); + + Py_BEGIN_ALLOW_THREADS + PyThread_acquire_lock(test_c_thread.exit_event, 1); + PyThread_release_lock(test_c_thread.exit_event); + Py_END_ALLOW_THREADS + + Py_INCREF(Py_None); + res = Py_None; + +exit: + Py_CLEAR(test_c_thread.callback); + if (test_c_thread.start_event) + PyThread_free_lock(test_c_thread.start_event); + if (test_c_thread.exit_event) + PyThread_free_lock(test_c_thread.exit_event); + return res; +} +#endif /* WITH_THREAD */ + static PyMethodDef TestMethods[] = { {"raise_exception", raise_exception, METH_VARARGS}, @@ -2342,14 +2582,15 @@ {"test_list_api", (PyCFunction)test_list_api, METH_NOARGS}, {"test_dict_iteration", (PyCFunction)test_dict_iteration,METH_NOARGS}, 
{"test_lazy_hash_inheritance", (PyCFunction)test_lazy_hash_inheritance,METH_NOARGS}, - {"test_broken_memoryview", (PyCFunction)test_broken_memoryview,METH_NOARGS}, {"test_long_api", (PyCFunction)test_long_api, METH_NOARGS}, {"test_long_and_overflow", (PyCFunction)test_long_and_overflow, METH_NOARGS}, + {"test_long_as_double", (PyCFunction)test_long_as_double,METH_NOARGS}, + {"test_long_as_size_t", (PyCFunction)test_long_as_size_t,METH_NOARGS}, {"test_long_numbits", (PyCFunction)test_long_numbits, METH_NOARGS}, {"test_k_code", (PyCFunction)test_k_code, METH_NOARGS}, {"test_empty_argparse", (PyCFunction)test_empty_argparse,METH_NOARGS}, - {"test_bug_7414", (PyCFunction)test_bug_7414, METH_NOARGS}, + {"parse_tuple_and_keywords", parse_tuple_and_keywords, METH_VARARGS}, {"test_null_strings", (PyCFunction)test_null_strings, METH_NOARGS}, {"test_string_from_format", (PyCFunction)test_string_from_format, METH_NOARGS}, {"test_with_docstring", (PyCFunction)test_with_docstring, METH_NOARGS, @@ -2360,6 +2601,8 @@ {"getargs_tuple", getargs_tuple, METH_VARARGS}, {"getargs_keywords", (PyCFunction)getargs_keywords, METH_VARARGS|METH_KEYWORDS}, + {"getargs_keyword_only", (PyCFunction)getargs_keyword_only, + METH_VARARGS|METH_KEYWORDS}, {"getargs_b", getargs_b, METH_VARARGS}, {"getargs_B", getargs_B, METH_VARARGS}, {"getargs_h", getargs_h, METH_VARARGS}, @@ -2369,6 +2612,7 @@ {"getargs_i", getargs_i, METH_VARARGS}, {"getargs_l", getargs_l, METH_VARARGS}, {"getargs_n", getargs_n, METH_VARARGS}, + {"getargs_p", getargs_p, METH_VARARGS}, #ifdef HAVE_LONG_LONG {"getargs_L", getargs_L, METH_VARARGS}, {"getargs_K", getargs_K, METH_VARARGS}, @@ -2377,6 +2621,7 @@ (PyCFunction)test_long_long_and_overflow, METH_NOARGS}, {"test_L_code", (PyCFunction)test_L_code, METH_NOARGS}, #endif + {"getargs_c", getargs_c, METH_VARARGS}, {"getargs_s", getargs_s, METH_VARARGS}, {"getargs_s_star", getargs_s_star, METH_VARARGS}, {"getargs_s_hash", getargs_s_hash, METH_VARARGS}, @@ -2403,6 +2648,7 @@ 
{"unicode_aswidecharstring",unicode_aswidecharstring, METH_VARARGS}, {"unicode_encodedecimal", unicode_encodedecimal, METH_VARARGS}, {"unicode_transformdecimaltoascii", unicode_transformdecimaltoascii, METH_VARARGS}, + {"unicode_legacy_string", unicode_legacy_string, METH_VARARGS}, #ifdef WITH_THREAD {"_test_thread_state", test_thread_state, METH_VARARGS}, {"_pending_threadfunc", pending_threadfunc, METH_VARARGS}, @@ -2412,6 +2658,7 @@ #endif {"traceback_print", traceback_print, METH_VARARGS}, {"exception_print", exception_print, METH_VARARGS}, + {"set_exc_info", test_set_exc_info, METH_VARARGS}, {"argparsing", argparsing, METH_VARARGS}, {"code_newempty", code_newempty, METH_VARARGS}, {"make_exception_with_doc", (PyCFunction)make_exception_with_doc, @@ -2422,6 +2669,13 @@ #ifndef PYPY_VERSION {"run_in_subinterp", run_in_subinterp, METH_VARARGS}, #endif + {"pytime_object_to_time_t", test_pytime_object_to_time_t, METH_VARARGS}, + {"pytime_object_to_timeval", test_pytime_object_to_timeval, METH_VARARGS}, + {"pytime_object_to_timespec", test_pytime_object_to_timespec, METH_VARARGS}, +#ifdef WITH_THREAD + {"call_in_temporary_c_thread", call_in_temporary_c_thread, METH_O, + PyDoc_STR("set_error_class(error_class) -> None")}, +#endif {NULL, NULL} /* sentinel */ }; @@ -2492,7 +2746,7 @@ ; test_structmembers *ob; const char *s = NULL; - int string_len = 0; + Py_ssize_t string_len = 0; ob = PyObject_New(test_structmembers, type); if (ob == NULL) return NULL; @@ -2604,7 +2858,6 @@ return NULL; Py_TYPE(&_HashInheritanceTester_Type)=&PyType_Type; - Py_TYPE(&_MemoryViewTester_Type)=&PyType_Type; Py_TYPE(&test_structmembersType)=&PyType_Type; Py_INCREF(&test_structmembersType); From noreply at buildbot.pypy.org Sun Jan 4 19:14:07 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:07 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Easy additions to cpyext, needed for _testcapi Message-ID: <20150104181407.7CD0D1C0930@cobra.cs.uni-duesseldorf.de> Author: 
Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75226:f93c5b218a24 Date: 2015-01-02 12:21 +0100 http://bitbucket.org/pypy/pypy/changeset/f93c5b218a24/ Log: Easy additions to cpyext, needed for _testcapi diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -86,6 +86,7 @@ #include "object.h" #include "abstract.h" #include "pyport.h" +#include "pymacro.h" #include "warnings.h" #include diff --git a/pypy/module/cpyext/include/pymacro.h b/pypy/module/cpyext/include/pymacro.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/pymacro.h @@ -0,0 +1,26 @@ +#ifndef Py_PYMACRO_H +#define Py_PYMACRO_H + +/* Get the number of elements in a visible array + + This does not work on pointers, or arrays declared as [], or function + parameters. With correct compiler support, such usage will cause a build + error (see Py_BUILD_ASSERT_EXPR). + + Written by Rusty Russell, public domain, http://ccodearchive.net/ + + Requires at GCC 3.1+ */ +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__) && \ + (((__GNUC__ == 3) && (__GNU_MINOR__ >= 1)) || (__GNUC__ >= 4))) +/* Two gcc extensions. + &a[0] degrades to a pointer: a different type from an array */ +#define Py_ARRAY_LENGTH(array) \ + (sizeof(array) / sizeof((array)[0]) \ + + Py_BUILD_ASSERT_EXPR(!__builtin_types_compatible_p(typeof(array), \ + typeof(&(array)[0])))) +#else +#define Py_ARRAY_LENGTH(array) \ + (sizeof(array) / sizeof((array)[0])) +#endif + +#endif /* Py_PYMACRO_H */ diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -84,6 +84,15 @@ """ return space.int_w(w_long) + at cpython_api([PyObject], rffi.SIZE_T, error=-1) +def PyLong_AsSize_t(space, w_long): + """Return a C size_t representation of of pylong. pylong must be + an instance of PyLongObject. 
+ + Raise OverflowError if the value of pylong is out of range for a + size_t.""" + return space.uint_w(w_long) + @cpython_api([PyObject], rffi.LONGLONG, error=-1) def PyLong_AsLongLong(space, w_long): """ diff --git a/pypy/module/cpyext/pythonrun.py b/pypy/module/cpyext/pythonrun.py --- a/pypy/module/cpyext/pythonrun.py +++ b/pypy/module/cpyext/pythonrun.py @@ -1,6 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL from pypy.module.cpyext.state import State +from pypy.module.cpyext.pyobject import PyObject +from pypy.module.cpyext.pyerrors import PyErr_SetNone @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def Py_IsInitialized(space): @@ -46,3 +48,8 @@ except ValueError: return -1 return 0 + + at cpython_api([], PyObject, error=CANNOT_FAIL) +def PyThread_exit_thread(space): + PyErr_SetNone(space, space.w_SystemExit) + diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1511,16 +1511,6 @@ raise NotImplementedError - at cpython_api([PyObject], rffi.SIZE_T, error=-1) -def PyLong_AsSize_t(space, pylong): - """Return a C size_t representation of of pylong. pylong must be - an instance of PyLongObject. - - Raise OverflowError if the value of pylong is out of range for a - size_t.""" - raise NotImplementedError - - @cpython_api([PyObject, rffi.CCHARP], rffi.INT_real, error=-1) def PyMapping_DelItemString(space, o, key): """Remove the mapping for object key from the object o. 
Return -1 on diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -201,6 +201,19 @@ assert isinstance(w_obj, unicodeobject.W_UnicodeObject) return space.len_w(w_obj) + at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) +def PyUnicode_GET_LENGTH(space, w_obj): + """Return the length of the Unicode string, in code points. + o has to be a Unicode object in the "canonical" representation + (not checked).""" + assert isinstance(w_obj, unicodeobject.W_UnicodeObject) + return space.len_w(w_obj) + + at cpython_api([PyObject], rffi.INT, error=CANNOT_FAIL) +def PyUnicode_IS_READY(space, w_obj): + # PyPy is always ready. + return space.w_True + @cpython_api([PyObject], rffi.CWCHARP, error=CANNOT_FAIL) def PyUnicode_AS_UNICODE(space, ref): """Return a pointer to the internal Py_UNICODE buffer of the object. ref From noreply at buildbot.pypy.org Sun Jan 4 19:14:08 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:08 +0100 (CET) Subject: [pypy-commit] pypy py3.3: dictproxy.copy() should use the proxied object and not always return a dict. Message-ID: <20150104181408.B5E5A1C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75227:6048008be2aa Date: 2015-01-02 12:22 +0100 http://bitbucket.org/pypy/pypy/changeset/6048008be2aa/ Log: dictproxy.copy() should use the proxied object and not always return a dict. 
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -197,9 +197,7 @@ def descr_copy(self, space): """D.copy() -> a shallow copy of D""" - w_new = W_DictMultiObject.allocate_and_init_instance(space) - update1_dict_dict(space, w_new, self) - return w_new + return self.copy() def descr_items(self, space): """D.items() -> a set-like object providing a view on D's items""" @@ -267,7 +265,7 @@ def _add_indirections(): dict_methods = "getitem getitem_str setitem setdefault \ - popitem delitem clear \ + popitem delitem clear copy \ length w_keys values items \ iterkeys itervalues iteritems \ listview_bytes listview_unicode listview_int \ @@ -412,6 +410,11 @@ w_dict.strategy = strategy w_dict.dstorage = storage + def copy(self, w_dict): + w_new = W_DictMultiObject.allocate_and_init_instance(self.space) + update1_dict_dict(self.space, w_new, w_dict) + return w_new + def listview_bytes(self, w_dict): return None diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -195,6 +195,9 @@ def clear(self, w_dict): raise oefmt(self.space.w_AttributeError, "clear") + def copy(self, w_dict): + return self.space.call_method(self.unerase(w_dict.dstorage), "copy") + create_iterator_classes( MappingProxyStrategy, override_next_key=MappingProxyStrategy.override_next_key, diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -78,6 +78,11 @@ raises(TypeError, "proxy['a'] = 4") raises(TypeError, "del proxy['a']") raises(AttributeError, "proxy.clear()") + # + class D(dict): + def copy(self): return 3 + proxy = dictproxy(D(a=1, b=2, c=3)) + assert proxy.copy() == 3 class 
AppTestUserObjectMethodCache(AppTestUserObject): spaceconfig = {"objspace.std.withmethodcachecounter": True} From noreply at buildbot.pypy.org Sun Jan 4 19:14:09 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:09 +0100 (CET) Subject: [pypy-commit] pypy py3.3: dictproxy() should only accept mappings Message-ID: <20150104181409.E11911C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75228:ed4e7c9f2fd6 Date: 2015-01-02 14:54 +0100 http://bitbucket.org/pypy/pypy/changeset/ed4e7c9f2fd6/ Log: dictproxy() should only accept mappings diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -11,6 +11,11 @@ class W_DictProxyObject(W_DictMultiObject): @staticmethod def descr_new(space, w_type, w_mapping): + if (not space.lookup(w_mapping, "__getitem__") or + space.isinstance_w(w_mapping, space.w_list) or + space.isinstance_w(w_mapping, space.w_tuple)): + raise oefmt(space.w_TypeError, + "mappingproxy() argument must be a mapping, not %T", w_mapping) strategy = space.fromcache(MappingProxyStrategy) storage = strategy.erase(w_mapping) w_obj = space.allocate_instance(W_DictProxyObject, w_type) diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -83,6 +83,9 @@ def copy(self): return 3 proxy = dictproxy(D(a=1, b=2, c=3)) assert proxy.copy() == 3 + # + raises(TypeError, dictproxy, 3) + raises(TypeError, dictproxy, [3]) class AppTestUserObjectMethodCache(AppTestUserObject): spaceconfig = {"objspace.std.withmethodcachecounter": True} From noreply at buildbot.pypy.org Sun Jan 4 19:14:11 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:11 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Add signal.pthread_kill Message-ID: 
<20150104181411.0E9881C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75229:59d5157e77a5 Date: 2015-01-02 21:35 +0100 http://bitbucket.org/pypy/pypy/changeset/59d5157e77a5/ Log: Add signal.pthread_kill diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -25,6 +25,9 @@ for name in ['ITIMER_REAL', 'ITIMER_VIRTUAL', 'ITIMER_PROF']: interpleveldefs[name] = 'space.wrap(interp_signal.%s)' % (name,) + if os.name == 'posix': + interpleveldefs['pthread_kill'] = 'interp_signal.pthread_kill' + appleveldefs = { } diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -328,3 +328,13 @@ c_getitimer(which, old) return itimer_retval(space, old[0]) + + + at unwrap_spec(tid=int, signum=int) +def pthread_kill(space, tid, signum): + "Send a signal to a thread." 
+ ret = c_pthread_kill(tid, signum) + if ret != 0: + raise exception_from_errno(space, space.w_OSError) + # the signal may have been send to the current thread + space.getexecutioncontext().checksignals() diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -319,3 +319,17 @@ import signal raises(signal.ItimerError, signal.setitimer, -1, 0) + +class AppTestPThread: + spaceconfig = dict(usemodules=['signal', 'thread', 'time']) + + def test_pthread_kill(self): + import signal + import _thread + signum = signal.SIGUSR1 + def handler(signum, frame): + 1/0 + signal.signal(signum, handler) + tid = _thread.get_ident() + raises(ZeroDivisionError, signal.pthread_kill, tid, signum) + diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -96,3 +96,5 @@ c_setitimer = external('setitimer', [rffi.INT, itimervalP, itimervalP], rffi.INT) c_getitimer = external('getitimer', [rffi.INT, itimervalP], rffi.INT) + +c_pthread_kill = external('pthread_kill', [lltype.Signed, rffi.INT], rffi.INT) From noreply at buildbot.pypy.org Sun Jan 4 19:14:12 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:12 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Add signal.sigwait() Message-ID: <20150104181412.2DA491C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75230:385525065b2d Date: 2015-01-04 14:29 +0100 http://bitbucket.org/pypy/pypy/changeset/385525065b2d/ Log: Add signal.sigwait() diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -26,6 +26,7 @@ interpleveldefs[name] = 'space.wrap(interp_signal.%s)' % (name,) if os.name == 'posix': + interpleveldefs['sigwait'] = 'interp_signal.sigwait' interpleveldefs['pthread_kill'] = 
'interp_signal.pthread_kill' appleveldefs = { diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -5,14 +5,14 @@ import os import errno -from pypy.interpreter.error import OperationError, exception_from_errno +from pypy.interpreter.error import OperationError, oefmt, exception_from_errno from pypy.interpreter.executioncontext import (AsyncAction, AbstractActionFlag, PeriodicAsyncAction) from pypy.interpreter.gateway import unwrap_spec from rpython.rlib import jit, rposix, rgc from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, widen from rpython.rlib.rsignal import * from rpython.rtyper.lltypesystem import lltype, rffi @@ -334,7 +334,38 @@ def pthread_kill(space, tid, signum): "Send a signal to a thread." ret = c_pthread_kill(tid, signum) - if ret != 0: + if widen(ret) < 0: raise exception_from_errno(space, space.w_OSError) # the signal may have been send to the current thread space.getexecutioncontext().checksignals() + + +class SignalMask(object): + def __init__(self, space, w_signals): + self.space = space + self.w_signals = w_signals + + def __enter__(self): + space = self.space + self.mask = lltype.malloc(c_sigset_t.TO, flavor='raw') + c_sigemptyset(self.mask) + for w_signum in space.unpackiterable(self.w_signals): + signum = space.int_w(w_signum) + check_signum_in_range(space, signum) + err = c_sigaddset(self.mask, signum) + if err: + raise oefmt(space.w_ValueError, + "signal number %d out of range", signum) + return self.mask + + def __exit__(self, *args): + lltype.free(self.mask, flavor='raw') + +def sigwait(space, w_signals): + with SignalMask(space, w_signals) as sigset: + with lltype.scoped_alloc(rffi.INTP.TO, 1) as signum_ptr: + ret = c_sigwait(sigset, signum_ptr) + if ret != 0: + raise exception_from_errno(space, space.w_OSError) + signum 
= signum_ptr[0] + return space.wrap(signum) diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -333,3 +333,12 @@ tid = _thread.get_ident() raises(ZeroDivisionError, signal.pthread_kill, tid, signum) + def test_sigwait(self): + import signal + def handler(signum, frame): + 1/0 + signal.signal(signal.SIGALRM, handler) + signal.alarm(1) + received = signal.sigwait([signal.SIGALRM]) + assert received == signal.SIGALRM + diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -98,3 +98,9 @@ c_getitimer = external('getitimer', [rffi.INT, itimervalP], rffi.INT) c_pthread_kill = external('pthread_kill', [lltype.Signed, rffi.INT], rffi.INT) + +if sys.platform != 'win32': + c_sigset_t = rffi.COpaquePtr('sigset_t', compilation_info=eci) + c_sigemptyset = external('sigemptyset', [c_sigset_t], rffi.INT) + c_sigaddset = external('sigaddset', [c_sigset_t, rffi.INT], rffi.INT) + c_sigwait = external('sigwait', [c_sigset_t, rffi.INTP], rffi.INT) From noreply at buildbot.pypy.org Sun Jan 4 19:14:13 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:13 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Add signal.pthread_sigmask() Message-ID: <20150104181413.524091C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75231:849428da8b0f Date: 2015-01-04 18:51 +0100 http://bitbucket.org/pypy/pypy/changeset/849428da8b0f/ Log: Add signal.pthread_sigmask() diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -28,6 +28,10 @@ if os.name == 'posix': interpleveldefs['sigwait'] = 'interp_signal.sigwait' interpleveldefs['pthread_kill'] = 'interp_signal.pthread_kill' + interpleveldefs['pthread_sigmask'] = 'interp_signal.pthread_sigmask' 
+ interpleveldefs['SIG_BLOCK'] = 'space.wrap(interp_signal.SIG_BLOCK)' + interpleveldefs['SIG_UNBLOCK'] = 'space.wrap(interp_signal.SIG_UNBLOCK)' + interpleveldefs['SIG_SETMASK'] = 'space.wrap(interp_signal.SIG_SETMASK)' appleveldefs = { } diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -361,6 +361,19 @@ def __exit__(self, *args): lltype.free(self.mask, flavor='raw') +def _sigset_to_signals(space, mask): + signals_w = [] + for sig in range(1, NSIG): + if c_sigismember(mask, sig) != 1: + continue + # Handle the case where it is a member by adding the signal to + # the result list. Ignore the other cases because they mean + # the signal isn't a member of the mask or the signal was + # invalid, and an invalid signal must have been our fault in + # constructing the loop boundaries. + signals_w.append(space.wrap(sig)) + return space.call_function(space.w_set, space.newtuple(signals_w)) + def sigwait(space, w_signals): with SignalMask(space, w_signals) as sigset: with lltype.scoped_alloc(rffi.INTP.TO, 1) as signum_ptr: @@ -369,3 +382,14 @@ raise exception_from_errno(space, space.w_OSError) signum = signum_ptr[0] return space.wrap(signum) + + at unwrap_spec(how=int) +def pthread_sigmask(space, how, w_signals): + with SignalMask(space, w_signals) as sigset: + with lltype.scoped_alloc(c_sigset_t.TO) as previous: + ret = c_pthread_sigmask(how, sigset, previous) + if ret != 0: + raise exception_from_errno(space, space.w_OSError) + # if signals was unblocked, signal handlers have been called + space.getexecutioncontext().checksignals() + return _sigset_to_signals(space, previous) diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -342,3 +342,18 @@ received = signal.sigwait([signal.SIGALRM]) assert received == 
signal.SIGALRM + def test_sigmask(self): + import signal, posix + signum1 = signal.SIGUSR1 + signum2 = signal.SIGUSR2 + + def handler(signum, frame): + pass + signal.signal(signum1, handler) + signal.signal(signum2, handler) + + signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2)) + posix.kill(posix.getpid(), signum1) + posix.kill(posix.getpid(), signum2) + # Unblocking the 2 signals calls the C signal handler twice + signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2)) diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -31,7 +31,7 @@ signal_names.append('CTRL_BREAK_EVENT') CTRL_C_EVENT = 0 CTRL_BREAK_EVENT = 1 -includes = ['stdlib.h', 'src/signals.h'] +includes = ['stdlib.h', 'src/signals.h', 'signal.h'] if sys.platform != 'win32': includes.append('sys/time.h') @@ -47,7 +47,9 @@ _compilation_info_ = eci if sys.platform != 'win32': - for name in """ITIMER_REAL ITIMER_VIRTUAL ITIMER_PROF""".split(): + for name in """ + ITIMER_REAL ITIMER_VIRTUAL ITIMER_PROF + SIG_BLOCK SIG_UNBLOCK SIG_SETMASK""".split(): setattr(CConfig, name, rffi_platform.DefinedConstantInteger(name)) CConfig.timeval = rffi_platform.Struct( @@ -103,4 +105,7 @@ c_sigset_t = rffi.COpaquePtr('sigset_t', compilation_info=eci) c_sigemptyset = external('sigemptyset', [c_sigset_t], rffi.INT) c_sigaddset = external('sigaddset', [c_sigset_t, rffi.INT], rffi.INT) + c_sigismember = external('sigismember', [c_sigset_t, rffi.INT], rffi.INT) c_sigwait = external('sigwait', [c_sigset_t, rffi.INTP], rffi.INT) + c_pthread_sigmask = external('pthread_sigmask', + [rffi.INT, c_sigset_t, c_sigset_t], rffi.INT) From noreply at buildbot.pypy.org Sun Jan 4 19:14:14 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 19:14:14 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Add signal.sigpending() Message-ID: <20150104181414.7315C1C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 
Changeset: r75232:6a4993eee75c Date: 2015-01-04 19:04 +0100 http://bitbucket.org/pypy/pypy/changeset/6a4993eee75c/ Log: Add signal.sigpending() diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -27,6 +27,7 @@ if os.name == 'posix': interpleveldefs['sigwait'] = 'interp_signal.sigwait' + interpleveldefs['sigpending'] = 'interp_signal.sigpending' interpleveldefs['pthread_kill'] = 'interp_signal.pthread_kill' interpleveldefs['pthread_sigmask'] = 'interp_signal.pthread_sigmask' interpleveldefs['SIG_BLOCK'] = 'space.wrap(interp_signal.SIG_BLOCK)' diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -383,6 +383,13 @@ signum = signum_ptr[0] return space.wrap(signum) +def sigpending(space): + with lltype.scoped_alloc(c_sigset_t.TO) as mask: + ret = c_sigpending(mask) + if ret != 0: + raise exception_from_errno(space, space.w_OSError) + return _sigset_to_signals(space, mask) + @unwrap_spec(how=int) def pthread_sigmask(space, how, w_signals): with SignalMask(space, w_signals) as sigset: diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -355,5 +355,7 @@ signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2)) posix.kill(posix.getpid(), signum1) posix.kill(posix.getpid(), signum2) + assert signal.sigpending() == set((signum1, signum2)) # Unblocking the 2 signals calls the C signal handler twice signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2)) + assert signal.sigpending() == set() diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -107,5 +107,6 @@ c_sigaddset = external('sigaddset', [c_sigset_t, rffi.INT], rffi.INT) c_sigismember = 
external('sigismember', [c_sigset_t, rffi.INT], rffi.INT) c_sigwait = external('sigwait', [c_sigset_t, rffi.INTP], rffi.INT) + c_sigpending = external('sigpending', [c_sigset_t], rffi.INT) c_pthread_sigmask = external('pthread_sigmask', [rffi.INT, c_sigset_t, c_sigset_t], rffi.INT) From noreply at buildbot.pypy.org Sun Jan 4 20:43:50 2015 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 4 Jan 2015 20:43:50 +0100 (CET) Subject: [pypy-commit] pypy default: fix @executable_path not being recognized in the rpath without the trailing slash Message-ID: <20150104194350.5D7551C0930@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r75233:12cd1202efe7 Date: 2015-01-04 11:43 -0800 http://bitbucket.org/pypy/pypy/changeset/12cd1202efe7/ Log: fix @executable_path not being recognized in the rpath without the trailing slash diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -10,7 +10,7 @@ so_ext = 'dylib' DEFAULT_CC = 'clang' - rpath_flags = ['-Wl,-rpath', '-Wl, at executable_path'] + rpath_flags = ['-Wl,-rpath', '-Wl, at executable_path/'] def _args_for_shared(self, args): return (list(self.shared_only) From noreply at buildbot.pypy.org Sun Jan 4 20:43:51 2015 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 4 Jan 2015 20:43:51 +0100 (CET) Subject: [pypy-commit] pypy default: backout 8caae57cec3 now that @executable_path is working Message-ID: <20150104194351.9DAEE1C0930@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r75234:df259a237d4f Date: 2015-01-04 11:43 -0800 http://bitbucket.org/pypy/pypy/changeset/df259a237d4f/ Log: backout 8caae57cec3 now that @executable_path is working diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -14,9 +14,7 @@ def 
_args_for_shared(self, args): return (list(self.shared_only) - + ['-dynamiclib', '-install_name', - '@executable_path/$(TARGET)', '-undefined', - 'dynamic_lookup'] + + ['-dynamiclib', '-install_name', '@rpath/$(TARGET)', '-undefined', 'dynamic_lookup'] + args) def _include_dirs_for_libffi(self): From noreply at buildbot.pypy.org Sun Jan 4 21:21:21 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 21:21:21 +0100 (CET) Subject: [pypy-commit] pypy py3.3: in release.py, import lzma instead of _lzma because of an import loop Message-ID: <20150104202121.1BE811D2380@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75235:4e2ba5029f10 Date: 2015-01-04 21:20 +0100 http://bitbucket.org/pypy/pypy/changeset/4e2ba5029f10/ Log: in release.py, import lzma instead of _lzma because of an import loop (distutils imports zip which imports lzma). + Rename the .so built by _lzma.py diff --git a/lib_pypy/_lzma.py b/lib_pypy/_lzma.py --- a/lib_pypy/_lzma.py +++ b/lib_pypy/_lzma.py @@ -3,6 +3,7 @@ # PyPy changes: # - added __getstate__() methods that raise TypeError on pickling. +# - in ffi.verify(), changed modulename to '_lzmaffi'. 
from cffi import FFI import threading @@ -296,7 +297,7 @@ include_dirs=['/opt/local/include', '/usr/local/include'], library_dirs=['/opt/local/include', '/usr/local/include'], ext_package='_lzmaffi_mods', - modulename='_compiled_module') + modulename='_lzmaffi') def _new_lzma_stream(): ret = ffi.new('lzma_stream*') diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -111,7 +111,7 @@ ''' def create_cffi_import_libraries(pypy_c, options): - modules = ['_sqlite3', '_lzma', 'audioop'] + modules = ['_sqlite3', 'lzma', 'audioop'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) if not sys.platform == 'win32': modules += ['_curses', 'syslog', '_gdbm', '_sqlite3'] From noreply at buildbot.pypy.org Sun Jan 4 22:18:38 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 22:18:38 +0100 (CET) Subject: [pypy-commit] pypy py3.3: The wakeup_fd() file now receives the signal number (instead of \0) Message-ID: <20150104211838.0F4E21C0930@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75236:bd0bc88bec39 Date: 2015-01-04 22:17 +0100 http://bitbucket.org/pypy/pypy/changeset/bd0bc88bec39/ Log: The wakeup_fd() file now receives the signal number (instead of \0) Write it carefully so that rpython/ can still be shared with pypy2.7. diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -232,7 +232,7 @@ @jit.dont_look_inside @unwrap_spec(fd=int) def set_wakeup_fd(space, fd): - """Sets the fd to be written to (with '\0') when a signal + """Sets the fd to be written to (with the signal number) when a signal comes in. Returns the old fd. A library can use this to wakeup select or poll. The previous fd is returned. 
@@ -249,7 +249,7 @@ except OSError, e: if e.errno == errno.EBADF: raise OperationError(space.w_ValueError, space.wrap("invalid fd")) - old_fd = pypysig_set_wakeup_fd(fd) + old_fd = pypysig_set_wakeup_fd(fd, False) return space.wrap(intmask(old_fd)) diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -214,7 +214,7 @@ cannot_read() posix.kill(posix.getpid(), signal.SIGINT) res = posix.read(fd_read, 1) - assert res == b'\x00' + assert res == bytes([signal.SIGINT]) cannot_read() finally: old_wakeup = signal.set_wakeup_fd(old_wakeup) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -114,7 +114,7 @@ BUILTIN_TYPES = ['int', 'str', 'float', 'tuple', 'list', 'dict', 'bytes', 'unicode', 'complex', 'slice', 'bool', 'text', 'object', - 'bytearray', 'memoryview'] + 'set', 'bytearray', 'memoryview'] class FakeObjSpace(ObjSpace): def __init__(self, config=None): diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -73,7 +73,8 @@ pypysig_default = external('pypysig_default', [rffi.INT], lltype.Void) pypysig_setflag = external('pypysig_setflag', [rffi.INT], lltype.Void) pypysig_reinstall = external('pypysig_reinstall', [rffi.INT], lltype.Void) -pypysig_set_wakeup_fd = external('pypysig_set_wakeup_fd', [rffi.INT], rffi.INT) +pypysig_set_wakeup_fd = external('pypysig_set_wakeup_fd', + [rffi.INT, rffi.INT], rffi.INT) pypysig_poll = external('pypysig_poll', [], rffi.INT, releasegil=False) # don't bother releasing the GIL around a call to pypysig_poll: it's # pointless and a performance issue diff --git a/rpython/translator/c/src/signals.c b/rpython/translator/c/src/signals.c --- a/rpython/translator/c/src/signals.c +++ b/rpython/translator/c/src/signals.c @@ -36,6 +36,7 @@ /* 
pypysig_occurred is only an optimization: it tells if any pypysig_flags could be set. */ static int wakeup_fd = -1; +static int wakeup_with_nul_byte = 1; #undef pypysig_getaddr_occurred void *pypysig_getaddr_occurred(void) @@ -92,7 +93,14 @@ #else int res; #endif - res = write(wakeup_fd, "\0", 1); + if (wakeup_with_nul_byte) + { + res = write(wakeup_fd, "\0", 1); + } else { + unsigned char byte = (unsigned char)signum; + res = write(wakeup_fd, &byte, 1); + } + /* the return value is ignored here */ } } @@ -143,9 +151,10 @@ return -1; /* no pending signal */ } -int pypysig_set_wakeup_fd(int fd) +int pypysig_set_wakeup_fd(int fd, int with_nul_byte) { int old_fd = wakeup_fd; wakeup_fd = fd; + wakeup_with_nul_byte = with_nul_byte; return old_fd; } diff --git a/rpython/translator/c/src/signals.h b/rpython/translator/c/src/signals.h --- a/rpython/translator/c/src/signals.h +++ b/rpython/translator/c/src/signals.h @@ -15,7 +15,7 @@ RPY_EXTERN void pypysig_reinstall(int signum); RPY_EXTERN -int pypysig_set_wakeup_fd(int fd); +int pypysig_set_wakeup_fd(int fd, int with_nul_byte); /* utility to poll for signals that arrived */ RPY_EXTERN From noreply at buildbot.pypy.org Sun Jan 4 23:39:07 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 4 Jan 2015 23:39:07 +0100 (CET) Subject: [pypy-commit] pypy py3.3: sigwait() should release the GIL, of course Message-ID: <20150104223907.E4F771D2904@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75237:bbc4e9886199 Date: 2015-01-04 23:38 +0100 http://bitbucket.org/pypy/pypy/changeset/bbc4e9886199/ Log: sigwait() should release the GIL, of course (I thought it was the default?) 
diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -107,7 +107,8 @@ c_sigemptyset = external('sigemptyset', [c_sigset_t], rffi.INT) c_sigaddset = external('sigaddset', [c_sigset_t, rffi.INT], rffi.INT) c_sigismember = external('sigismember', [c_sigset_t, rffi.INT], rffi.INT) - c_sigwait = external('sigwait', [c_sigset_t, rffi.INTP], rffi.INT) + c_sigwait = external('sigwait', [c_sigset_t, rffi.INTP], rffi.INT, + releasegil=True) c_sigpending = external('sigpending', [c_sigset_t], rffi.INT) c_pthread_sigmask = external('pthread_sigmask', [rffi.INT, c_sigset_t, c_sigset_t], rffi.INT) From noreply at buildbot.pypy.org Mon Jan 5 09:33:59 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Jan 2015 09:33:59 +0100 (CET) Subject: [pypy-commit] pypy optresult: fix test_optimizebasic to the new reality Message-ID: <20150105083359.0F1E71D266F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optresult Changeset: r75238:176d9cb5ad7d Date: 2015-01-05 10:33 +0200 http://bitbucket.org/pypy/pypy/changeset/176d9cb5ad7d/ Log: fix test_optimizebasic to the new reality diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -73,7 +73,7 @@ def optimize_INT_OR_or_XOR(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1 is v2: + if v1.box is v2.box: if op.getopnum() == rop.INT_OR: self.make_equal_to(op, v1) else: @@ -253,13 +253,12 @@ def optimize_INT_SUB_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1 is v2: + if v1.box is v2.box: self.make_constant_int(op, 0) return resbound = v1.getintbound().sub_bound(v2.getintbound()) if resbound.bounded(): - xxx - op = op.copy_and_change(rop.INT_SUB) + op = self.replace_op_with(op, rop.INT_SUB) 
self.emit_operation(op) # emit the op r = self.getvalue(op) r.getintbound().intersect(resbound) @@ -279,7 +278,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.getintbound().known_lt(v2.getintbound()): self.make_constant_int(op, 1) - elif v1.getintbound().known_ge(v2.getintbound()) or v1 is v2: + elif v1.getintbound().known_ge(v2.getintbound()) or v1.box is v2.box: self.make_constant_int(op, 0) else: self.emit_operation(op) @@ -289,7 +288,7 @@ v2 = self.getvalue(op.getarg(1)) if v1.getintbound().known_gt(v2.getintbound()): self.make_constant_int(op, 1) - elif v1.getintbound().known_le(v2.getintbound()) or v1 is v2: + elif v1.getintbound().known_le(v2.getintbound()) or v1.box is v2.box: self.make_constant_int(op, 0) else: self.emit_operation(op) @@ -297,7 +296,7 @@ def optimize_INT_LE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.getintbound().known_le(v2.getintbound()) or v1 is v2: + if v1.getintbound().known_le(v2.getintbound()) or v1.box is v2.box: self.make_constant_int(op, 1) elif v1.getintbound().known_gt(v2.getintbound()): self.make_constant_int(op, 0) @@ -307,7 +306,7 @@ def optimize_INT_GE(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) - if v1.getintbound().known_ge(v2.getintbound()) or v1 is v2: + if v1.getintbound().known_ge(v2.getintbound()) or v1.box is v2.box: self.make_constant_int(op, 1) elif v1.getintbound().known_lt(v2.getintbound()): self.make_constant_int(op, 0) @@ -321,7 +320,7 @@ self.make_constant_int(op, 0) elif v1.getintbound().known_lt(v2.getintbound()): self.make_constant_int(op, 0) - elif v1 is v2: + elif v1.box is v2.box: self.make_constant_int(op, 1) else: self.emit_operation(op) @@ -333,7 +332,7 @@ self.make_constant_int(op, 1) elif v1.getintbound().known_lt(v2.getintbound()): self.make_constant_int(op, 1) - elif v1 is v2: + elif v1.box is v2.box: self.make_constant_int(op, 0) else: self.emit_operation(op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py 
b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -433,6 +433,9 @@ def getvalue(self, box): return self.optimizer.getvalue(box) + def getlastop(self): + return self.optimizer._last_emitted_op + def replace_op_with(self, op, newopnum, args=None, descr=None): return self.optimizer.replace_op_with(op, newopnum, args, descr) @@ -442,8 +445,8 @@ def make_constant_int(self, box, intconst): return self.optimizer.make_constant_int(box, intconst) - def make_equal_to(self, box, value, replace=False): - return self.optimizer.make_equal_to(box, value, replace=replace) + def make_equal_to(self, box, value): + return self.optimizer.make_equal_to(box, value) def get_constant_box(self, box): return self.optimizer.get_constant_box(box) @@ -638,20 +641,17 @@ def clear_newoperations(self): self._newoperations = [] - def make_equal_to(self, box, value, replace=False): + def make_equal_to(self, box, value): assert isinstance(value, OptValue) - if replace: - try: - cur_value = self.values[box] - except KeyError: - pass - else: - assert cur_value.getlevel() != LEVEL_CONSTANT - # replacing with a different box - cur_value.copy_from(value) - return - if not replace: - assert box not in self.values + try: + cur_value = self.values[box] + except KeyError: + pass + else: + assert cur_value.getlevel() != LEVEL_CONSTANT + # replacing with a different box + cur_value.copy_from(value) + return self.values[box] = value def replace_op_with(self, op, newopnum, args=None, descr=None): @@ -663,14 +663,7 @@ return newop def make_constant(self, box, constbox): - if isinstance(constbox, ConstInt): - self.getvalue(box).make_constant(constbox) - elif isinstance(constbox, ConstPtr): - self.make_equal_to(box, ConstantPtrValue(constbox)) - elif isinstance(constbox, ConstFloat): - self.make_equal_to(box, ConstantFloatValue(constbox)) - else: - assert False + self.getvalue(box).make_constant(constbox) 
def make_constant_int(self, box, intvalue): self.make_constant(box, ConstInt(intvalue)) @@ -717,6 +710,7 @@ if clear: self.clear_newoperations() for op in self.loop.operations: + self._last_emitted_op = None self.first_optimization.propagate_forward(op) self.loop.operations = self.get_newoperations() self.loop.quasi_immutable_deps = self.quasi_immutable_deps @@ -767,10 +761,11 @@ op = self.store_final_boxes_in_guard(guard_op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True - self._last_emitted_op = orig_op + self._last_emitted_op = op self._newoperations.append(op) def get_op_replacement(self, op): + # XXX this is wrong changed = False for i, arg in enumerate(op.getarglist()): try: @@ -782,7 +777,7 @@ if box is not arg: if not changed: changed = True - op = self.replace_op_with(op, op.getopnum()) + op = op.copy_and_change(op.getopnum()) op.setarg(i, box) return op diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -44,7 +44,7 @@ op.getarglist(), op.getdescr()) oldval = self.pure_operations.get(args, None) if oldval is not None: - self.optimizer.make_equal_to(op, oldval, True) + self.optimizer.make_equal_to(op, oldval) return else: remember = op diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -971,17 +971,17 @@ p0 = new_array_clear(3, descr=complexarraydescr) setinteriorfield_gc(p0, 0, f0, descr=compleximagdescr) setinteriorfield_gc(p0, 0, f1, descr=complexrealdescr) - call(0, p0, p0, 0, 2, 1, descr=complexarraycopydescr) - f2 = getinteriorfield_gc(p0, 2, descr=complexrealdescr) - f3 = getinteriorfield_gc(p0, 2, descr=compleximagdescr) - escape(f2) - escape(f3) + 
call_n(0, p0, p0, 0, 2, 1, descr=complexarraycopydescr) + f2 = getinteriorfield_gc_f(p0, 2, descr=complexrealdescr) + f3 = getinteriorfield_gc_f(p0, 2, descr=compleximagdescr) + escape_n(f2) + escape_n(f3) finish(1) """ expected = """ [f0, f1] - escape(f1) - escape(f0) + escape_n(f1) + escape_n(f0) finish(1) """ self.optimize_loop(ops, ops) @@ -990,11 +990,11 @@ def test_nonvirtual_array_of_struct_arraycopy(self): ops = """ [p0] - call(0, p0, p0, 0, 2, 1, descr=complexarraycopydescr) - f2 = getinteriorfield_gc(p0, 2, descr=compleximagdescr) - f3 = getinteriorfield_gc(p0, 2, descr=complexrealdescr) - escape(f2) - escape(f3) + call_n(0, p0, p0, 0, 2, 1, descr=complexarraycopydescr) + f2 = getinteriorfield_gc_f(p0, 2, descr=compleximagdescr) + f3 = getinteriorfield_gc_f(p0, 2, descr=complexrealdescr) + escape_n(f2) + escape_n(f3) finish(1) """ self.optimize_loop(ops, ops) @@ -2732,6 +2732,7 @@ """ self.optimize_loop(ops, expected) self.loop.inputargs[0].setref_base(self.nodeaddr) + py.test.skip("opt boxes don't inherit values, modify the test?") self.check_expanded_fail_descr(''' p1.nextdescr = p2 where p2 is a node_vtable, valuedescr=i2 @@ -3064,8 +3065,8 @@ # p2 = virtual_ref(p1, 2) setfield_gc(p0, p2, descr=nextdescr) - call_may_force(i1, descr=mayforcevirtdescr) - guard_not_forced_n() [p2, p1] + call_may_force_n(i1, descr=mayforcevirtdescr) + guard_not_forced() [p2, p1] virtual_ref_finish(p2, p1) setfield_gc(p0, NULL, descr=nextdescr) jump(p0, i1) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -149,6 +149,7 @@ # keep self._fields, because it's all immutable anyway else: optforce.emit_operation(op) + op = optforce.getlastop() self.box = box = op # iteritems = self._fields.iteritems() @@ -331,7 +332,8 @@ # * if source_op is NEW_ARRAY, emit NEW_ARRAY_CLEAR if it's # followed by setting most 
items to zero anyway optforce.emit_operation(self.source_op) - self.box = box = self.source_op + op = optforce.getlastop() # potentially replaced + self.box = box = op for index in range(len(self._items)): subvalue = self._items[index] if subvalue is None: @@ -371,7 +373,8 @@ if not we_are_translated(): self.source_op.name = 'FORCE ' + self.source_op.name optforce.emit_operation(self.source_op) - self.box = box = self.source_op + op = optforce.getlastop() + self.box = box = op for index in range(len(self._items)): iteritems = self._items[index].iteritems() # random order is fine, except for tests @@ -462,7 +465,7 @@ if not we_are_translated(): op.name = 'FORCE ' + self.source_op.name optforce.emit_operation(self.source_op) - self.box = self.source_op + self.box = optforce.getlastop() for i in range(len(self.buffer.offsets)): # write the value offset = self.buffer.offsets[i] @@ -500,9 +503,9 @@ assert op is not None if not we_are_translated(): op.name = 'FORCE ' + self.source_op.name - self.box = self.source_op self.rawbuffer_value.force_box(optforce) optforce.emit_operation(op) + self.box = optforce.getlastop() def setitem_raw(self, offset, length, descr, value): self.rawbuffer_value.setitem_raw(self.offset+offset, length, descr, value) @@ -574,9 +577,7 @@ if self._last_guard_not_forced_2 is not None: guard_op = self._last_guard_not_forced_2 self.emit_operation(op) - v = self.getvalue(op) - guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, [], - v) + guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) i = len(self.optimizer._newoperations) - 1 assert i >= 0 self.optimizer._newoperations.insert(i, guard_op) @@ -614,10 +615,9 @@ # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, # but the point is that doing so does not force the original structure. 
newop = ResOperation(rop.NEW_WITH_VTABLE, [c_cls]) - newop.source_op = op vrefvalue = self.make_virtual(c_cls, newop) + self.optimizer.values[op] = vrefvalue token = ResOperation(rop.FORCE_TOKEN, []) - token.is_source_op = True self.emit_operation(token) vrefvalue.setfield(descr_virtual_token, self.getvalue(token)) vrefvalue.setfield(descr_forced, self.optimizer.cpu.ts.CVAL_NULLREF) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -110,7 +110,6 @@ assert self.source_op is not None lengthbox = self.getstrlen(optforce, self.mode, None) op = ResOperation(self.mode.NEWSTR, [lengthbox]) - xxx self.box = op if not we_are_translated(): op.name = 'FORCE' @@ -359,7 +358,6 @@ if string_optimizer is None: return None op = ResOperation(rop.INT_ADD, [box1, box2]) - xxx string_optimizer.emit_operation(op) return op @@ -370,7 +368,6 @@ if isinstance(box1, ConstInt): return ConstInt(box1.value - box2.value) op = ResOperation(rop.INT_SUB, [box1, box2]) - xxx string_optimizer.emit_operation(op) return op diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -4,6 +4,8 @@ from rpython.jit.codewriter import longlong class AbstractValue(object): + _repr_memo = {} + def _get_hash_(self): return compute_identity_hash(self) @@ -162,6 +164,9 @@ memo[self] = name return name + def __repr__(self): + return self.repr(self._repr_memo) + def getopname(self): try: return opname[self.getopnum()].lower() @@ -399,6 +404,9 @@ memo[self] = name return name + def __repr__(self): + return self.repr(self._repr_memo) + def getdescr(self): return None From noreply at buildbot.pypy.org Mon Jan 5 09:34:52 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Jan 2015 09:34:52 +0100 (CET) Subject: [pypy-commit] 
pypy optresult: fix llgraph backend Message-ID: <20150105083452.BC6521D286C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optresult Changeset: r75239:f3578293202a Date: 2015-01-05 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/f3578293202a/ Log: fix llgraph backend diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -46,7 +46,7 @@ newdescr = WeakrefDescr(op.getdescr()) else: newdescr = None - newop = op._copy_and_change(op.getopnum(), + newop = op.copy_and_change(op.getopnum(), map(mapping, op.getarglist()), newdescr) _cache[op] = newop From noreply at buildbot.pypy.org Mon Jan 5 10:04:33 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Jan 2015 10:04:33 +0100 (CET) Subject: [pypy-commit] pypy optresult: start whacking at the backend Message-ID: <20150105090433.A25D11D2380@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optresult Changeset: r75240:035392d23801 Date: 2015-01-05 11:04 +0200 http://bitbucket.org/pypy/pypy/changeset/035392d23801/ Log: start whacking at the backend diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -734,7 +734,7 @@ # any instance field, we can use a fake object class Fake(cls): pass - op = Fake(None) + op = Fake() return op.is_comparison() or op.is_ovf() def valid_addressing_size(size): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -2,8 +2,8 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.lltypesystem import llmemory, lltype from rpython.jit.metainterp import history -from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr, BoxInt 
-from rpython.jit.metainterp.resoperation import ResOperation, rop +from rpython.jit.metainterp.history import ConstInt, ConstPtr +from rpython.jit.metainterp.resoperation import ResOperation, rop, OpHelpers from rpython.jit.codewriter import heaptracker from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr,\ @@ -96,7 +96,7 @@ elif op.getopnum() == rop.SETARRAYITEM_GC: self.consider_setarrayitem_gc(op) # ---------- call assembler ----------- - if op.getopnum() == rop.CALL_ASSEMBLER: + if OpHelpers.is_call_assembler(op.getopnum()): self.handle_call_assembler(op) continue if op.getopnum() == rop.JUMP or op.getopnum() == rop.FINISH: @@ -199,11 +199,11 @@ def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) size = descr.size - if self.gen_malloc_nursery(size, op.result): + if self.gen_malloc_nursery(size, op): self.gen_initialize_tid(op.result, descr.tid) else: - self.gen_malloc_fixedsize(size, descr.tid, op.result) - self.clear_gc_fields(descr, op.result) + self.gen_malloc_fixedsize(size, descr.tid, op) + self.clear_gc_fields(descr, op) def handle_new_array(self, arraydescr, op, kind=FLAG_ARRAY): v_length = op.getarg(0) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1257,10 +1257,13 @@ self.mc.SET_ir(rx86.Conditions['E'], rl.value) self.mc.MOVZX8(resloc, rl) - def genop_same_as(self, op, arglocs, resloc): + def _genop_same_as(self, op, arglocs, resloc): self.mov(arglocs[0], resloc) - genop_cast_ptr_to_int = genop_same_as - genop_cast_int_to_ptr = genop_same_as + genop_same_as_i = _genop_same_as + genop_same_as_r = _genop_same_as + genop_same_as_f = _genop_same_as + genop_cast_ptr_to_int = _genop_same_as + genop_cast_int_to_ptr = _genop_same_as def genop_int_force_ge_zero(self, op, arglocs, resloc): self.mc.TEST(arglocs[0], arglocs[0]) @@ 
-1417,17 +1420,24 @@ else: not_implemented("save_into_mem size = %d" % size) - def genop_getfield_gc(self, op, arglocs, resloc): + def _genop_getfield_gc(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc, sign_loc = arglocs assert isinstance(size_loc, ImmedLoc) source_addr = AddressLoc(base_loc, ofs_loc) self.load_from_mem(resloc, source_addr, size_loc, sign_loc) - genop_getfield_raw = genop_getfield_gc - genop_getfield_raw_pure = genop_getfield_gc - genop_getfield_gc_pure = genop_getfield_gc + genop_getfield_gc_i = _genop_getfield_gc + genop_getfield_gc_r = _genop_getfield_gc + genop_getfield_gc_f = _genop_getfield_gc + genop_getfield_raw_i = _genop_getfield_gc + genop_getfield_raw_f = _genop_getfield_gc + genop_getfield_raw_pure_i = _genop_getfield_gc + genop_getfield_raw_pure_f = _genop_getfield_gc + genop_getfield_gc_pure_i = _genop_getfield_gc + genop_getfield_gc_pure_r = _genop_getfield_gc + genop_getfield_gc_pure_f = _genop_getfield_gc - def genop_getarrayitem_gc(self, op, arglocs, resloc): + def _genop_getarrayitem_gc(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) assert isinstance(size_loc, ImmedLoc) @@ -1435,15 +1445,24 @@ src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale) self.load_from_mem(resloc, src_addr, size_loc, sign_loc) - genop_getarrayitem_gc_pure = genop_getarrayitem_gc - genop_getarrayitem_raw = genop_getarrayitem_gc - genop_getarrayitem_raw_pure = genop_getarrayitem_gc + genop_getarrayitem_gc_i = _genop_getarrayitem_gc + genop_getarrayitem_gc_r = _genop_getarrayitem_gc + genop_getarrayitem_gc_f = _genop_getarrayitem_gc + genop_getarrayitem_gc_pure_i = _genop_getarrayitem_gc + genop_getarrayitem_gc_pure_r = _genop_getarrayitem_gc + genop_getarrayitem_gc_pure_f = _genop_getarrayitem_gc + genop_getarrayitem_raw_i = _genop_getarrayitem_gc + genop_getarrayitem_raw_f = _genop_getarrayitem_gc + genop_getarrayitem_raw_pure_i = _genop_getarrayitem_gc + 
genop_getarrayitem_raw_pure_f = _genop_getarrayitem_gc - def genop_raw_load(self, op, arglocs, resloc): + def _genop_raw_load(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0) self.load_from_mem(resloc, src_addr, size_loc, sign_loc) + genop_raw_load_i = _genop_raw_load + genop_raw_load_f = _genop_raw_load def _imul_const_scaled(self, mc, targetreg, sourcereg, itemsize): """Produce one operation to do roughly @@ -1490,13 +1509,16 @@ assert isinstance(ofs_loc, ImmedLoc) return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value) - def genop_getinteriorfield_gc(self, op, arglocs, resloc): + def _genop_getinteriorfield_gc(self, op, arglocs, resloc): (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, index_loc, temp_loc, sign_loc) = arglocs src_addr = self._get_interiorfield_addr(temp_loc, index_loc, itemsize_loc, base_loc, ofs_loc) self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) + genop_getinteriorfield_gc_i = _genop_getinteriorfield_gc + genop_getinteriorfield_gc_r = _genop_getinteriorfield_gc + genop_getinteriorfield_gc_f = _genop_getinteriorfield_gc def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. 
This should @@ -1919,8 +1941,12 @@ guard_token.pos_jump_offset = self.mc.get_relative_pos() - 4 self.pending_guard_tokens.append(guard_token) - def genop_call(self, op, arglocs, resloc): + def _genop_real_call(self, op, arglocs, resloc): self._genop_call(op, arglocs, resloc) + genop_call_i = _genop_real_call + genop_call_r = _genop_real_call + genop_call_f = _genop_real_call + genop_call_n = _genop_real_call def _genop_call(self, op, arglocs, resloc, is_call_release_gil=False): from rpython.jit.backend.llsupport.descr import CallDescr @@ -1955,24 +1981,32 @@ self.mc.CMP_bi(ofs, 0) self.implement_guard(guard_token, 'NE') - def genop_guard_call_may_force(self, op, guard_op, guard_token, + def _genop_guard_call_may_force(self, op, guard_op, guard_token, arglocs, result_loc): self._store_force_index(guard_op) self._genop_call(op, arglocs, result_loc) self._emit_guard_not_forced(guard_token) + genop_guard_call_may_force_i = _genop_guard_call_may_force + genop_guard_call_may_force_r = _genop_guard_call_may_force + genop_guard_call_may_force_f = _genop_guard_call_may_force + genop_guard_call_may_force_n = _genop_guard_call_may_force - def genop_guard_call_release_gil(self, op, guard_op, guard_token, + def _genop_guard_call_release_gil(self, op, guard_op, guard_token, arglocs, result_loc): self._store_force_index(guard_op) self._genop_call(op, arglocs, result_loc, is_call_release_gil=True) self._emit_guard_not_forced(guard_token) + genop_guard_call_release_gil_i = _genop_guard_call_release_gil + genop_guard_call_release_gil_r = _genop_guard_call_release_gil + genop_guard_call_release_gil_f = _genop_guard_call_release_gil + genop_guard_call_release_gil_n = _genop_guard_call_release_gil def imm(self, v): return imm(v) # ------------------- CALL ASSEMBLER -------------------------- - def genop_guard_call_assembler(self, op, guard_op, guard_token, + def _genop_guard_call_assembler(self, op, guard_op, guard_token, arglocs, result_loc): if len(arglocs) == 2: [argloc, vloc] = 
arglocs @@ -1981,6 +2015,10 @@ vloc = self.imm(0) self.call_assembler(op, guard_op, argloc, vloc, result_loc, eax) self._emit_guard_not_forced(guard_token) + genop_guard_call_assembler_i = _genop_guard_call_assembler + genop_guard_call_assembler_r = _genop_guard_call_assembler + genop_guard_call_assembler_f = _genop_guard_call_assembler + genop_guard_call_assembler_n = _genop_guard_call_assembler def _call_assembler_emit_call(self, addr, argloc, _): threadlocal_loc = RawEspLoc(THREADLOCAL_OFS, INT) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -23,7 +23,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop, ResOperation +from rpython.jit.metainterp.resoperation import rop, OpHelpers from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint @@ -757,7 +757,7 @@ [self.loc(op.getarg(i)) for i in range(op.numargs())], guard_not_forced_op=guard_not_forced_op) - def consider_call(self, op): + def _consider_real_call(self, op): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex if oopspecindex != EffectInfo.OS_NONE: @@ -789,21 +789,37 @@ if oopspecindex == EffectInfo.OS_MATH_READ_TIMESTAMP: return self._consider_math_read_timestamp(op) self._consider_call(op) + consider_call_i = _consider_real_call + consider_call_r = _consider_real_call + consider_call_f = _consider_real_call + consider_call_n = _consider_real_call - def consider_call_may_force(self, op, guard_op): + def _consider_call_may_force(self, op, guard_op): assert guard_op is not None self._consider_call(op, guard_op) + consider_call_may_force_i = _consider_call_may_force + 
consider_call_may_force_r = _consider_call_may_force + consider_call_may_force_f = _consider_call_may_force + consider_call_may_force_n = _consider_call_may_force - def consider_call_release_gil(self, op, guard_op): + def _consider_call_release_gil(self, op, guard_op): assert guard_op is not None self._consider_call(op, guard_op) - + consider_call_release_gil_i = _consider_call_release_gil + consider_call_release_gil_r = _consider_call_release_gil + consider_call_release_gil_f = _consider_call_release_gil + consider_call_release_gil_n = _consider_call_release_gil + def consider_call_malloc_gc(self, op): self._consider_call(op) - def consider_call_assembler(self, op, guard_op): + def _consider_call_assembler(self, op, guard_op): locs = self.locs_for_call_assembler(op, guard_op) self._call(op, locs, guard_not_forced_op=guard_op) + consider_call_assembler_i = _consider_call_assembler + consider_call_assembler_r = _consider_call_assembler + consider_call_assembler_f = _consider_call_assembler + consider_call_assembler_n = _consider_call_assembler def consider_cond_call_gc_wb(self, op): assert op.result is None @@ -1012,7 +1028,7 @@ consider_setarrayitem_raw = consider_setarrayitem_gc consider_raw_store = consider_setarrayitem_gc - def consider_getfield_gc(self, op): + def _consider_getfield_gc(self, op): ofs, size, sign = unpack_fielddescr(op.getdescr()) ofs_loc = imm(ofs) size_loc = imm(size) @@ -1025,15 +1041,22 @@ sign_loc = imm0 self.perform(op, [base_loc, ofs_loc, size_loc, sign_loc], result_loc) - consider_getfield_raw = consider_getfield_gc - consider_getfield_raw_pure = consider_getfield_gc - consider_getfield_gc_pure = consider_getfield_gc + consider_getfield_gc_i = _consider_getfield_gc + consider_getfield_gc_r = _consider_getfield_gc + consider_getfield_gc_f = _consider_getfield_gc + consider_getfield_raw_i = _consider_getfield_gc + consider_getfield_raw_f = _consider_getfield_gc + consider_getfield_raw_pure_i = _consider_getfield_gc + 
consider_getfield_raw_pure_f = _consider_getfield_gc + consider_getfield_gc_pure_i = _consider_getfield_gc + consider_getfield_gc_pure_r = _consider_getfield_gc + consider_getfield_gc_pure_f = _consider_getfield_gc def consider_increment_debug_counter(self, op): base_loc = self.loc(op.getarg(0)) self.perform_discard(op, [base_loc]) - def consider_getarrayitem_gc(self, op): + def _consider_getarrayitem_gc(self, op): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) @@ -1046,12 +1069,20 @@ self.perform(op, [base_loc, ofs_loc, imm(itemsize), imm(ofs), sign_loc], result_loc) - consider_getarrayitem_raw = consider_getarrayitem_gc - consider_getarrayitem_gc_pure = consider_getarrayitem_gc - consider_getarrayitem_raw_pure = consider_getarrayitem_gc - consider_raw_load = consider_getarrayitem_gc + consider_getarrayitem_gc_i = _consider_getarrayitem_gc + consider_getarrayitem_gc_r = _consider_getarrayitem_gc + consider_getarrayitem_gc_f = _consider_getarrayitem_gc + consider_getarrayitem_raw_i = _consider_getarrayitem_gc + consider_getarrayitem_raw_f = _consider_getarrayitem_gc + consider_getarrayitem_gc_pure_i = _consider_getarrayitem_gc + consider_getarrayitem_gc_pure_r = _consider_getarrayitem_gc + consider_getarrayitem_gc_pure_f = _consider_getarrayitem_gc + consider_getarrayitem_raw_pure_i = _consider_getarrayitem_gc + consider_getarrayitem_raw_pure_f = _consider_getarrayitem_gc + consider_raw_load_i = _consider_getarrayitem_gc + consider_raw_load_f = _consider_getarrayitem_gc - def consider_getinteriorfield_gc(self, op): + def _consider_getinteriorfield_gc(self, op): t = unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, sign = imm(t[0]), imm(t[1]), imm(t[2]), t[3] if sign: @@ -1079,6 +1110,10 @@ self.perform(op, [base_loc, ofs, itemsize, fieldsize, index_loc, temp_loc, sign_loc], result_loc) + consider_getinteriorfield_gc_i = _consider_getinteriorfield_gc + 
consider_getinteriorfield_gc_r = _consider_getinteriorfield_gc + consider_getinteriorfield_gc_f = _consider_getinteriorfield_gc + def consider_int_is_true(self, op, guard_op): # doesn't need arg to be in a register argloc = self.loc(op.getarg(0)) @@ -1090,12 +1125,15 @@ consider_int_is_zero = consider_int_is_true - def consider_same_as(self, op): + def _consider_same_as(self, op): argloc = self.loc(op.getarg(0)) resloc = self.force_allocate_reg(op.result) self.perform(op, [argloc], resloc) - consider_cast_ptr_to_int = consider_same_as - consider_cast_int_to_ptr = consider_same_as + consider_cast_ptr_to_int = _consider_same_as + consider_cast_int_to_ptr = _consider_same_as + consider_same_as_i = _consider_same_as + consider_same_as_r = _consider_same_as + consider_same_as_f = _consider_same_as def consider_int_force_ge_zero(self, op): argloc = self.make_sure_var_in_reg(op.getarg(0)) @@ -1452,9 +1490,9 @@ name = name[len('consider_'):] num = getattr(rop, name.upper()) if (is_comparison_or_ovf_op(num) - or num == rop.CALL_MAY_FORCE - or num == rop.CALL_ASSEMBLER - or num == rop.CALL_RELEASE_GIL): + or OpHelpers.is_call_may_force(num) + or OpHelpers.is_call_assembler(num) + or OpHelpers.is_call_release_gil(num)): oplist_with_guard[num] = value oplist[num] = add_none_argument(value) else: diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -139,9 +139,9 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] h_ops = history.operations - label = ResOperation(rop.LABEL, inputargs, None, + label = ResOperation(rop.LABEL, inputargs, descr=TargetToken(jitcell_token)) - end_label = ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token) + end_label = ResOperation(rop.LABEL, jumpargs, descr=jitcell_token) part.operations = [label] + h_ops[start:] + [end_label] try: @@ -165,7 +165,7 @@ part.operations = [part.operations[-1]] + \ 
[inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs], - None, descr=jitcell_token)] + descr=jitcell_token)] target_token = part.operations[0].getdescr() assert isinstance(target_token, TargetToken) all_target_tokens.append(target_token) @@ -222,7 +222,7 @@ part.operations = [partial_trace.operations[-1]] + \ h_ops[start:] + \ - [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)] + [ResOperation(rop.JUMP, jumpargs, descr=loop_jitcell_token)] label = part.operations[0] orignial_label = label.clone() assert label.getopnum() == rop.LABEL @@ -236,7 +236,7 @@ assert isinstance(target_token, TargetToken) part.operations = [orignial_label] + \ [ResOperation(rop.JUMP, inputargs[:], - None, descr=loop_jitcell_token)] + descr=loop_jitcell_token)] try: optimize_trace(metainterp_sd, jitdriver_sd, part, jitdriver_sd.warmstate.enable_opts, @@ -275,6 +275,7 @@ return target_token def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): + xxx vinfo = jitdriver_sd.virtualizable_info extra_ops = [] inputargs = loop.inputargs diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -48,7 +48,7 @@ if not self.unroll: descr = op.getdescr() if isinstance(descr, JitCellToken): - return self.optimize_JUMP(op._copy_and_change(rop.JUMP)) + return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) self.last_label_descr = op.getdescr() self.emit_operation(op) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1020,3 +1020,24 @@ elif tp == 'f': return rop.CALL_F return rop.CALL_N + + @staticmethod + def is_call_assembler(opnum): + return (opnum == rop.CALL_ASSEMBLER_I or + opnum == 
rop.CALL_ASSEMBLER_R or + opnum == rop.CALL_ASSEMBLER_F or + opnum == rop.CALL_ASSEMBLER_N) + + @staticmethod + def is_call_may_force(opnum): + return (opnum == rop.CALL_MAY_FORCE_I or + opnum == rop.CALL_MAY_FORCE_R or + opnum == rop.CALL_MAY_FORCE_F or + opnum == rop.CALL_MAY_FORCE_N) + + @staticmethod + def is_call_release_gil(opnum): + return (opnum == rop.CALL_RELEASE_GIL_I or + opnum == rop.CALL_RELEASE_GIL_R or + opnum == rop.CALL_RELEASE_GIL_F or + opnum == rop.CALL_RELEASE_GIL_N) From noreply at buildbot.pypy.org Mon Jan 5 11:01:25 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Jan 2015 11:01:25 +0100 (CET) Subject: [pypy-commit] pypy default: reinstantiate the check Message-ID: <20150105100125.D03091C02FD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r75241:49a1def6fefd Date: 2015-01-05 12:00 +0200 http://bitbucket.org/pypy/pypy/changeset/49a1def6fefd/ Log: reinstantiate the check diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -164,6 +164,8 @@ assert op1.result.same_box(remap[op2.result]) else: remap[op2.result] = op1.result + if op1.getopnum() not in [rop.JUMP, rop.LABEL] and not op1.is_guard(): + assert op1.getdescr() == op2.getdescr() if op1.getfailargs() or op2.getfailargs(): assert len(op1.getfailargs()) == len(op2.getfailargs()) if strict_fail_args: From noreply at buildbot.pypy.org Mon Jan 5 13:43:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Jan 2015 13:43:11 +0100 (CET) Subject: [pypy-commit] pypy default: Fix. Obscure case causing endless troubles. See irc discussion. Message-ID: <20150105124311.D15B51C115D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75242:cd4b0dd3d1e3 Date: 2015-01-05 13:43 +0100 http://bitbucket.org/pypy/pypy/changeset/cd4b0dd3d1e3/ Log: Fix. Obscure case causing endless troubles. 
See irc discussion. diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -269,9 +269,8 @@ op = ResOperation(rop.GUARD_VALUE, [box, self.box], None) guards.append(op) elif level == LEVEL_KNOWNCLASS: - op = ResOperation(rop.GUARD_NONNULL, [box], None) - guards.append(op) - op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) + op = ResOperation(rop.GUARD_NONNULL_CLASS, + [box, self.known_class], None) guards.append(op) else: if level == LEVEL_NONNULL: diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -297,7 +297,8 @@ name = "" raise InvalidLoop('A promote of a virtual %s (a recently allocated object) never makes sense!' % name) old_guard_op = value.get_last_guard(self.optimizer) - if old_guard_op: + if old_guard_op and not isinstance(old_guard_op.getdescr(), + compile.ResumeAtPositionDescr): # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value, which is rather silly. # replace the original guard with a guard_value @@ -316,6 +317,11 @@ op = old_guard_op.copy_and_change(rop.GUARD_VALUE, args = [old_guard_op.getarg(0), op.getarg(1)], descr = descr) + # Note: we give explicitly a new descr for 'op'; this is why the + # old descr must not be ResumeAtPositionDescr (checked above). + # Better-safe-than-sorry but it should never occur: we should + # not put in short preambles guard_xxx and guard_value + # on the same box. 
self.optimizer.replace_guard(op, value) descr.make_a_counter_per_value(op) # to be safe @@ -354,7 +360,8 @@ % r) assert isinstance(value, PtrOptValue) old_guard_op = value.get_last_guard(self.optimizer) - if old_guard_op: + if old_guard_op and not isinstance(old_guard_op.getdescr(), + compile.ResumeAtPositionDescr): # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. if old_guard_op.getopnum() == rop.GUARD_NONNULL: @@ -364,6 +371,11 @@ op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, args = [old_guard_op.getarg(0), op.getarg(1)], descr=descr) + # Note: we give explicitly a new descr for 'op'; this is why the + # old descr must not be ResumeAtPositionDescr (checked above). + # Better-safe-than-sorry but it should never occur: we should + # not put in short preambles guard_nonnull and guard_class + # on the same box. self.optimizer.replace_guard(op, value) # not emitting the guard, so we have to pass None to # make_constant_class, so last_guard_pos is not updated diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7255,11 +7255,9 @@ short = """ [p0] p1 = getfield_gc(p0, descr=nextdescr) - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable)) [] + guard_nonnull_class(p1, ConstClass(node_vtable)) [] p2 = getfield_gc(p1, descr=nextdescr) - guard_nonnull(p2) [] - guard_class(p2, ConstClass(node_vtable)) [] + guard_nonnull_class(p2, ConstClass(node_vtable)) [] jump(p0) """ self.optimize_loop(ops, expected, expected_short=short) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py 
@@ -402,8 +402,7 @@ guards = value1.make_guards(box) expected = """ [p0] - guard_nonnull(p0) [] - guard_class(p0, ConstClass(node_vtable)) [] + guard_nonnull_class(p0, ConstClass(node_vtable)) [] """ self.compare(guards, expected, [box]) From noreply at buildbot.pypy.org Mon Jan 5 15:06:19 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Jan 2015 15:06:19 +0100 (CET) Subject: [pypy-commit] pypy optresult: failing test Message-ID: <20150105140619.DFC181C02FD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optresult Changeset: r75243:fae2f865fc7d Date: 2015-01-05 15:53 +0200 http://bitbucket.org/pypy/pypy/changeset/fae2f865fc7d/ Log: failing test diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -754,7 +754,7 @@ self.pendingfields = None if self.replaces_guard and orig_op in self.replaces_guard: self.replace_guard_op(self.replaces_guard[orig_op], op) - del self.replaces_guard[op] + del self.replaces_guard[orig_op] return else: guard_op = self.replace_op_with(op, op.getopnum()) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5534,6 +5534,24 @@ """ self.optimize_loop(ops, ops) + def test_replace_result_of_new(self): + ops = """ + [i0] + guard_value(i0, 2) [] + p0 = newstr(i0) + escape_n(p0) + finish() + """ + expected = """ + [i0] + guard_value(i0, 2) [] + p0 = newstr(2) + escape_n(p0) + finish() + """ + self.optimize_loop(ops, expected) + + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- 
a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -110,10 +110,10 @@ assert self.source_op is not None lengthbox = self.getstrlen(optforce, self.mode, None) op = ResOperation(self.mode.NEWSTR, [lengthbox]) - self.box = op if not we_are_translated(): op.name = 'FORCE' optforce.emit_operation(op) + self.box = optforce.getlastop() self.initialize_forced_string(optforce, self.box, CONST_0, self.mode) def initialize_forced_string(self, string_optimizer, targetbox, @@ -193,8 +193,7 @@ charbox = charvalue.force_box(string_optimizer) op = ResOperation(mode.STRSETITEM, [targetbox, offsetbox, - charbox], - None) + charbox]) string_optimizer.emit_operation(op) offsetbox = _int_add(string_optimizer, offsetbox, CONST_1) return offsetbox @@ -331,8 +330,7 @@ assert not isinstance(targetbox, Const)# ConstPtr never makes sense string_optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, - charbox], - None)) + charbox])) offsetbox = _int_add(string_optimizer, offsetbox, CONST_1) else: if need_next_offset: @@ -533,7 +531,7 @@ dst.force_box(self), ConstInt(index + dst_start), vresult.force_box(self), - ], None) + ]) self.emit_operation(new_op) else: copy_str_content(self, From noreply at buildbot.pypy.org Mon Jan 5 15:06:21 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Jan 2015 15:06:21 +0100 (CET) Subject: [pypy-commit] pypy optresult: fix the test. unskip test that shows problem with our infrastructure (and let Message-ID: <20150105140621.2D5521C02FD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optresult Changeset: r75244:f5bc3c7a6d3d Date: 2015-01-05 16:05 +0200 http://bitbucket.org/pypy/pypy/changeset/f5bc3c7a6d3d/ Log: fix the test. 
unskip test that shows problem with our infrastructure (and let it fail for now) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -2732,7 +2732,6 @@ """ self.optimize_loop(ops, expected) self.loop.inputargs[0].setref_base(self.nodeaddr) - py.test.skip("opt boxes don't inherit values, modify the test?") self.check_expanded_fail_descr(''' p1.nextdescr = p2 where p2 is a node_vtable, valuedescr=i2 diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -415,9 +415,14 @@ # if the original 'op' did not have a ConstInt as argument, # build a new one with the ConstInt argument if not isinstance(op.getarg(0), ConstInt): - op = self.replace_op_with(op, mode.NEWSTR, [length_box]) + old_op = op + op = op.copy_and_change(mode.NEWSTR, [length_box]) + else: + old_op = None vvalue = self.make_vstring_plain(op, mode) vvalue.setup(length_box.getint()) + if old_op is not None: + self.optimizer.make_equal_to(old_op, vvalue) else: self.getvalue(op).ensure_nonnull() self.emit_operation(op) From noreply at buildbot.pypy.org Mon Jan 5 16:15:09 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Jan 2015 16:15:09 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150105151509.06EBA1D266F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r558:42f2e825e69a Date: 2015-01-05 16:15 +0100 http://bitbucket.org/pypy/pypy.org/changeset/42f2e825e69a/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $58367 of $105000 (55.6%) + $58557 of $105000 (55.8%)
diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $51099 of $60000 (85.2%) + $51176 of $60000 (85.3%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $21455 of $80000 (26.8%) + $21656 of $80000 (27.1%)
From noreply at buildbot.pypy.org Mon Jan 5 20:51:34 2015 From: noreply at buildbot.pypy.org (dmalcolm) Date: Mon, 5 Jan 2015 20:51:34 +0100 (CET) Subject: [pypy-commit] pypy libgccjit-backend: Get test_ooops/test_ooops_non_gc to pass Message-ID: <20150105195134.8EF231C03FB@cobra.cs.uni-duesseldorf.de> Author: David Malcolm Branch: libgccjit-backend Changeset: r75246:207274050e18 Date: 2014-12-24 05:30 -0500 http://bitbucket.org/pypy/pypy/changeset/207274050e18/ Log: Get test_ooops/test_ooops_non_gc to pass diff --git a/rpython/jit/backend/libgccjit/assembler.py b/rpython/jit/backend/libgccjit/assembler.py --- a/rpython/jit/backend/libgccjit/assembler.py +++ b/rpython/jit/backend/libgccjit/assembler.py @@ -998,6 +998,29 @@ # + def _impl_ptr_cmp(self, resop, gcc_jit_comparison): + rval0 = self.expr_to_rvalue(resop._arg0) + rval1 = self.expr_to_rvalue(resop._arg1) + lvalres = self.expr_to_lvalue(resop.result) + resop_cmp = ( + self.ctxt.new_cast( + self.ctxt.new_comparison(gcc_jit_comparison, + rval0, rval1), + self.t_Signed) + ) + self.b_current.add_assignment(lvalres, + resop_cmp) + def emit_ptr_eq(self, resop): + self._impl_ptr_cmp(resop, self.lib.GCC_JIT_COMPARISON_EQ) + def emit_ptr_ne(self, resop): + self._impl_ptr_cmp(resop, self.lib.GCC_JIT_COMPARISON_NE) + def emit_instance_ptr_eq(self, resop): + self._impl_ptr_cmp(resop, self.lib.GCC_JIT_COMPARISON_EQ) + def emit_instance_ptr_ne(self, resop): + self._impl_ptr_cmp(resop, self.lib.GCC_JIT_COMPARISON_NE) + + # + def impl_get_lvalue_at_offset_from_ptr(self, ptr_expr, ll_offset, t_field): ptr = self.expr_to_rvalue(ptr_expr) From noreply at buildbot.pypy.org Mon Jan 5 20:51:33 2015 From: noreply at buildbot.pypy.org (dmalcolm) Date: Mon, 5 Jan 2015 20:51:33 +0100 (CET) Subject: [pypy-commit] pypy libgccjit-backend: Provide a way to turn off the comments Message-ID: <20150105195133.3B5591C03FB@cobra.cs.uni-duesseldorf.de> Author: David Malcolm Branch: libgccjit-backend Changeset: r75245:a87b97f2b383 Date: 
2014-12-24 05:20 -0500 http://bitbucket.org/pypy/pypy/changeset/a87b97f2b383/ Log: Provide a way to turn off the comments Running pytest under the profile module shows >50% wallclock time spent in str2charp, much of that in Block.add_comment. diff --git a/rpython/jit/backend/libgccjit/assembler.py b/rpython/jit/backend/libgccjit/assembler.py --- a/rpython/jit/backend/libgccjit/assembler.py +++ b/rpython/jit/backend/libgccjit/assembler.py @@ -123,7 +123,7 @@ self.set_handler(handler) def set_handler(self, handler): - print('set_handler(%r)' % handler) + #print('set_handler(%r)' % handler) # We want to write the equivalent of: # (*fn_ptr_ptr) = handler; @@ -172,6 +172,8 @@ eci = make_eci() self.lib = Library(eci) + self.add_comments = 1 + def make_context(self): self.ctxt = Context.acquire(self.lib)#self.lib.gcc_jit_context_acquire() if 0: @@ -190,11 +192,11 @@ self.ctxt.set_int_option( self.lib.GCC_JIT_INT_OPTION_OPTIMIZATION_LEVEL, r_int(2)) - if 1: + if 0: self.ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_KEEP_INTERMEDIATES, r_int(1)) - if 1: + if 0: self.ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_DUMP_EVERYTHING, r_int(1)) @@ -269,7 +271,8 @@ self.datablockwrapper.done() # finish using cpu.asmmemmgr self.datablockwrapper = None - self.ctxt.dump_to_file("/tmp/%s.c" % loopname, r_int(1)) + if 1: + self.ctxt.dump_to_file("/tmp/%s.c" % loopname, r_int(1)) #raise foo @@ -306,7 +309,8 @@ self.make_function(name, inputargs, operations) - self.ctxt.dump_to_file("/tmp/%s.c" % name, r_int(1)) + if 1: + self.ctxt.dump_to_file("/tmp/%s.c" % name, r_int(1)) jit_result = self.ctxt.compile() self.ctxt.release() @@ -413,11 +417,13 @@ #print(dir(op)) #print(repr(op.getopname())) text += '\t%s\n' % op - self.b_current.add_comment(str(text)) + if self.add_comments: + self.b_current.add_comment(str(text)) # Get initial values from input args: for idx, arg in enumerate(inputargs): - self.b_current.add_comment("inputargs[%i]: %s" % (idx, arg)) + if self.add_comments: + 
self.b_current.add_comment("inputargs[%i]: %s" % (idx, arg)) # (gdb) p *(double*)&jitframe->arg0 # $7 = 10.5 # (gdb) p *(double*)&jitframe->arg1 @@ -442,7 +448,8 @@ #print(dir(op)) #print(repr(op.getopname())) # Add a comment describing this ResOperation - self.b_current.add_comment(str(op)) + if self.add_comments: + self.b_current.add_comment(str(op)) # Compile the operation itself... methname = 'emit_%s' % op.getopname() From noreply at buildbot.pypy.org Mon Jan 5 20:51:35 2015 From: noreply at buildbot.pypy.org (dmalcolm) Date: Mon, 5 Jan 2015 20:51:35 +0100 (CET) Subject: [pypy-commit] pypy libgccjit-backend: Get test_increment_debug_counter to pass Message-ID: <20150105195135.D1E041C03FB@cobra.cs.uni-duesseldorf.de> Author: David Malcolm Branch: libgccjit-backend Changeset: r75247:72cda48c154f Date: 2015-01-05 14:59 -0500 http://bitbucket.org/pypy/pypy/changeset/72cda48c154f/ Log: Get test_increment_debug_counter to pass diff --git a/rpython/jit/backend/libgccjit/assembler.py b/rpython/jit/backend/libgccjit/assembler.py --- a/rpython/jit/backend/libgccjit/assembler.py +++ b/rpython/jit/backend/libgccjit/assembler.py @@ -192,11 +192,11 @@ self.ctxt.set_int_option( self.lib.GCC_JIT_INT_OPTION_OPTIMIZATION_LEVEL, r_int(2)) - if 0: + if 1: self.ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_KEEP_INTERMEDIATES, r_int(1)) - if 0: + if 1: self.ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_DUMP_EVERYTHING, r_int(1)) @@ -207,6 +207,7 @@ self.t_Signed = self.ctxt.get_int_type(r_int(self.sizeof_signed), r_int(1)) + self.t_signed_ptr = self.t_Signed.get_pointer() self.t_UINT = self.ctxt.get_int_type(r_int(self.sizeof_signed), r_int(0)) self.t_float = self.ctxt.get_type(self.lib.GCC_JIT_TYPE_DOUBLE) # FIXME @@ -218,11 +219,13 @@ self.u_signed = self.ctxt.new_field(self.t_Signed, "u_signed") self.u_float = self.ctxt.new_field(self.t_float, "u_float") - self.u_ptr = self.ctxt.new_field(self.t_void_ptr, "u_ptr") + self.u_void_ptr = 
self.ctxt.new_field(self.t_void_ptr, "u_void_ptr") + self.u_signed_ptr = self.ctxt.new_field(self.t_signed_ptr, "u_signed_ptr") self.t_any = self.ctxt.new_union_type ("any", [self.u_signed, self.u_float, - self.u_ptr]) + self.u_void_ptr, + self.u_signed_ptr]) def setup(self, looptoken): allblocks = self.get_asmmemmgr_blocks(looptoken) @@ -273,6 +276,8 @@ if 1: self.ctxt.dump_to_file("/tmp/%s.c" % loopname, r_int(1)) + if 0: + self.ctxt.dump_reproducer_to_file("/tmp/reproduce-%s.c" % loopname) #raise foo @@ -534,7 +539,7 @@ elif isinstance(expr, (BoxFloat, ConstFloat)): return self.u_float; elif isinstance(expr, (BoxPtr, ConstPtr)): - return self.u_ptr; + return self.u_void_ptr; else: raise NotImplementedError('unhandled expr: %s %s' % (expr, type(expr))) @@ -980,7 +985,7 @@ lvalue_tmp = self.fn.new_local(self.t_any, "tmp") lvalue_result = self.expr_to_lvalue(resop.result) self.b_current.add_assignment( - lvalue_tmp.access_field(self.u_ptr), + lvalue_tmp.access_field(self.u_void_ptr), rvalue_in) self.b_current.add_assignment( lvalue_result, @@ -994,7 +999,7 @@ rvalue_in) self.b_current.add_assignment( lvalue_result, - lvalue_tmp.as_rvalue().access_field(self.u_ptr)) + lvalue_tmp.as_rvalue().access_field(self.u_void_ptr)) # @@ -1021,6 +1026,9 @@ # + # + # '_ALWAYS_PURE_LAST', # ----- end of always_pure operations ----- + def impl_get_lvalue_at_offset_from_ptr(self, ptr_expr, ll_offset, t_field): ptr = self.expr_to_rvalue(ptr_expr) @@ -1081,6 +1089,25 @@ self.ctxt.new_cast(field_lvalue.as_rvalue(), self.get_type_for_expr(resop.result))) + # '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- + + def cast_signed_to_ptr_to_signed(self, rvalue): + tmp = self.fn.new_local(self.t_any, 'tmp') + self.b_current.add_assignment(tmp.access_field(self.u_signed), + rvalue) + return tmp.access_field(self.u_signed_ptr).as_rvalue() + + def emit_increment_debug_counter(self, resop): + # Equivalent of: + # signed *counter; + # (*counter)++; + ptr = 
self.expr_to_rvalue(resop._arg0) + ptr = self.cast_signed_to_ptr_to_signed(ptr) + self.b_current.add_assignment_op( + ptr.dereference(), + self.lib.GCC_JIT_BINARY_OP_PLUS, + self.ctxt.one(self.t_Signed)) + def emit_setfield_gc(self, resop): #print(repr(resop)) assert isinstance(resop._arg0, (BoxPtr, ConstPtr)) diff --git a/rpython/jit/backend/libgccjit/rffi_bindings.py b/rpython/jit/backend/libgccjit/rffi_bindings.py --- a/rpython/jit/backend/libgccjit/rffi_bindings.py +++ b/rpython/jit/backend/libgccjit/rffi_bindings.py @@ -124,6 +124,10 @@ CCHARP, INT]), + (lltype.Void, + 'gcc_jit_context_dump_reproducer_to_file', [self.GCC_JIT_CONTEXT_P, + CCHARP]), + (CCHARP, 'gcc_jit_context_get_last_error', [self.GCC_JIT_CONTEXT_P]), @@ -383,6 +387,14 @@ self.GCC_JIT_LVALUE_P, self.GCC_JIT_RVALUE_P]), (lltype.Void, + 'gcc_jit_block_add_assignment_op', [self.GCC_JIT_BLOCK_P, + self.GCC_JIT_LOCATION_P, + self.GCC_JIT_LVALUE_P, + # FIXME: + # enum gcc_jit_binary_op: + INT, + self.GCC_JIT_RVALUE_P]), + (lltype.Void, 'gcc_jit_block_add_comment', [self.GCC_JIT_BLOCK_P, self.GCC_JIT_LOCATION_P, CCHARP]), @@ -543,6 +555,12 @@ update_locations) free_charp(path_charp) + def dump_reproducer_to_file(self, path): + path_charp = str2charp(path) + self.lib.gcc_jit_context_dump_reproducer_to_file(self.inner_ctxt, + path_charp) + free_charp(path_charp) + def get_type(self, r_enum): return Type(self.lib, self, @@ -649,6 +667,13 @@ self.inner_ctxt, numeric_type.inner_type)) + def one(self, numeric_type): + return RValue(self.lib, + self, + self.lib.gcc_jit_context_one( + self.inner_ctxt, + numeric_type.inner_type)) + def new_rvalue_from_double(self, type_, llvalue): return RValue(self.lib, self, @@ -810,6 +835,7 @@ raise LibgccjitError(ctxt) Wrapper.__init__(self, lib) self.inner_obj = inner_obj + self.ctxt = ctxt class Type(Object): def __init__(self, lib, ctxt, inner_type): @@ -819,7 +845,7 @@ def get_pointer(self): return Type(self.lib, - self, + self.ctxt, 
self.lib.gcc_jit_type_get_pointer(self.inner_type)) class Field(Object): @@ -837,7 +863,7 @@ def as_type(self): return Type(self.lib, - self, + self.ctxt, self.lib.gcc_jit_struct_as_type(self.inner_struct)) @@ -861,12 +887,12 @@ def get_type(self): return Type(self.lib, - self, + self.ctxt, self.lib.gcc_jit_rvalue_get_type(self.inner_rvalue)) def access_field(self, field): return RValue(self.lib, - self, + self.ctxt, self.lib.gcc_jit_rvalue_access_field( self.inner_rvalue, self.lib.null_location_ptr, @@ -874,7 +900,7 @@ def dereference_field(self, field): return LValue(self.lib, - self, + self.ctxt, self.lib.gcc_jit_rvalue_dereference_field( self.inner_rvalue, self.lib.null_location_ptr, @@ -882,7 +908,7 @@ def dereference(self): return LValue(self.lib, - self, + self.ctxt, self.lib.gcc_jit_rvalue_dereference( self.inner_rvalue, self.lib.null_location_ptr)) @@ -895,12 +921,12 @@ def as_rvalue(self): return RValue(self.lib, - self, + self.ctxt, self.lib.gcc_jit_lvalue_as_rvalue(self.inner_lvalue)) def access_field(self, field): return LValue(self.lib, - self, + self.ctxt, self.lib.gcc_jit_lvalue_access_field ( self.inner_lvalue, self.lib.null_location_ptr, @@ -908,7 +934,7 @@ def get_address(self): return RValue(self.lib, - self, + self.ctxt, self.lib.gcc_jit_lvalue_get_address( self.inner_lvalue, self.lib.null_location_ptr)) @@ -921,7 +947,7 @@ def as_rvalue(self): return RValue(self.lib, - self, + self.ctxt, self.lib.gcc_jit_param_as_rvalue(self.inner_param)) class Function(Object): @@ -938,7 +964,7 @@ type_.inner_type, name_charp) free_charp(name_charp) - return LValue(self.lib, self, local) + return LValue(self.lib, self.ctxt, local) def new_block(self, name=None): if name is not None: @@ -949,7 +975,7 @@ name_charp) if name_charp: free_charp(name_charp) - return Block(self.lib, self, block) + return Block(self.lib, self.ctxt, block) class Block(Object): def __init__(self, lib, ctxt, inner_block): @@ -964,6 +990,13 @@ lvalue.inner_lvalue, rvalue.inner_rvalue) + 
def add_assignment_op(self, lvalue, op, rvalue): + self.lib.gcc_jit_block_add_assignment_op(self.inner_block, + self.lib.null_location_ptr, + lvalue.inner_lvalue, + op, + rvalue.inner_rvalue) + def add_comment(self, text): text_charp = str2charp(text) self.lib.gcc_jit_block_add_comment(self.inner_block, From noreply at buildbot.pypy.org Mon Jan 5 22:58:07 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 5 Jan 2015 22:58:07 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Port CPython issue18109, which has no test :-/ Message-ID: <20150105215807.3A09B1D266F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75248:bbfa38c0f158 Date: 2015-01-05 22:34 +0100 http://bitbucket.org/pypy/pypy/changeset/bbfa38c0f158/ Log: Port CPython issue18109, which has no test :-/ diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -19,7 +19,7 @@ res = rsocket.gethostname() except SocketError, e: raise converted_error(space, e) - return space.wrap(res) + return space.fsdecode(space.wrapbytes(res)) @unwrap_spec(host=str) def gethostbyname(space, host): From noreply at buildbot.pypy.org Mon Jan 5 22:58:08 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 5 Jan 2015 22:58:08 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Function.__repr__ now uses the __qualname__ attribute. Message-ID: <20150105215808.9389F1D266F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75249:c27d75f38838 Date: 2015-01-05 22:35 +0100 http://bitbucket.org/pypy/pypy/changeset/c27d75f38838/ Log: Function.__repr__ now uses the __qualname__ attribute. 
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -307,7 +307,7 @@ def _make_function(self, code, num_defaults=0, qualname=None): """Emit the opcodes to turn a code object into a function.""" - w_qualname = self.space.wrap(qualname or code.co_name) + w_qualname = self.space.wrap((qualname or code.co_name).decode('utf-8')) if code.co_freevars: # Load cell and free vars to pass on. for free in code.co_freevars: diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -247,8 +247,7 @@ return self.call_args(__args__) def descr_function_repr(self): - return self.getrepr(self.space, u'function %s' % - (self.name.decode('utf-8'),)) + return self.getrepr(self.space, u'function %s' % self.qualname) # delicate _all = {'': None} diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -153,7 +153,8 @@ """ def 日本(): pass - assert repr(日本).startswith('.日本 at ') assert 日本.__name__ == '日本' """ From noreply at buildbot.pypy.org Tue Jan 6 00:24:29 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 6 Jan 2015 00:24:29 +0100 (CET) Subject: [pypy-commit] pypy py3.3: CPython Issue #18025: Fixed a segfault in io.BufferedIOBase.readinto() when raw Message-ID: <20150105232429.91DF11D286C@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75250:4eebc41245ad Date: 2015-01-05 23:45 +0100 http://bitbucket.org/pypy/pypy/changeset/4eebc41245ad/ Log: CPython Issue #18025: Fixed a segfault in io.BufferedIOBase.readinto() when raw stream's read() returns more bytes than requested. 
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -77,6 +77,11 @@ raise OperationError(space.w_TypeError, space.wrap( "read() should return bytes")) data = space.bytes_w(w_data) + if len(data) > length: + raise oefmt(space.w_ValueError, + "read() returned too much data: " + "%d bytes requested, %d returned", + length, len(data)) rwbuffer.setslice(0, data) return space.wrap(len(data)) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -149,6 +149,15 @@ f.close() assert a == b'a\nb\ncxxxxx' + def test_readinto_buffer_overflow(self): + import _io + class BadReader(_io._BufferedIOBase): + def read(self, n=-1): + return b'x' * 10**6 + bufio = BadReader() + b = bytearray(2) + raises(ValueError, bufio.readinto, b) + def test_seek(self): import _io raw = _io.FileIO(self.tmpfile) From noreply at buildbot.pypy.org Tue Jan 6 00:31:21 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 6 Jan 2015 00:31:21 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Unskip test about ast.Try, and fix it. Message-ID: <20150105233121.ED4701D286C@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75251:8c6a64ce91ed Date: 2015-01-06 00:30 +0100 http://bitbucket.org/pypy/pypy/changeset/8c6a64ce91ed/ Log: Unskip test about ast.Try, and fix it. 
diff --git a/pypy/interpreter/astcompiler/test/test_validate.py b/pypy/interpreter/astcompiler/test/test_validate.py --- a/pypy/interpreter/astcompiler/test/test_validate.py +++ b/pypy/interpreter/astcompiler/test/test_validate.py @@ -189,24 +189,23 @@ self.stmt(r, "must have Load context") def test_try(self): - skip("enable when parser uses the new Try construct") p = ast.Pass(0, 0) - t = ast.Try([], [], [], [p]) + t = ast.Try([], [], [], [p], 0, 0) self.stmt(t, "empty body on Try") - t = ast.Try([ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], [], [], [p]) + t = ast.Try([ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], [], [], [p], 0, 0) self.stmt(t, "must have Load context") - t = ast.Try([p], [], [], []) + t = ast.Try([p], [], [], [], 0, 0) self.stmt(t, "Try has neither except handlers nor finalbody") - t = ast.Try([p], [], [p], [p]) + t = ast.Try([p], [], [p], [p], 0, 0) self.stmt(t, "Try has orelse but no except handlers") - t = ast.Try([p], [ast.ExceptHandler(None, "x", [])], [], []) + t = ast.Try([p], [ast.ExceptHandler(None, "x", [], 0, 0)], [], [], 0, 0) self.stmt(t, "empty body on ExceptHandler") - e = [ast.ExceptHandler(ast.Name("x", ast.Store, 0, 0), "y", [p])] - self.stmt(ast.Try([p], e, [], []), "must have Load context") - e = [ast.ExceptHandler(None, "x", [p])] - t = ast.Try([p], e, [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], [p]) + e = [ast.ExceptHandler(ast.Name("x", ast.Store, 0, 0), "y", [p], 0, 0)] + self.stmt(ast.Try([p], e, [], [], 0, 0), "must have Load context") + e = [ast.ExceptHandler(None, "x", [p], 0, 0)] + t = ast.Try([p], e, [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], [p], 0, 0) self.stmt(t, "must have Load context") - t = ast.Try([p], e, [p], [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)]) + t = ast.Try([p], e, [p], [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], 0, 0) self.stmt(t, "must have Load context") def test_assert(self): diff --git a/pypy/interpreter/astcompiler/validate.py 
b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -247,7 +247,7 @@ "Try has neither except handlers nor finalbody") if not node.handlers and node.orelse: raise ValidationError( - "Try has orelse but not except handlers") + "Try has orelse but no except handlers") for handler in node.handlers: handler.walkabout(self) self._validate_stmts(node.orelse) From noreply at buildbot.pypy.org Tue Jan 6 15:37:18 2015 From: noreply at buildbot.pypy.org (dmalcolm) Date: Tue, 6 Jan 2015 15:37:18 +0100 (CET) Subject: [pypy-commit] pypy libgccjit-backend: Don't rely on dynamic linker name lookup when locating jump targets Message-ID: <20150106143718.288681C363D@cobra.cs.uni-duesseldorf.de> Author: David Malcolm Branch: libgccjit-backend Changeset: r75252:2f581f6a1f99 Date: 2015-01-05 16:02 -0500 http://bitbucket.org/pypy/pypy/changeset/2f581f6a1f99/ Log: Don't rely on dynamic linker name lookup when locating jump targets Instead, jump through a saved function pointer. Doing so avoids the need to use RTLD_GLOBAL in dlopen in libgccjit's jit-playback.c. 
diff --git a/rpython/jit/backend/libgccjit/assembler.py b/rpython/jit/backend/libgccjit/assembler.py --- a/rpython/jit/backend/libgccjit/assembler.py +++ b/rpython/jit/backend/libgccjit/assembler.py @@ -42,6 +42,7 @@ self.name = name self.rffi_fn = rffi_fn self.patchpoints = [] + self.fn_ptr = None def new_block(self, name): return self.rffi_fn.new_block(name) @@ -291,6 +292,7 @@ fn_ptr = jit_result.get_code(loopname) looptoken._ll_function_addr = fn_ptr + self.fn.fn_ptr = fn_ptr looptoken.compiled_loop_token._ll_initial_locs = initial_locs @@ -326,6 +328,7 @@ # Patch the patchpoint for "faildescr" to point to our new code fn_ptr = jit_result.get_code(name) self.patchpoint_for_descr[faildescr].set_handler(fn_ptr) + self.fn.fn_ptr = fn_ptr def make_JITFRAME_struct(self, inputargs, operations): #print(jitframe.JITFRAME) @@ -586,19 +589,21 @@ b_dest = self.block_for_label_descr[jumpop.getdescr()] self.b_current.end_with_jump(b_dest) else: - # Implement as a tail-call: - # Sadly, we need to "import" the fn by name, since it was - # created on a different gcc_jit_context. + # Implement as a tail-call + # The function was created on a different gcc_jit_context, + # but we know its address in memory, so we can implement + # it as a call through a function pointer. 
p = Params(self) - other_fn = self.ctxt.new_function( - self.lib.GCC_JIT_FUNCTION_IMPORTED, + fn_ptr_type = self.ctxt.new_function_ptr_type( self.t_jit_frame_ptr, - dest_fn.name, - p.paramlist, - r_int(0)) + p.paramtypes, + r_int(0)) # is_variadic + ptr_to_other_fn = self.ctxt.new_rvalue_from_ptr(fn_ptr_type, + dest_fn.fn_ptr) args = [param.as_rvalue() for param in self.loop_params.paramlist] - call = self.ctxt.new_call(other_fn, args) + call = self.ctxt.new_call_through_ptr(ptr_to_other_fn, + args) self.b_current.end_with_return(call) def emit_finish(self, resop): From noreply at buildbot.pypy.org Tue Jan 6 15:37:19 2015 From: noreply at buildbot.pypy.org (dmalcolm) Date: Tue, 6 Jan 2015 15:37:19 +0100 (CET) Subject: [pypy-commit] pypy libgccjit-backend: Introduce AssemblerLibgccjit.shared_ctxt to avoid rebuilding the basic types each time Message-ID: <20150106143719.43E321C363D@cobra.cs.uni-duesseldorf.de> Author: David Malcolm Branch: libgccjit-backend Changeset: r75253:619395643793 Date: 2015-01-05 16:28 -0500 http://bitbucket.org/pypy/pypy/changeset/619395643793/ Log: Introduce AssemblerLibgccjit.shared_ctxt to avoid rebuilding the basic types each time diff --git a/rpython/jit/backend/libgccjit/assembler.py b/rpython/jit/backend/libgccjit/assembler.py --- a/rpython/jit/backend/libgccjit/assembler.py +++ b/rpython/jit/backend/libgccjit/assembler.py @@ -175,59 +175,65 @@ self.add_comments = 1 - def make_context(self): - self.ctxt = Context.acquire(self.lib)#self.lib.gcc_jit_context_acquire() + # Create "shared_ctxt" to hold the common types + # needed by every loop/bridge + # Compiling a loop or bridge will involve creating a child + # context of the shared_ctxt. 
+ self.shared_ctxt = Context.acquire(self.lib) if 0: - self.ctxt.set_bool_option( + self.shared_ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_DUMP_INITIAL_TREE, r_int(1)) if 0: - self.ctxt.set_bool_option( + self.shared_ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_DUMP_INITIAL_GIMPLE, r_int(1)) if 1: - self.ctxt.set_bool_option( + self.shared_ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_DEBUGINFO, r_int(1)) if 1: - self.ctxt.set_int_option( + self.shared_ctxt.set_int_option( self.lib.GCC_JIT_INT_OPTION_OPTIMIZATION_LEVEL, r_int(2)) if 1: - self.ctxt.set_bool_option( + self.shared_ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_KEEP_INTERMEDIATES, r_int(1)) if 1: - self.ctxt.set_bool_option( + self.shared_ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_DUMP_EVERYTHING, r_int(1)) if 0: - self.ctxt.set_bool_option( + self.shared_ctxt.set_bool_option( self.lib.GCC_JIT_BOOL_OPTION_DUMP_GENERATED_CODE, r_int(1)) - self.t_Signed = self.ctxt.get_int_type(r_int(self.sizeof_signed), + self.t_Signed = self.shared_ctxt.get_int_type(r_int(self.sizeof_signed), r_int(1)) self.t_signed_ptr = self.t_Signed.get_pointer() - self.t_UINT = self.ctxt.get_int_type(r_int(self.sizeof_signed), + self.t_UINT = self.shared_ctxt.get_int_type(r_int(self.sizeof_signed), r_int(0)) - self.t_float = self.ctxt.get_type(self.lib.GCC_JIT_TYPE_DOUBLE) # FIXME - self.t_bool = self.ctxt.get_type(self.lib.GCC_JIT_TYPE_BOOL) - self.t_void_ptr = self.ctxt.get_type(self.lib.GCC_JIT_TYPE_VOID_PTR) - self.t_void = self.ctxt.get_type(self.lib.GCC_JIT_TYPE_VOID) - self.t_char_ptr = self.ctxt.get_type( + self.t_float = self.shared_ctxt.get_type(self.lib.GCC_JIT_TYPE_DOUBLE) # FIXME + self.t_bool = self.shared_ctxt.get_type(self.lib.GCC_JIT_TYPE_BOOL) + self.t_void_ptr = self.shared_ctxt.get_type(self.lib.GCC_JIT_TYPE_VOID_PTR) + self.t_void = self.shared_ctxt.get_type(self.lib.GCC_JIT_TYPE_VOID) + self.t_char_ptr = self.shared_ctxt.get_type( self.lib.GCC_JIT_TYPE_CHAR).get_pointer() - 
self.u_signed = self.ctxt.new_field(self.t_Signed, "u_signed") - self.u_float = self.ctxt.new_field(self.t_float, "u_float") - self.u_void_ptr = self.ctxt.new_field(self.t_void_ptr, "u_void_ptr") - self.u_signed_ptr = self.ctxt.new_field(self.t_signed_ptr, "u_signed_ptr") - self.t_any = self.ctxt.new_union_type ("any", + self.u_signed = self.shared_ctxt.new_field(self.t_Signed, "u_signed") + self.u_float = self.shared_ctxt.new_field(self.t_float, "u_float") + self.u_void_ptr = self.shared_ctxt.new_field(self.t_void_ptr, "u_void_ptr") + self.u_signed_ptr = self.shared_ctxt.new_field(self.t_signed_ptr, "u_signed_ptr") + self.t_any = self.shared_ctxt.new_union_type ("any", [self.u_signed, self.u_float, self.u_void_ptr, self.u_signed_ptr]) + def make_context(self): + self.ctxt = self.shared_ctxt.new_child_context() + def setup(self, looptoken): allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, diff --git a/rpython/jit/backend/libgccjit/rffi_bindings.py b/rpython/jit/backend/libgccjit/rffi_bindings.py --- a/rpython/jit/backend/libgccjit/rffi_bindings.py +++ b/rpython/jit/backend/libgccjit/rffi_bindings.py @@ -99,6 +99,9 @@ (self.GCC_JIT_CONTEXT_P, 'gcc_jit_context_acquire', []), + (self.GCC_JIT_CONTEXT_P, + 'gcc_jit_context_new_child_context', [self.GCC_JIT_CONTEXT_P]), + (lltype.Void, 'gcc_jit_context_release', [self.GCC_JIT_CONTEXT_P]), @@ -530,6 +533,10 @@ def acquire(lib): return Context(lib, lib.gcc_jit_context_acquire()) + def new_child_context(self): + return Context(self.lib, + self.lib.gcc_jit_context_new_child_context(self.inner_ctxt)) + def release(self): self.lib.gcc_jit_context_release(self.inner_ctxt) From noreply at buildbot.pypy.org Tue Jan 6 22:05:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Jan 2015 22:05:07 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150106210507.610BA1D28A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo 
Branch: extradoc Changeset: r559:514a518dfb43 Date: 2015-01-06 22:05 +0100 http://bitbucket.org/pypy/pypy.org/changeset/514a518dfb43/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $21656 of $80000 (27.1%) + $21661 of $80000 (27.1%)
From noreply at buildbot.pypy.org Wed Jan 7 18:37:59 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Jan 2015 18:37:59 +0100 (CET) Subject: [pypy-commit] cffi default: issue #172 Message-ID: <20150107173759.CA4281D3570@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1620:b3b479ff6ebc Date: 2015-01-07 18:38 +0100 http://bitbucket.org/cffi/cffi/changeset/b3b479ff6ebc/ Log: issue #172 Fix an inconsistency: the define "UNICODE" must be present explicitly iff we're declaring TCHAR to be a wchar_t. diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -69,6 +69,7 @@ self._function_caches = [] self._libraries = [] self._cdefsources = [] + self._windows_unicode = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -347,6 +348,8 @@ which requires binary compatibility in the signatures. """ from .verifier import Verifier + if self._windows_unicode: + self._apply_windows_unicode(kwargs) self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() self._libraries.append(lib) @@ -408,6 +411,43 @@ def from_handle(self, x): return self._backend.from_handle(x) + def set_unicode(self, unicode_enabled): + """Windows: if 'unicode_enabled' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'unicode_enabled' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. 
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + if unicode_enabled: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = unicode_enabled + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/cffi/commontypes.py b/cffi/commontypes.py --- a/cffi/commontypes.py +++ b/cffi/commontypes.py @@ -86,8 +86,6 @@ "ULONGLONG": "unsigned long long", "WCHAR": "wchar_t", "SHORT": "short", - "TBYTE": "WCHAR", - "TCHAR": "WCHAR", "UCHAR": "unsigned char", "UINT": "unsigned int", "UINT8": "unsigned char", @@ -157,14 +155,12 @@ "LPCVOID": model.const_voidp_type, "LPCWSTR": "const WCHAR *", - "LPCTSTR": "LPCWSTR", "LPDWORD": "DWORD *", "LPHANDLE": "HANDLE *", "LPINT": "int *", "LPLONG": "long *", "LPSTR": "CHAR *", "LPWSTR": "WCHAR *", - "LPTSTR": "LPWSTR", "LPVOID": model.voidp_type, "LPWORD": "WORD *", "LRESULT": "LONG_PTR", @@ -173,7 +169,6 @@ "PBYTE": "BYTE *", "PCHAR": "CHAR *", "PCSTR": "const CHAR *", - "PCTSTR": "LPCWSTR", "PCWSTR": "const WCHAR *", "PDWORD": "DWORD *", "PDWORDLONG": "DWORDLONG *", @@ -200,9 +195,6 @@ "PSIZE_T": "SIZE_T *", "PSSIZE_T": "SSIZE_T *", "PSTR": "CHAR *", - "PTBYTE": "TBYTE *", - "PTCHAR": "TCHAR *", - "PTSTR": "LPWSTR", 
"PUCHAR": "UCHAR *", "PUHALF_PTR": "UHALF_PTR *", "PUINT": "UINT *", diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -2086,3 +2086,31 @@ assert str(e.value) == ( "ctype 'MyStr' not supported as argument or return value " "(it is a struct with bit fields)") + +def test_verify_extra_arguments(): + ffi = FFI() + ffi.cdef("#define ABA ...") + lib = ffi.verify("", define_macros=[('ABA', '42')]) + assert lib.ABA == 42 + +def test_implicit_unicode_on_windows(): + if sys.platform != 'win32': + py.test.skip("win32-only test") + for with_unicode in [True, False]: + ffi = FFI() + ffi.set_unicode(with_unicode) + ffi.cdef(""" + DWORD GetModuleFileName(HMODULE hModule, LPTSTR lpFilename, + DWORD nSize); + """) + lib = ffi.verify(""" + #include + """, libraries=['Kernel32']) + outbuf = ffi.new("TCHAR[]", 200) + n = lib.GetModuleFileName(ffi.NULL, outbuf, 500) + assert 0 < n < 500 + for i in range(n): + print repr(outbuf[i]) + assert ord(outbuf[i]) != 0 + assert ord(outbuf[n]) == 0 + assert ord(outbuf[0]) < 128 # should be a letter, or '\' From noreply at buildbot.pypy.org Wed Jan 7 18:50:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Jan 2015 18:50:41 +0100 (CET) Subject: [pypy-commit] cffi default: Document ffi.set_unicode() Message-ID: <20150107175041.261531D3570@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1621:e5f44c1fd9f7 Date: 2015-01-07 18:51 +0100 http://bitbucket.org/cffi/cffi/changeset/e5f44c1fd9f7/ Log: Document ffi.set_unicode() diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -411,16 +411,16 @@ def from_handle(self, x): return self._backend.from_handle(x) - def set_unicode(self, unicode_enabled): - """Windows: if 'unicode_enabled' is True, enable the UNICODE and + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and _UNICODE defines in C, and declare the types like TCHAR and 
LPTCSTR - to be (pointers to) wchar_t. If 'unicode_enabled' is False, + to be (pointers to) wchar_t. If 'enabled_flag' is False, declare these types to be (pointers to) plain 8-bit characters. This is mostly for backward compatibility; you usually want True. """ if self._windows_unicode is not None: raise ValueError("set_unicode() can only be called once") - if unicode_enabled: + if enabled_flag: self.cdef("typedef wchar_t TBYTE;" "typedef wchar_t TCHAR;" "typedef const wchar_t *LPCTSTR;" @@ -438,7 +438,7 @@ "typedef char *PTSTR;" "typedef TBYTE *PTBYTE;" "typedef TCHAR *PTCHAR;") - self._windows_unicode = unicode_enabled + self._windows_unicode = enabled_flag def _apply_windows_unicode(self, kwds): defmacros = kwds.get('define_macros', ()) diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1374,6 +1374,27 @@ .. "versionadded:: 0.4" --- inlined in the previous paragraph +**ffi.set_unicode(enabled_flag)**: Windows: if ``enabled_flag`` is +True, enable the ``UNICODE`` and ``_UNICODE`` defines in C, and +declare the types like ``TCHAR`` and ``LPTCSTR`` to be (pointers to) +``wchar_t``. If ``enabled_flag`` is False, declare these types to be +(pointers to) plain 8-bit characters. *New in version 0.9.* + +The reason behind this method is that a lot of standard functions have +two versions, like ``MessageBoxA()`` and ``MessageBoxW()``. The +official interface is ``MessageBox()`` with arguments like +``LPTCSTR``. Depending on whether ``UNICODE`` is defined or not, the +standard header renames the generic function name to one of the two +specialized versions, and declares the correct (unicode or not) types. + +Usually, the right thing to do is to call this method with True. Be +aware (particularly on Python 2) that you then need to pass unicode +strings as arguments, not byte strings. 
(Before cffi version 0.9, +``TCHAR`` and friends where hard-coded as unicode, but ``UNICODE`` was, +inconsistently, not defined by default.) + +.. "versionadded:: 0.9" --- inlined in the previous paragraph + Unimplemented features ---------------------- From noreply at buildbot.pypy.org Wed Jan 7 19:24:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Jan 2015 19:24:41 +0100 (CET) Subject: [pypy-commit] cffi default: improve doc for ffi.set_unicode() Message-ID: <20150107182441.C16821D2380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1622:f5ff0b68e418 Date: 2015-01-07 19:25 +0100 http://bitbucket.org/cffi/cffi/changeset/f5ff0b68e418/ Log: improve doc for ffi.set_unicode() diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -433,7 +433,9 @@ ``c_f``. * *New in version 0.6:* all `common Windows types`_ are defined if you run - on Windows (``DWORD``, ``LPARAM``, etc.). + on Windows (``DWORD``, ``LPARAM``, etc.). *Changed in version 0.9:* the + types ``TBYTE TCHAR LPCTSTR PCTSTR LPTSTR PTSTR PTBYTE PTCHAR`` are no + longer automatically defined; see ``ffi.set_unicode()`` below. * *New in version 0.9:* the other standard integer types from stdint.h, as long as they map to integers of 1, 2, 4 or 8 bytes. Larger integers @@ -1376,9 +1378,10 @@ **ffi.set_unicode(enabled_flag)**: Windows: if ``enabled_flag`` is True, enable the ``UNICODE`` and ``_UNICODE`` defines in C, and -declare the types like ``TCHAR`` and ``LPTCSTR`` to be (pointers to) -``wchar_t``. If ``enabled_flag`` is False, declare these types to be -(pointers to) plain 8-bit characters. *New in version 0.9.* +declare the types ``TBYTE TCHAR LPCTSTR PCTSTR LPTSTR PTSTR PTBYTE +PTCHAR`` to be (pointers to) ``wchar_t``. If ``enabled_flag`` is +False, declare these types to be (pointers to) plain 8-bit characters. 
+*New in version 0.9.* The reason behind this method is that a lot of standard functions have two versions, like ``MessageBoxA()`` and ``MessageBoxW()``. The From noreply at buildbot.pypy.org Wed Jan 7 21:23:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Jan 2015 21:23:15 +0100 (CET) Subject: [pypy-commit] pypy default: Update for cffi/275285b314a7 Message-ID: <20150107202315.7EC731C320E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75254:9d3eb04e5545 Date: 2015-01-07 21:23 +0100 http://bitbucket.org/pypy/pypy/changeset/9d3eb04e5545/ Log: Update for cffi/275285b314a7 diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -45,8 +45,9 @@ # cif_descr = self.getfunctype().cif_descr if not cif_descr: - raise OperationError(space.w_NotImplementedError, - space.wrap("callbacks with '...'")) + raise oefmt(space.w_NotImplementedError, + "%s: callback with unsupported argument or " + "return type or with '...'", self.getfunctype().name) res = clibffi.c_ffi_prep_closure(self.get_closure(), cif_descr.cif, invoke_callback, rffi.cast(rffi.VOIDP, self.unique_id)) diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -34,6 +34,7 @@ could_cast_anything=False) self.fargs = fargs self.ellipsis = bool(ellipsis) + self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # fresult is stored in self.ctitem if not ellipsis: @@ -41,7 +42,14 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. 
- CifDescrBuilder(fargs, fresult).rawallocate(self) + builder = CifDescrBuilder(fargs, fresult) + try: + builder.rawallocate(self) + except OperationError, e: + if not e.match(space, space.w_NotImplementedError): + raise + # else, eat the NotImplementedError. We will get the + # exception if we see an actual call def new_ctypefunc_completing_argtypes(self, args_w): space = self.space @@ -178,8 +186,6 @@ # ____________________________________________________________ -W_CTypeFunc.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # default value - BIG_ENDIAN = sys.byteorder == 'big' USE_C_LIBFFI_MSVC = getattr(clibffi, 'USE_C_LIBFFI_MSVC', False) @@ -295,18 +301,18 @@ nflat = 0 for i, cf in enumerate(ctype.fields_list): if cf.is_bitfield(): - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with bit fields")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with bit fields)", ctype.name) flat = 1 ct = cf.ctype while isinstance(ct, ctypearray.W_CTypeArray): flat *= ct.length ct = ct.ctitem if flat <= 0: - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with a zero-length array")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with a zero-length array)", ctype.name) nflat += flat if USE_C_LIBFFI_MSVC and is_result_type: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1030,11 +1030,12 @@ BInt = new_primitive_type("int") BArray0 = new_array_type(new_pointer_type(BInt), 0) BStruct = new_struct_type("struct foo") + BStructP = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BArray0)]) - 
py.test.raises(NotImplementedError, new_function_type, - (BStruct,), BInt, False) - py.test.raises(NotImplementedError, new_function_type, - (BInt,), BStruct, False) + BFunc = new_function_type((BStruct,), BInt, False) + py.test.raises(NotImplementedError, cast(BFunc, 123), cast(BStructP, 123)) + BFunc2 = new_function_type((BInt,), BStruct, False) + py.test.raises(NotImplementedError, cast(BFunc2, 123), 123) def test_call_function_9(): BInt = new_primitive_type("int") @@ -1805,7 +1806,8 @@ new_function_type((), new_pointer_type(BFunc)) BUnion = new_union_type("union foo_u") complete_struct_or_union(BUnion, []) - py.test.raises(NotImplementedError, new_function_type, (), BUnion) + BFunc = new_function_type((), BUnion) + py.test.raises(NotImplementedError, cast(BFunc, 123)) py.test.raises(TypeError, new_function_type, (), BArray) def test_struct_return_in_func(): From noreply at buildbot.pypy.org Wed Jan 7 22:14:44 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Jan 2015 22:14:44 +0100 (CET) Subject: [pypy-commit] cffi default: Clarify the error we get if we try to use TCHAR & friends before calling Message-ID: <20150107211444.989ED1C07D2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1623:138fea1cf4bd Date: 2015-01-07 22:15 +0100 http://bitbucket.org/cffi/cffi/changeset/138fea1cf4bd/ Log: Clarify the error we get if we try to use TCHAR & friends before calling ffi.set_unicode(). 
diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -420,6 +420,7 @@ """ if self._windows_unicode is not None: raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) if enabled_flag: self.cdef("typedef wchar_t TBYTE;" "typedef wchar_t TCHAR;" diff --git a/cffi/commontypes.py b/cffi/commontypes.py --- a/cffi/commontypes.py +++ b/cffi/commontypes.py @@ -29,6 +29,9 @@ result = model.PointerType(resolve_common_type(result[:-2])) elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) + elif result == 'set-unicode-needed': + raise api.FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) else: if commontype == result: raise api.FFIError("Unsupported type: %r. Please file a bug " @@ -232,6 +235,15 @@ "USN": "LONGLONG", "VOID": model.void_type, "WPARAM": "UINT_PTR", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR;": "set-unicode-needed", }) return result diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1381,7 +1381,8 @@ declare the types ``TBYTE TCHAR LPCTSTR PCTSTR LPTSTR PTSTR PTBYTE PTCHAR`` to be (pointers to) ``wchar_t``. If ``enabled_flag`` is False, declare these types to be (pointers to) plain 8-bit characters. -*New in version 0.9.* +(These types are not predeclared at all if you don't call +``set_unicode()``.) *New in version 0.9.* The reason behind this method is that a lot of standard functions have two versions, like ``MessageBoxA()`` and ``MessageBoxW()``. 
The diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1,6 +1,6 @@ import py, re import sys, os, math, weakref -from cffi import FFI, VerificationError, VerificationMissing, model +from cffi import FFI, VerificationError, VerificationMissing, model, FFIError from testing.support import * @@ -2096,6 +2096,10 @@ def test_implicit_unicode_on_windows(): if sys.platform != 'win32': py.test.skip("win32-only test") + ffi = FFI() + e = py.test.raises(FFIError, ffi.cdef, "int foo(LPTSTR);") + assert str(e.value) == ("The Windows type 'LPTSTR' is only available after" + " you call ffi.set_unicode()") for with_unicode in [True, False]: ffi = FFI() ffi.set_unicode(with_unicode) From noreply at buildbot.pypy.org Thu Jan 8 01:38:33 2015 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 Jan 2015 01:38:33 +0100 (CET) Subject: [pypy-commit] pypy default: try to fix freebsd translation Message-ID: <20150108003833.6390A1C3465@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r75255:7ee4207de06d Date: 2015-01-07 19:38 -0500 http://bitbucket.org/pypy/pypy/changeset/7ee4207de06d/ Log: try to fix freebsd translation diff --git a/rpython/translator/platform/freebsd.py b/rpython/translator/platform/freebsd.py --- a/rpython/translator/platform/freebsd.py +++ b/rpython/translator/platform/freebsd.py @@ -6,9 +6,12 @@ class Freebsd(BSD): name = "freebsd" - link_flags = ['-pthread'] + os.environ.get('LDFLAGS', '').split() - cflags = ['-O3', '-pthread', '-fomit-frame-pointer' - ] + os.environ.get('CFLAGS', '').split() + link_flags = tuple( + ['-pthread'] + + os.environ.get('LDFLAGS', '').split()) + cflags = tuple( + ['-O3', '-pthread', '-fomit-frame-pointer'] + + os.environ.get('CFLAGS', '').split()) class Freebsd_64(Freebsd): shared_only = ('-fPIC',) From noreply at buildbot.pypy.org Thu Jan 8 10:12:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Jan 2015 10:12:46 +0100 (CET) 
Subject: [pypy-commit] cffi default: Issue #173 Message-ID: <20150108091246.AE0311C327D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1624:a5bf8dfd9667 Date: 2015-01-08 10:13 +0100 http://bitbucket.org/cffi/cffi/changeset/a5bf8dfd9667/ Log: Issue #173 Test and fix for the implicit __pycache__ directory location. diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -347,11 +347,24 @@ (including calling macros). This is unlike 'ffi.dlopen()', which requires binary compatibility in the signatures. """ - from .verifier import Verifier + from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations if self._windows_unicode: self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Veritier(). + tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). 
self._libraries.append(lib) return lib diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -44,7 +44,7 @@ modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) suffix = _get_so_suffixes()[0] - self.tmpdir = tmpdir or os.environ.get('CFFI_TMPDIR') or _caller_dir_pycache() + self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) self.ext_package = ext_package @@ -210,6 +210,9 @@ def _caller_dir_pycache(): if _TMPDIR: return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result filename = sys._getframe(2).f_code.co_filename return os.path.abspath(os.path.join(os.path.dirname(filename), '__pycache__')) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -2118,3 +2118,10 @@ assert ord(outbuf[i]) != 0 assert ord(outbuf[n]) == 0 assert ord(outbuf[0]) < 128 # should be a letter, or '\' + +def test_use_local_dir(): + ffi = FFI() + lib = ffi.verify("", modulename="test_use_local_dir") + this_dir = os.path.dirname(__file__) + pycache_files = os.listdir(os.path.join(this_dir, '__pycache__')) + assert any('test_use_local_dir' in s for s in pycache_files) From noreply at buildbot.pypy.org Thu Jan 8 18:00:40 2015 From: noreply at buildbot.pypy.org (jcowgill) Date: Thu, 8 Jan 2015 18:00:40 +0100 (CET) Subject: [pypy-commit] cffi default: Skip test_opaque_integer_as_function_result on mips64el, causes SEGV Message-ID: <20150108170040.30C6A1C01E8@cobra.cs.uni-duesseldorf.de> Author: James Cowgill Branch: Changeset: r1625:67435ba27473 Date: 2015-01-08 09:00 -0800 http://bitbucket.org/cffi/cffi/changeset/67435ba27473/ Log: Skip test_opaque_integer_as_function_result on mips64el, causes SEGV It looked like CFFI told libffi that the return type of the function was a structure of size 2 but with 
no elements, which seems illegal. From: https://bugs.debian.org/774787 diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1220,6 +1220,8 @@ import platform if platform.machine().startswith('sparc'): py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') + elif platform.machine() == 'mips64' and sys.maxsize > 2**32: + py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. XXX think about something better :-( ffi = FFI() From noreply at buildbot.pypy.org Thu Jan 8 22:21:22 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 8 Jan 2015 22:21:22 +0100 (CET) Subject: [pypy-commit] pypy py3.3: crypt.py imports _crypt. Message-ID: <20150108212122.B8BB71C338F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75256:cdc9aa59d0a4 Date: 2015-01-06 09:06 +0100 http://bitbucket.org/pypy/pypy/changeset/cdc9aa59d0a4/ Log: crypt.py imports _crypt. diff --git a/pypy/module/crypt/__init__.py b/pypy/module/crypt/__init__.py --- a/pypy/module/crypt/__init__.py +++ b/pypy/module/crypt/__init__.py @@ -1,7 +1,7 @@ from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): - """A demo built-in module based on rffi.""" + applevel_name = '_crypt' interpleveldefs = { 'crypt' : 'interp_crypt.crypt', From noreply at buildbot.pypy.org Thu Jan 8 22:21:24 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 8 Jan 2015 22:21:24 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Improve pickling of "reversed" and "enumerate" objects. Message-ID: <20150108212124.204191C338F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75257:4c0fc2915173 Date: 2015-01-08 17:27 +0100 http://bitbucket.org/pypy/pypy/changeset/4c0fc2915173/ Log: Improve pickling of "reversed" and "enumerate" objects. In addition, reversed is now a type. 
diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -70,7 +70,7 @@ 'zip' : 'functional.W_Zip', 'min' : 'functional.min', 'max' : 'functional.max', - 'reversed' : 'functional.reversed', + 'reversed' : 'functional.W_ReversedIterator', 'super' : 'descriptor.W_Super', 'staticmethod' : 'descriptor.StaticMethod', 'classmethod' : 'descriptor.ClassMethod', diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -203,6 +203,7 @@ self.w_iter = w_iter self.w_index = w_start + @staticmethod def descr___new__(space, w_subtype, w_iterable, w_start=None): self = space.allocate_instance(W_Enumerate, w_subtype) if w_start is None: @@ -222,35 +223,20 @@ return space.newtuple([w_index, w_item]) def descr___reduce__(self, space): - from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') - mod = space.interp_w(MixedModule, w_mod) - w_new_inst = mod.get('enumerate_new') - w_info = space.newtuple([self.w_iter, self.w_index]) - return space.newtuple([w_new_inst, w_info]) - -# exported through _pickle_support -def _make_enumerate(space, w_iter, w_index): - return space.wrap(W_Enumerate(w_iter, w_index)) + return space.newtuple([space.type(self), + space.newtuple([self.w_iter, self.w_index])]) W_Enumerate.typedef = TypeDef("enumerate", - __new__=interp2app(W_Enumerate.descr___new__.im_func), + __new__=interp2app(W_Enumerate.descr___new__), __iter__=interp2app(W_Enumerate.descr___iter__), __next__=interp2app(W_Enumerate.descr_next), __reduce__=interp2app(W_Enumerate.descr___reduce__), ) -def reversed(space, w_sequence): - """Return a iterator that yields items of sequence in reverse.""" - w_reversed_descr = space.lookup(w_sequence, "__reversed__") - if w_reversed_descr is not None: - w_reversed = 
space.get(w_reversed_descr, w_sequence) - return space.call_function(w_reversed) - return space.wrap(W_ReversedIterator(space, w_sequence)) +class W_ReversedIterator(W_Root): + """reverse iterator over values of the sequence.""" - -class W_ReversedIterator(W_Root): def __init__(self, space, w_sequence): self.remaining = space.len_w(w_sequence) - 1 if space.lookup(w_sequence, "__getitem__") is None: @@ -258,6 +244,16 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) self.w_sequence = w_sequence + @staticmethod + def descr___new__(space, w_subtype, w_sequence): + w_reversed_descr = space.lookup(w_sequence, "__reversed__") + if w_reversed_descr is not None: + w_reversed = space.get(w_reversed_descr, w_sequence) + return space.call_function(w_reversed) + self = space.allocate_instance(W_ReversedIterator, w_subtype) + self.__init__(space, w_sequence) + return space.wrap(self) + def descr___iter__(self, space): return space.wrap(self) @@ -281,30 +277,33 @@ raise OperationError(space.w_StopIteration, space.w_None) def descr___reduce__(self, space): - from pypy.interpreter.mixedmodule import MixedModule - w_mod = space.getbuiltinmodule('_pickle_support') - mod = space.interp_w(MixedModule, w_mod) - w_new_inst = mod.get('reversed_new') - info_w = [self.w_sequence, space.wrap(self.remaining)] - w_info = space.newtuple(info_w) - return space.newtuple([w_new_inst, w_info]) + if self.w_sequence: + w_state = space.wrap(self.remaining) + return space.newtuple([ + space.type(self), + space.newtuple([self.w_sequence]), + w_state]) + else: + return space.newtuple([ + space.type(self), + space.newtuple([])]) + + def descr___setstate__(self, space, w_state): + self.remaining = space.int_w(wstate) + n = space.len_w(self.w_sequence) + if self.remaining < -1: + self.remaining = -1 + elif self.remaining > n - 1: + self.remaining = n - 1 W_ReversedIterator.typedef = TypeDef("reversed", + __new__ = interp2app(W_ReversedIterator.descr___new__), __iter__ = 
interp2app(W_ReversedIterator.descr___iter__), __length_hint__ = interp2app(W_ReversedIterator.descr_length), __next__ = interp2app(W_ReversedIterator.descr_next), __reduce__ = interp2app(W_ReversedIterator.descr___reduce__), + __setstate__ = interp2app(W_ReversedIterator.descr___setstate__), ) -W_ReversedIterator.typedef.acceptable_as_base_class = False - -# exported through _pickle_support -def _make_reversed(space, w_seq, w_remaining): - w_type = space.gettypeobject(W_ReversedIterator.typedef) - iterator = space.allocate_instance(W_ReversedIterator, w_type) - iterator.w_sequence = w_seq - iterator.remaining = space.int_w(w_remaining) - return space.wrap(iterator) - class W_Range(W_Root): diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -510,6 +510,7 @@ class AppTestReversed: def test_reversed(self): + assert isinstance(reversed, type) r = reversed("hello") assert iter(r) is r assert r.__next__() == "o" diff --git a/pypy/module/_pickle_support/__init__.py b/pypy/module/_pickle_support/__init__.py --- a/pypy/module/_pickle_support/__init__.py +++ b/pypy/module/_pickle_support/__init__.py @@ -23,7 +23,5 @@ 'intrangeiter_new': 'maker.intrangeiter_new', 'builtin_code': 'maker.builtin_code', 'builtin_function' : 'maker.builtin_function', - 'enumerate_new': 'maker.enumerate_new', - 'reversed_new': 'maker.reversed_new', 'operationerror_new': 'maker.operationerror_new', } diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -98,15 +98,6 @@ identifier)) -def enumerate_new(space, w_iter, w_index): - from pypy.module.__builtin__.functional import _make_enumerate - return _make_enumerate(space, w_iter, w_index) - -def reversed_new(space, w_seq, w_remaining): - from 
pypy.module.__builtin__.functional import _make_reversed - return _make_reversed(space, w_seq, w_remaining) - - # ___________________________________________________________________ # Helper functions for internal use From noreply at buildbot.pypy.org Thu Jan 8 22:21:25 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 8 Jan 2015 22:21:25 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Use __qualname__ in type.__repr__ Message-ID: <20150108212125.6612D1C338F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75258:6ad0ea075018 Date: 2015-01-08 17:27 +0100 http://bitbucket.org/pypy/pypy/changeset/6ad0ea075018/ Log: Use __qualname__ in type.__repr__ diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -90,6 +90,7 @@ overridetypedef=None): w_self.space = space w_self.name = name + w_self.qualname = None w_self.bases_w = bases_w w_self.dict_w = dict_w w_self.nslots = 0 @@ -492,6 +493,9 @@ result = self.name return result.decode('utf-8') + def getqualname(self, space): + return self.qualname or self.getname(space) + def add_subclass(w_self, w_subclass): space = w_self.space if not space.config.translation.rweakref: @@ -580,7 +584,7 @@ else: mod = space.unicode_w(w_mod) if mod is not None and mod != u'builtins': - return space.wrap(u"" % (mod, self.getname(space))) + return space.wrap(u"" % (mod, self.getqualname(space))) else: return space.wrap(u"" % (self.name.decode('utf-8'))) @@ -689,6 +693,16 @@ raise oefmt(space.w_ValueError, "__name__ must not contain null bytes") w_type.name = name +def descr_get__qualname__(space, w_type): + w_type = _check(space, w_type) + return space.wrap(w_type.getqualname(space)) + +def descr_set__qualname__(space, w_type, w_value): + w_type = _check(space, w_type) + if not w_type.is_heaptype(): + raise oefmt(space.w_TypeError, "can't set %N.__qualname__", w_type) + w_type.qualname = 
space.unicode_w(w_value) + def descr_get__mro__(space, w_type): w_type = _check(space, w_type) return space.newtuple(w_type.mro_w) @@ -858,6 +872,7 @@ W_TypeObject.typedef = TypeDef("type", __new__ = gateway.interp2app(descr__new__), __name__ = GetSetProperty(descr_get__name__, descr_set__name__), + __qualname__ = GetSetProperty(descr_get__qualname__, descr_set__qualname__), __bases__ = GetSetProperty(descr_get__bases__, descr_set__bases__), __base__ = GetSetProperty(descr__base), __mro__ = GetSetProperty(descr_get__mro__), From noreply at buildbot.pypy.org Thu Jan 8 22:21:26 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 8 Jan 2015 22:21:26 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Set __qualname__ of builtin methods Message-ID: <20150108212126.9BBBC1C338F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75259:826457bf5cda Date: 2015-01-08 18:42 +0100 http://bitbucket.org/pypy/pypy/changeset/826457bf5cda/ Log: Set __qualname__ of builtin methods diff --git a/lib-python/3/test/test_funcattrs.py b/lib-python/3/test/test_funcattrs.py --- a/lib-python/3/test/test_funcattrs.py +++ b/lib-python/3/test/test_funcattrs.py @@ -121,7 +121,8 @@ self.b.__qualname__ = 'd' self.assertEqual(self.b.__qualname__, 'd') # __qualname__ must be a string - self.cannot_set_attr(self.b, '__qualname__', 7, TypeError) + self.cannot_set_attr(self.b, '__qualname__', 7, + (TypeError, AttributeError)) def test___code__(self): num_one, num_two = 7, 8 diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -395,6 +395,9 @@ class D(B, C): # assert does not raise TypeError pass + def test_method_qualname(self): + assert dict.copy.__qualname__ == 'dict.copy' + def test_builtin_add(self): x = 5 assert x.__add__(6) == 11 diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- 
a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1,7 +1,8 @@ from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root, SpaceCache from pypy.interpreter.error import oefmt, OperationError -from pypy.interpreter.function import Function, StaticMethod +from pypy.interpreter.function import ( + Function, StaticMethod, FunctionWithFixedCode) from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ descr_get_dict, dict_descr, Member, TypeDef from pypy.interpreter.astcompiler.misc import mangle @@ -1263,8 +1264,17 @@ overridetypedef = typedef w_type = W_TypeObject(space, typedef.name, bases_w, dict_w, overridetypedef=overridetypedef) + if typedef is not overridetypedef: w_type.w_doc = space.wrap(typedef.doc) + else: + # Set the __qualname__ of member functions + for name in rawdict: + w_obj = dict_w[name] + if isinstance(w_obj, FunctionWithFixedCode): + qualname = w_type.getqualname(space) + '.' + name + w_obj.fset_func_qualname(space, space.wrap(qualname)) + if hasattr(typedef, 'flag_sequence_bug_compat'): w_type.flag_sequence_bug_compat = typedef.flag_sequence_bug_compat w_type.lazyloaders = lazyloaders From noreply at buildbot.pypy.org Thu Jan 8 23:25:51 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 8 Jan 2015 23:25:51 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Fix translation Message-ID: <20150108222551.E35A61C01E8@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75260:be32e3299b5e Date: 2015-01-08 23:24 +0100 http://bitbucket.org/pypy/pypy/changeset/be32e3299b5e/ Log: Fix translation diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -245,7 +245,7 @@ self.w_sequence = w_sequence @staticmethod - def descr___new__(space, w_subtype, w_sequence): + def descr___new__2(space, w_subtype, w_sequence): w_reversed_descr = 
space.lookup(w_sequence, "__reversed__") if w_reversed_descr is not None: w_reversed = space.get(w_reversed_descr, w_sequence) @@ -297,7 +297,7 @@ self.remaining = n - 1 W_ReversedIterator.typedef = TypeDef("reversed", - __new__ = interp2app(W_ReversedIterator.descr___new__), + __new__ = interp2app(W_ReversedIterator.descr___new__2), __iter__ = interp2app(W_ReversedIterator.descr___iter__), __length_hint__ = interp2app(W_ReversedIterator.descr_length), __next__ = interp2app(W_ReversedIterator.descr_next), From noreply at buildbot.pypy.org Thu Jan 8 23:41:42 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 8 Jan 2015 23:41:42 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Another fix Message-ID: <20150108224142.EECAF1C01E8@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75261:2c2d11fa98da Date: 2015-01-08 23:41 +0100 http://bitbucket.org/pypy/pypy/changeset/2c2d11fa98da/ Log: Another fix diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -289,7 +289,7 @@ space.newtuple([])]) def descr___setstate__(self, space, w_state): - self.remaining = space.int_w(wstate) + self.remaining = space.int_w(w_state) n = space.len_w(self.w_sequence) if self.remaining < -1: self.remaining = -1 From noreply at buildbot.pypy.org Fri Jan 9 00:21:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 00:21:40 +0100 (CET) Subject: [pypy-commit] pypy default: Issue #1958 Message-ID: <20150108232140.496111C01E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75262:0f3ee2ce246e Date: 2015-01-09 00:21 +0100 http://bitbucket.org/pypy/pypy/changeset/0f3ee2ce246e/ Log: Issue #1958 Maybe temporary: just kill references to "distribute". I think that installing pip as described in the other lines of the instructions will get us a complete setuptools environment anyway. 
diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -38,14 +38,13 @@ and not move the binary there, else PyPy would not be able to find its library. -If you want to install 3rd party libraries, the most convenient way is to -install distribute_ and pip_: +If you want to install 3rd party libraries, the most convenient way is +to install pip_ (unless you want to install virtualenv as explained +below; then you can directly use pip inside virtualenvs): .. code-block:: console - $ curl -O http://python-distribute.org/distribute_setup.py $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.1/bin/pypy distribute_setup.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example @@ -69,7 +68,6 @@ Note that bin/python is now a symlink to bin/pypy. -.. _distribute: http://www.python-distribute.org/ .. _pip: http://pypi.python.org/pypi/pip From noreply at buildbot.pypy.org Fri Jan 9 00:43:22 2015 From: noreply at buildbot.pypy.org (dstufft) Date: Fri, 9 Jan 2015 00:43:22 +0100 (CET) Subject: [pypy-commit] pypy dstufft/update-pip-bootstrap-location-to-the-new-1420760611527: Update pip bootstrap location to the new location. Message-ID: <20150108234322.14F7F1C01E8@cobra.cs.uni-duesseldorf.de> Author: Donald Stufft Branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 Changeset: r75263:454e0b57169f Date: 2015-01-08 23:43 +0000 http://bitbucket.org/pypy/pypy/changeset/454e0b57169f/ Log: Update pip bootstrap location to the new location. diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -44,7 +44,7 @@ .. 
code-block:: console - $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://bootstrap.pypa.io/get-pip.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example From noreply at buildbot.pypy.org Fri Jan 9 00:44:36 2015 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 9 Jan 2015 00:44:36 +0100 (CET) Subject: [pypy-commit] pypy dstufft/update-pip-bootstrap-location-to-the-new-1420760611527: Close branch dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 Message-ID: <20150108234436.7F4771C01E8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 Changeset: r75264:836eeccbffd9 Date: 2015-01-08 15:44 -0800 http://bitbucket.org/pypy/pypy/changeset/836eeccbffd9/ Log: Close branch dstufft/update-pip-bootstrap-location-to-the- new-1420760611527 From noreply at buildbot.pypy.org Fri Jan 9 00:44:59 2015 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 9 Jan 2015 00:44:59 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 (pull request #296) Message-ID: <20150108234459.EEE731C01E8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r75265:733b2bd3a55b Date: 2015-01-08 15:44 -0800 http://bitbucket.org/pypy/pypy/changeset/733b2bd3a55b/ Log: Merged in dstufft/update-pip-bootstrap-location-to-the- new-1420760611527 (pull request #296) Update pip bootstrap location to the new location. diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -44,7 +44,7 @@ .. 
code-block:: console - $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://bootstrap.pypa.io/get-pip.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example From noreply at buildbot.pypy.org Fri Jan 9 01:01:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 01:01:22 +0100 (CET) Subject: [pypy-commit] pypy default: Issue #1956 Message-ID: <20150109000122.632F01D2D84@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75266:276ba3e27516 Date: 2015-01-09 01:01 +0100 http://bitbucket.org/pypy/pypy/changeset/276ba3e27516/ Log: Issue #1956 Follow-up on 9ff421c20db5: on systems that have clock_gettime(), reimplement the RPython clock() based on it. This is in line with "man clock" on Linux, and should be guaranteed to give more precise results. diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -47,6 +47,8 @@ if sys.platform.startswith('freebsd') or sys.platform.startswith('netbsd'): libraries = ['compat'] +elif sys.platform == 'linux2': + libraries = ['rt'] else: libraries = [] @@ -58,7 +60,15 @@ TIMEB = platform.Struct(STRUCT_TIMEB, [('time', rffi.INT), ('millitm', rffi.INT)]) -constant_names = ['RUSAGE_SELF', 'EINTR'] +class CConfigForClockGetTime: + _compilation_info_ = ExternalCompilationInfo( + includes=['time.h'], + libraries=libraries + ) + TIMESPEC = platform.Struct('struct timespec', [('tv_sec', rffi.LONG), + ('tv_nsec', rffi.LONG)]) + +constant_names = ['RUSAGE_SELF', 'EINTR', 'CLOCK_PROCESS_CPUTIME_ID'] for const in constant_names: setattr(CConfig, const, platform.DefinedConstantInteger(const)) defs_names = ['GETTIMEOFDAY_NO_TZ'] @@ -162,6 +172,21 @@ diff = a[0] - state.counter_start lltype.free(a, flavor='raw') return float(diff) / state.divisor + elif self.CLOCK_PROCESS_CPUTIME_ID is not None: + # Linux and other POSIX systems with 
clock_gettime() + self.configure(CConfigForClockGetTime) + TIMESPEC = self.TIMESPEC + CLOCK_PROCESS_CPUTIME_ID = self.CLOCK_PROCESS_CPUTIME_ID + c_clock_gettime = self.llexternal('clock_gettime', + [lltype.Signed, lltype.Ptr(TIMESPEC)], + rffi.INT, releasegil=False) + def time_clock_llimpl(): + a = lltype.malloc(TIMESPEC, flavor='raw') + c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) + result = (float(rffi.getintfield(a, 'c_tv_sec')) + + float(rffi.getintfield(a, 'c_tv_nsec')) * 0.000000001) + lltype.free(a, flavor='raw') + return result else: RUSAGE = self.RUSAGE RUSAGE_SELF = self.RUSAGE_SELF or 0 From noreply at buildbot.pypy.org Fri Jan 9 09:36:51 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 9 Jan 2015 09:36:51 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Fix stack size of MAKE_FUNCTION and MAKE_CLOSURE opcodes, Message-ID: <20150109083651.521201D3640@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75267:bbb5d74a2b9d Date: 2015-01-09 09:33 +0100 http://bitbucket.org/pypy/pypy/changeset/bbb5d74a2b9d/ Log: Fix stack size of MAKE_FUNCTION and MAKE_CLOSURE opcodes, they changed with the introduction of __qualname__. 
This should fix test_dis.py diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -647,10 +647,10 @@ return 1 - arg def _compute_MAKE_CLOSURE(arg): - return -1 - _num_args(arg) - ((arg >> 16) & 0xFFFF) + return -2 - _num_args(arg) - ((arg >> 16) & 0xFFFF) def _compute_MAKE_FUNCTION(arg): - return -_num_args(arg) - ((arg >> 16) & 0xFFFF) + return -1 - _num_args(arg) - ((arg >> 16) & 0xFFFF) def _compute_BUILD_SLICE(arg): if arg == 3: From noreply at buildbot.pypy.org Fri Jan 9 10:04:57 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 9 Jan 2015 10:04:57 +0100 (CET) Subject: [pypy-commit] pypy py3.3: Kill time.accept2dyear Message-ID: <20150109090457.DC2CE1C1148@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r75268:2ba2a9841686 Date: 2015-01-09 09:53 +0100 http://bitbucket.org/pypy/pypy/changeset/2ba2a9841686/ Log: Kill time.accept2dyear diff --git a/pypy/module/time/__init__.py b/pypy/module/time/__init__.py --- a/pypy/module/time/__init__.py +++ b/pypy/module/time/__init__.py @@ -39,5 +39,4 @@ from pypy.module.time import interp_time interp_time._init_timezone(space) - interp_time._init_accept2dyear(space) diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -202,13 +202,6 @@ c_strftime = external('strftime', [rffi.CCHARP, rffi.SIZE_T, rffi.CCHARP, TM_P], rffi.SIZE_T) -def _init_accept2dyear(space): - if os.environ.get("PYTHONY2K"): - accept2dyear = 0 - else: - accept2dyear = 1 - _set_module_object(space, "accept2dyear", space.wrap(accept2dyear)) - def _init_timezone(space): timezone = daylight = altzone = 0 tzname = ["", ""] @@ -439,21 +432,6 @@ glob_buf.c_tm_zone = lltype.nullptr(rffi.CCHARP.TO) rffi.setintfield(glob_buf, 'c_tm_gmtoff', 0) - if y < 1000: - w_accept2dyear = 
_get_module_object(space, "accept2dyear") - accept2dyear = space.is_true(w_accept2dyear) - - if accept2dyear: - if 69 <= y <= 99: - y += 1900 - elif 0 <= y <= 68: - y += 2000 - else: - raise OperationError(space.w_ValueError, - space.wrap("year out of range")) - space.warn(space.wrap("Century info guessed for a 2-digit year."), - space.w_DeprecationWarning) - # tm_wday does not need checking of its upper-bound since taking "% # 7" in _gettmarg() automatically restricts the range. if rffi.getintfield(glob_buf, 'c_tm_wday') < -1: diff --git a/pypy/module/time/test/test_time.py b/pypy/module/time/test/test_time.py --- a/pypy/module/time/test/test_time.py +++ b/pypy/module/time/test/test_time.py @@ -5,7 +5,6 @@ def test_attributes(self): import time - assert isinstance(time.accept2dyear, int) assert isinstance(time.altzone, int) assert isinstance(time.daylight, int) assert isinstance(time.timezone, int) @@ -102,21 +101,11 @@ assert isinstance(res, float) ltime = time.localtime() - time.accept2dyear == 0 ltime = list(ltime) ltime[0] = -1 - raises(ValueError, time.mktime, tuple(ltime)) - time.accept2dyear == 1 - - ltime = list(ltime) - ltime[0] = 67 - ltime = tuple(ltime) - if os.name != "nt" and sys.maxsize < 1<<32: # time_t may be 64bit - raises(OverflowError, time.mktime, ltime) - - ltime = list(ltime) + time.mktime(tuple(ltime)) # Does not crash anymore ltime[0] = 100 - raises(ValueError, time.mktime, tuple(ltime)) + time.mktime(tuple(ltime)) # Does not crash anymore t = time.time() assert int(time.mktime(time.localtime(t))) == int(t) @@ -169,28 +158,6 @@ assert asc[-len(str(bigyear)):] == str(bigyear) raises(OverflowError, time.asctime, (bigyear + 1,) + (0,)*8) - def test_accept2dyear_access(self): - import time - - accept2dyear = time.accept2dyear - del time.accept2dyear - try: - # with year >= 1900 this shouldn't need to access accept2dyear - assert time.asctime((2000,) + (0,) * 8).split()[-1] == '2000' - finally: - time.accept2dyear = accept2dyear - - def 
test_accept2dyear_bad(self): - import time - class X: - def __bool__(self): - raise RuntimeError('boo') - orig, time.accept2dyear = time.accept2dyear, X() - try: - raises(RuntimeError, time.asctime, (200,) + (0,) * 8) - finally: - time.accept2dyear = orig - def test_struct_time(self): import time raises(TypeError, time.struct_time) @@ -281,7 +248,7 @@ raises(TypeError, time.strftime, ()) raises(TypeError, time.strftime, (1,)) raises(TypeError, time.strftime, range(8)) - exp = '2000 01 01 00 00 00 1 001' + exp = '0 01 01 00 00 00 1 001' assert time.strftime("%Y %m %d %H %M %S %w %j", (0,)*9) == exp # Guard against invalid/non-supported format string @@ -314,9 +281,6 @@ # of the time tuple. # check year - if time.accept2dyear: - raises(ValueError, time.strftime, '', (-1, 1, 1, 0, 0, 0, 0, 1, -1)) - raises(ValueError, time.strftime, '', (100, 1, 1, 0, 0, 0, 0, 1, -1)) time.strftime('', (1899, 1, 1, 0, 0, 0, 0, 1, -1)) time.strftime('', (0, 1, 1, 0, 0, 0, 0, 1, -1)) From noreply at buildbot.pypy.org Fri Jan 9 11:10:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 11:10:50 +0100 (CET) Subject: [pypy-commit] pypy default: Move the cif_descr back as a class default. This is actually Message-ID: <20150109101050.A4C101C03FB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75269:48cd201a34bf Date: 2015-01-09 11:10 +0100 http://bitbucket.org/pypy/pypy/changeset/48cd201a34bf/ Log: Move the cif_descr back as a class default. This is actually important because of an instantiate() which doesn't call the regular constructor. 
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -27,6 +27,8 @@ _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] kind = "function" + cif_descr = lltype.nullptr(CIF_DESCRIPTION) + def __init__(self, space, fargs, fresult, ellipsis): extra = self._compute_extra_text(fargs, fresult, ellipsis) size = rffi.sizeof(rffi.VOIDP) @@ -34,7 +36,6 @@ could_cast_anything=False) self.fargs = fargs self.ellipsis = bool(ellipsis) - self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # fresult is stored in self.ctitem if not ellipsis: @@ -50,6 +51,9 @@ raise # else, eat the NotImplementedError. We will get the # exception if we see an actual call + if self.cif_descr: # should not be True, but you never know + lltype.free(self.cif_descr, flavor='raw') + self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) def new_ctypefunc_completing_argtypes(self, args_w): space = self.space @@ -65,10 +69,12 @@ "argument %d passed in the variadic part needs to " "be a cdata object (got %T)", i + 1, w_obj) fvarargs[i] = ct + # xxx call instantiate() directly. It's a bit of a hack. 
ctypefunc = instantiate(W_CTypeFunc) ctypefunc.space = space ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem + #ctypefunc.cif_descr = NULL --- already provided as the default CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc From noreply at buildbot.pypy.org Fri Jan 9 11:46:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 11:46:21 +0100 (CET) Subject: [pypy-commit] pypy default: Run pypy/tool/import_cffi.py Message-ID: <20150109104621.EABB81C03FB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75270:3db26c31b597 Date: 2015-01-09 11:45 +0100 http://bitbucket.org/pypy/pypy/changeset/3db26c31b597/ Log: Run pypy/tool/import_cffi.py diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -6,3 +6,8 @@ __version__ = "0.8.6" __version_info__ = (0, 8, 6) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. +__version_verifier_modules__ = "0.8.6" diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -69,6 +69,7 @@ self._function_caches = [] self._libraries = [] self._cdefsources = [] + self._windows_unicode = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -77,6 +78,7 @@ # with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -191,11 +193,12 @@ def offsetof(self, cdecl, fieldname): """Return the offset of the named field inside the given - structure, which must be given as a C type name. + structure, which must be given as a C type name. 
The field + may be 'x.y.z' in case of nested structures. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) - return self._backend.typeoffsetof(cdecl, fieldname)[1] + return self._typeoffsetof(cdecl, fieldname)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and @@ -264,6 +267,16 @@ """ return self._backend.buffer(cdata, size) + def from_buffer(self, python_buffer): + """Return a that points to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types str, + unicode, or bytearray (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. + """ + return self._backend.from_buffer(self.BCharA, python_buffer) + def callback(self, cdecl, python_callable=None, error=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. @@ -335,9 +348,23 @@ which requires binary compatibility in the signatures. """ from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations + if self._windows_unicode: + self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Veritier(). tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). self._libraries.append(lib) return lib @@ -359,12 +386,22 @@ def addressof(self, cdata, field=None): """Return the address of a . 
If 'field' is specified, return the address of this field. + The field may be 'x.y.z' in case of nested structures. """ ctype = self._backend.typeof(cdata) - ctype, offset = self._backend.typeoffsetof(ctype, field) + ctype, offset = self._typeoffsetof(ctype, field) ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) + def _typeoffsetof(self, ctype, field): + if field is not None and '.' in field: + offset = 0 + for field1 in field.split('.'): + ctype, offset1 = self._backend.typeoffsetof(ctype, field1) + offset += offset1 + return ctype, offset + return self._backend.typeoffsetof(ctype, field) + def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined in another FFI instance. Usage is similar to a #include in C, @@ -387,6 +424,44 @@ def from_handle(self, x): return self._backend.from_handle(x) + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'enabled_flag' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. 
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ b/lib_pypy/cffi/commontypes.py @@ -29,6 +29,9 @@ result = model.PointerType(resolve_common_type(result[:-2])) elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) + elif result == 'set-unicode-needed': + raise api.FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) else: if commontype == result: raise api.FFIError("Unsupported type: %r. 
Please file a bug " @@ -86,8 +89,6 @@ "ULONGLONG": "unsigned long long", "WCHAR": "wchar_t", "SHORT": "short", - "TBYTE": "WCHAR", - "TCHAR": "WCHAR", "UCHAR": "unsigned char", "UINT": "unsigned int", "UINT8": "unsigned char", @@ -157,14 +158,12 @@ "LPCVOID": model.const_voidp_type, "LPCWSTR": "const WCHAR *", - "LPCTSTR": "LPCWSTR", "LPDWORD": "DWORD *", "LPHANDLE": "HANDLE *", "LPINT": "int *", "LPLONG": "long *", "LPSTR": "CHAR *", "LPWSTR": "WCHAR *", - "LPTSTR": "LPWSTR", "LPVOID": model.voidp_type, "LPWORD": "WORD *", "LRESULT": "LONG_PTR", @@ -173,7 +172,6 @@ "PBYTE": "BYTE *", "PCHAR": "CHAR *", "PCSTR": "const CHAR *", - "PCTSTR": "LPCWSTR", "PCWSTR": "const WCHAR *", "PDWORD": "DWORD *", "PDWORDLONG": "DWORDLONG *", @@ -200,9 +198,6 @@ "PSIZE_T": "SIZE_T *", "PSSIZE_T": "SSIZE_T *", "PSTR": "CHAR *", - "PTBYTE": "TBYTE *", - "PTCHAR": "TCHAR *", - "PTSTR": "LPWSTR", "PUCHAR": "UCHAR *", "PUHALF_PTR": "UHALF_PTR *", "PUINT": "UINT *", @@ -240,6 +235,15 @@ "USN": "LONGLONG", "VOID": model.void_type, "WPARAM": "UINT_PTR", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR;": "set-unicode-needed", }) return result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -1,4 +1,3 @@ - from . 
import api, model from .commontypes import COMMON_TYPES, resolve_common_type try: @@ -209,6 +208,8 @@ def _add_constants(self, key, val): if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations raise api.FFIError( "multiple declarations of constant: %s" % (key,)) self._int_constants[key] = val @@ -460,6 +461,8 @@ elif kind == 'union': tp = model.UnionType(explicit_name, None, None, None) elif kind == 'enum': + if explicit_name == '__dotdotdot__': + raise CDefError("Enums cannot be declared with ...") tp = self._build_enum_type(explicit_name, type.values) else: raise AssertionError("kind = %r" % (kind,)) @@ -532,9 +535,24 @@ def _parse_constant(self, exprnode, partial_length_ok=False): # for now, limited to expressions that are an immediate number - # or negative number + # or positive/negative number if isinstance(exprnode, pycparser.c_ast.Constant): - return int(exprnode.value, 0) + s = exprnode.value + if s.startswith('0'): + if s.startswith('0x') or s.startswith('0X'): + return int(s, 16) + return int(s, 8) + elif '1' <= s[0] <= '9': + return int(s, 10) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise api.CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '+'): + return self._parse_constant(exprnode.expr) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -11,6 +11,9 @@ """ +LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', + 'extra_objects', 'depends'] + def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ 
b/lib_pypy/cffi/model.py @@ -235,6 +235,8 @@ BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) +char_array_type = ArrayType(PrimitiveType('char'), None) + class StructOrUnionOrEnum(BaseTypeByIdentity): _attrs_ = ('name',) @@ -478,7 +480,7 @@ try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: - raise NotImplementedError("%r: %s" % (srctype, e)) + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -65,7 +65,7 @@ # The following two 'chained_list_constants' items contains # the head of these two chained lists, as a string that gives the # call to do, if any. - self._chained_list_constants = ['0', '0'] + self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] # prnt = self._prnt # first paste some standard set of lines that are mostly '#define' @@ -138,15 +138,22 @@ prnt() prnt('#endif') - def load_library(self): + def load_library(self, flags=None): # XXX review all usages of 'self' here! 
# import it as a new extension module + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) module = imp.load_dynamic(self.verifier.get_module_name(), self.verifier.modulefilename) except ImportError as e: error = "importing %r: %s" % (self.verifier.modulefilename, e) raise ffiplatform.VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler @@ -228,7 +235,8 @@ converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: - converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) errvalue = '-1' # elif isinstance(tp, model.PointerType): @@ -267,8 +275,8 @@ self._prnt(' if (datasize != 0) {') self._prnt(' if (datasize < 0)') self._prnt(' %s;' % errcode) - self._prnt(' %s = alloca(datasize);' % (tovar,)) - self._prnt(' memset((void *)%s, 0, datasize);' % (tovar,)) + self._prnt(' %s = alloca((size_t)datasize);' % (tovar,)) + self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,)) self._prnt(' if (_cffi_convert_array_from_object(' '(char *)%s, _cffi_type(%d), %s) < 0)' % ( tovar, self._gettypenum(tp), fromvar)) @@ -336,7 +344,7 @@ prnt = self._prnt numargs = len(tp.args) if numargs == 0: - argname = 'no_arg' + argname = 'noarg' elif numargs == 1: argname = 'arg0' else: @@ -386,6 +394,9 @@ prnt(' Py_END_ALLOW_THREADS') prnt() # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') if result_code: prnt(' return %s;' % self._convert_expr_from_c(tp.result, 'result', 'result type')) @@ -452,6 +463,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in 
tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -482,6 +494,8 @@ prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') prnt(' return _cffi_get_struct_layout(nums);') prnt(' /* the next line is not executed, but compiled */') prnt(' %s(0);' % (checkfuncname,)) @@ -653,14 +667,14 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + if enumvalue <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( enumerator, enumerator, enumvalue)) else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( enumerator, enumerator, enumvalue)) prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) + prnt(' if ((%s) <= 0)' % enumerator) prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) prnt(' else') prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % @@ -783,6 +797,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif @@ -828,12 +860,15 @@ PyLong_FromLongLong((long 
long)(x))) #define _cffi_from_c_int(x, type) \ - (((type)-1) > 0 ? /* unsigned */ \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) \ - : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x))) + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) #define _cffi_to_c_int(o, type) \ (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ @@ -844,7 +879,7 @@ : (type)_cffi_to_c_i32(o)) : \ sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ : (type)_cffi_to_c_i64(o)) : \ - (Py_FatalError("unsupported size for type " #type), 0)) + (Py_FatalError("unsupported size for type " #type), (type)0)) #define _cffi_to_c_i8 \ ((int(*)(PyObject *))_cffi_exports[1]) @@ -907,6 +942,7 @@ { PyObject *library; int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -58,12 +58,12 @@ modname = self.verifier.get_module_name() prnt("void %s%s(void) { }\n" % (prefix, modname)) - def load_library(self): + def load_library(self, flags=0): # import it with the CFFI backend backend = self.ffi._backend # needs to make a path that contains '/', on Posix filename = os.path.join(os.curdir, self.verifier.modulefilename) - module = backend.load_library(filename) + module = backend.load_library(filename, flags) # # call loading_gen_struct() to get the struct layout inferred by # the C compiler @@ -235,6 +235,7 @@ 
prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -427,14 +428,14 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( + if enumvalue <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( enumerator, enumerator, enumvalue)) else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( enumerator, enumerator, enumvalue)) prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) + prnt(' if ((%s) <= 0)' % enumerator) prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % @@ -565,6 +566,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -1,12 +1,23 @@ -import sys, os, binascii, imp, shutil -from . 
import __version__ +import sys, os, binascii, shutil +from . import __version_verifier_modules__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): def __init__(self, ffi, preamble, tmpdir=None, modulename=None, - ext_package=None, tag='', force_generic_engine=False, **kwds): + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): self.ffi = ffi self.preamble = preamble if not modulename: @@ -14,14 +25,15 @@ vengine_class = _locate_engine_class(ffi, force_generic_engine) self._vengine = vengine_class(self) self._vengine.patch_extension_kwds(kwds) - self.kwds = kwds + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) # if modulename: if tag: raise TypeError("can't specify both 'modulename' and 'tag'") else: - key = '\x00'.join([sys.version[:3], __version__, preamble, - flattened_kwds] + + key = '\x00'.join([sys.version[:3], __version_verifier_modules__, + preamble, flattened_kwds] + ffi._cdefsources) if sys.version_info >= (3,): key = key.encode('utf-8') @@ -33,7 +45,7 @@ k1, k2) suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() - self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) self.ext_package = ext_package self._has_source = False @@ -97,6 +109,20 @@ def generates_python_module(self): return self._vengine._gen_python_module + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in 
ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + # ---------- def _locate_module(self): @@ -148,7 +174,10 @@ def _load_library(self): assert self._has_module - return self._vengine.load_library() + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() # ____________________________________________________________ @@ -181,6 +210,9 @@ def _caller_dir_pycache(): if _TMPDIR: return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result filename = sys._getframe(2).f_code.co_filename return os.path.abspath(os.path.join(os.path.dirname(filename), '__pycache__')) @@ -222,11 +254,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext if sys.platform == 'win32': diff --git a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py @@ -2,7 +2,7 @@ import py import platform import sys, ctypes -from cffi import FFI, CDefError +from cffi import FFI, CDefError, FFIError from pypy.module.test_lib_pypy.cffi_tests.support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) @@ -917,6 +917,16 @@ assert int(invalid_value) == 2 assert ffi.string(invalid_value) == "2" + def test_enum_char_hex_oct(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(r"enum foo{A='!', B='\'', C=0x10, D=010, E=- 0x10, F=-010};") + assert ffi.string(ffi.cast("enum foo", ord('!'))) == "A" + assert ffi.string(ffi.cast("enum foo", ord("'"))) == "B" + assert ffi.string(ffi.cast("enum foo", 16)) == "C" + assert ffi.string(ffi.cast("enum foo", 8)) == "D" + assert ffi.string(ffi.cast("enum foo", -16)) == "E" + assert ffi.string(ffi.cast("enum foo", -8)) == "F" + def test_array_of_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { int a, b; };") @@ -950,6 +960,15 @@ assert ffi.offsetof("struct foo", "b") == 4 assert ffi.offsetof("struct foo", "c") == 8 + def test_offsetof_nested(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { int a, b, c; };" + "struct bar { struct foo d, e; };") + assert ffi.offsetof("struct bar", "e") == 12 + assert ffi.offsetof("struct bar", "e.a") == 12 + assert ffi.offsetof("struct bar", "e.b") == 16 + assert ffi.offsetof("struct bar", "e.c") == 20 + def test_alignof(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { char a; short b; char c; };") @@ -1496,6 +1515,16 @@ assert a == ffi.addressof(p, 'y') assert a != ffi.addressof(p, 'x') + def test_addressof_field_nested(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo_s { int x, y; };" + "struct 
bar_s { struct foo_s a, b; };") + p = ffi.new("struct bar_s *") + a = ffi.addressof(p[0], 'b.y') + assert int(ffi.cast("uintptr_t", a)) == ( + int(ffi.cast("uintptr_t", p)) + + ffi.sizeof("struct foo_s") + ffi.sizeof("int")) + def test_addressof_anonymous_struct(self): ffi = FFI() ffi.cdef("typedef struct { int x; } foo_t;") @@ -1565,6 +1594,12 @@ p = ffi2.new("foo_p", [142]) assert p.x == 142 + def test_ignore_multiple_declarations_of_constant(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("#define FOO 42") + ffi.cdef("#define FOO 42") + py.test.raises(FFIError, ffi.cdef, "#define FOO 43") + def test_struct_packed(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct nonpacked { char a; int b; };") diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_cdata.py b/pypy/module/test_lib_pypy/cffi_tests/test_cdata.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_cdata.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_cdata.py @@ -20,6 +20,8 @@ return FakeType("void") def new_pointer_type(self, x): return FakeType('ptr-to-%r' % (x,)) + def new_array_type(self, x, y): + return FakeType('array-from-%r-len-%r' % (x, y)) def cast(self, x, y): return 'casted!' 
def _get_types(self): diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py @@ -20,8 +20,8 @@ ffi.cdef("struct foo_s { int a,b,c,d,e; int x:1; };") e = py.test.raises(NotImplementedError, ffi.callback, "struct foo_s foo(void)", lambda: 42) - assert str(e.value) == (": " - "cannot pass as argument or return value a struct with bit fields") + assert str(e.value) == ("struct foo_s(*)(): " + "callback with unsupported argument or return type or with '...'") def test_inspecttype(self): ffi = FFI(backend=self.Backend()) @@ -123,7 +123,7 @@ self.check("int a:2; short b:15; char c:2; char y;", 5, 4, 8) self.check("int a:2; char b:1; char c:1; char y;", 1, 4, 4) - @pytest.mark.skipif("platform.machine().startswith('arm')") + @pytest.mark.skipif("platform.machine().startswith(('arm', 'aarch64'))") def test_bitfield_anonymous_no_align(self): L = FFI().alignof("long long") self.check("char y; int :1;", 0, 1, 2) @@ -136,7 +136,8 @@ self.check("char x; long long z:57; char y;", L + 8, L, L + 8 + L) self.check("char x; long long :57; char y;", L + 8, 1, L + 9) - @pytest.mark.skipif("not platform.machine().startswith('arm')") + @pytest.mark.skipif( + "not platform.machine().startswith(('arm', 'aarch64'))") def test_bitfield_anonymous_align_arm(self): L = FFI().alignof("long long") self.check("char y; int :1;", 0, 4, 4) @@ -149,7 +150,7 @@ self.check("char x; long long z:57; char y;", L + 8, L, L + 8 + L) self.check("char x; long long :57; char y;", L + 8, L, L + 8 + L) - @pytest.mark.skipif("platform.machine().startswith('arm')") + @pytest.mark.skipif("platform.machine().startswith(('arm', 'aarch64'))") def test_bitfield_zero(self): L = FFI().alignof("long long") self.check("char y; int :0;", 0, 1, 4) @@ -160,7 +161,8 @@ self.check("char x; int :0; short b:1; char y;", 5, 2, 6) 
self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8) - @pytest.mark.skipif("not platform.machine().startswith('arm')") + @pytest.mark.skipif( + "not platform.machine().startswith(('arm', 'aarch64'))") def test_bitfield_zero_arm(self): L = FFI().alignof("long long") self.check("char y; int :0;", 0, 4, 4) @@ -212,3 +214,12 @@ code, message = ffi.getwinerror(-1) assert code == 2 assert message == "The system cannot find the file specified" + + def test_from_buffer(self): + import array + ffi = FFI() + a = array.array('H', [10000, 20000, 30000]) + c = ffi.from_buffer(a) + assert ffi.typeof(c) is ffi.typeof("char[]") + ffi.cast("unsigned short *", c)[1] += 500 + assert list(a) == [10000, 20500, 30000] diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py @@ -289,3 +289,14 @@ decl = ast.children()[0][1] node = decl.type assert p._is_constant_globalvar(node) == expected_output + +def test_enum(): + ffi = FFI() + ffi.cdef(""" + enum Enum { POS = +1, TWO = 2, NIL = 0, NEG = -1}; + """) + C = ffi.dlopen(None) + assert C.POS == 1 + assert C.TWO == 2 + assert C.NIL == 0 + assert C.NEG == -1 diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -1,7 +1,7 @@ # Generated by pypy/tool/import_cffi.py import py, re import sys, os, math, weakref -from cffi import FFI, VerificationError, VerificationMissing, model +from cffi import FFI, VerificationError, VerificationMissing, model, FFIError from pypy.module.test_lib_pypy.cffi_tests.support import * @@ -15,12 +15,13 @@ else: if (sys.platform == 'darwin' and [int(x) for x in os.uname()[2].split('.')] >= [11, 0, 0]): + # assume a standard clang or gcc + 
extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion'] # special things for clang - extra_compile_args = [ - '-Werror', '-Qunused-arguments', '-Wno-error=shorten-64-to-32'] + extra_compile_args.append('-Qunused-arguments') else: # assume a standard gcc - extra_compile_args = ['-Werror'] + extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion'] class FFI(FFI): def verify(self, *args, **kwds): @@ -90,11 +91,48 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) +def _Wconversion(cdef, source, **kargs): + if sys.platform == 'win32': + py.test.skip("needs GCC or Clang") + ffi = FFI() + ffi.cdef(cdef) + py.test.raises(VerificationError, ffi.verify, source, **kargs) + extra_compile_args_orig = extra_compile_args[:] + extra_compile_args.remove('-Wconversion') + try: + lib = ffi.verify(source, **kargs) + finally: + extra_compile_args[:] = extra_compile_args_orig + return lib + +def test_Wconversion_unsigned(): + _Wconversion("unsigned foo(void);", + "int foo(void) { return -1;}") + +def test_Wconversion_integer(): + _Wconversion("short foo(void);", + "long long foo(void) { return 1<", libraries=lib_m) + res = lib.sin(1.23) + assert res != math.sin(1.23) # not exact, because of double->float + assert abs(res - math.sin(1.23)) < 1E-5 + +def test_Wconversion_float2int(): + _Wconversion("int sinf(float);", + "#include ", libraries=lib_m) + +def test_Wconversion_double2int(): + _Wconversion("int sin(double);", + "#include ", libraries=lib_m) + def test_rounding_1(): ffi = FFI() - ffi.cdef("float sin(double x);") + ffi.cdef("double sinf(float x);") lib = ffi.verify('#include ', libraries=lib_m) - res = lib.sin(1.23) + res = lib.sinf(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -113,14 +151,21 @@ assert lib.strlen(b"hi there!") == 9 def test_strlen_approximate(): - ffi = FFI() - ffi.cdef("int strlen(char *s);") - lib = ffi.verify("#include ") + 
lib = _Wconversion("int strlen(char *s);", + "#include ") assert lib.strlen(b"hi there!") == 9 +def test_return_approximate(): + for typename in ['short', 'int', 'long', 'long long']: + ffi = FFI() + ffi.cdef("%s foo(signed char x);" % typename) + lib = ffi.verify("signed char foo(signed char x) { return x;}") + assert lib.foo(-128) == -128 + assert lib.foo(+127) == +127 + def test_strlen_array_of_char(): ffi = FFI() - ffi.cdef("int strlen(char[]);") + ffi.cdef("size_t strlen(char[]);") lib = ffi.verify("#include ") assert lib.strlen(b"hello") == 5 @@ -209,8 +254,8 @@ ffi = FFI() ffi.cdef('\n'.join(["%s foo_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in typenames])) - lib = ffi.verify('\n'.join(["%s foo_%s(%s x) { return x+1; }" % - (tp, tp.replace(' ', '_'), tp) + lib = ffi.verify('\n'.join(["%s foo_%s(%s x) { return (%s)(x+1); }" % + (tp, tp.replace(' ', '_'), tp, tp) for tp in typenames])) for typename in typenames: foo = getattr(lib, 'foo_%s' % typename.replace(' ', '_')) @@ -316,7 +361,7 @@ def test_char_type(): ffi = FFI() ffi.cdef("char foo(char);") - lib = ffi.verify("char foo(char x) { return x+1; }") + lib = ffi.verify("char foo(char x) { return ++x; }") assert lib.foo(b"A") == b"B" py.test.raises(TypeError, lib.foo, b"bar") py.test.raises(TypeError, lib.foo, "bar") @@ -386,7 +431,7 @@ ffi = FFI() ffi.cdef("typedef struct foo_s foo_t; int bar(foo_t *);") lib = ffi.verify("typedef struct foo_s foo_t;\n" - "int bar(foo_t *f) { return 42; }\n") + "int bar(foo_t *f) { (void)f; return 42; }\n") assert lib.bar(ffi.NULL) == 42 def test_ffi_full_struct(): @@ -897,7 +942,7 @@ static int foo(token_t *tk) { if (!tk) return -42; - *tk += 1.601; + *tk += 1.601f; return (int)*tk; } #define TOKEN_SIZE sizeof(token_t) @@ -992,7 +1037,7 @@ long a; }; int foo(struct foo_s s) { - return s.a - (int)s.b; + return (int)s.a - (int)s.b; } """) s = ffi.new("struct foo_s *", [100, 1]) @@ -1009,7 +1054,7 @@ long a; }; int foo1(struct foo_s s) { - return s.a - (int)s.b; + 
return (int)s.a - (int)s.b; } int (*foo)(struct foo_s s) = &foo1; """) @@ -1068,7 +1113,7 @@ def test_array_as_argument(): ffi = FFI() ffi.cdef(""" - int strlen(char string[]); + size_t strlen(char string[]); """) ffi.verify("#include ") @@ -1080,7 +1125,7 @@ """) lib = ffi.verify(""" enum foo_e { AA, CC, BB }; - int foo_func(enum foo_e e) { return e; } + int foo_func(enum foo_e e) { return (int)e; } """) assert lib.foo_func(lib.BB) == 2 py.test.raises(TypeError, lib.foo_func, "BB") @@ -1093,7 +1138,7 @@ """) lib = ffi.verify(""" enum foo_e { AA, CC, BB }; - enum foo_e foo_func(int x) { return x; } + enum foo_e foo_func(int x) { return (enum foo_e)x; } """) assert lib.foo_func(lib.BB) == lib.BB == 2 @@ -1128,6 +1173,19 @@ assert lib.AA == 0 assert lib.BB == 2 +def test_typedef_enum_as_argument(): + ffi = FFI() + ffi.cdef(""" + typedef enum { AA, BB, ... } foo_t; + int foo_func(foo_t); + """) + lib = ffi.verify(""" + typedef enum { AA, CC, BB } foo_t; + int foo_func(foo_t e) { return (int)e; } + """) + assert lib.foo_func(lib.BB) == lib.BB == 2 + py.test.raises(TypeError, lib.foo_func, "BB") + def test_typedef_enum_as_function_result(): ffi = FFI() ffi.cdef(""" @@ -1136,7 +1194,7 @@ """) lib = ffi.verify(""" typedef enum { AA, CC, BB } foo_t; - foo_t foo_func(int x) { return x; } + foo_t foo_func(int x) { return (foo_t)x; } """) assert lib.foo_func(lib.BB) == lib.BB == 2 @@ -1292,7 +1350,7 @@ """) def test_tmpdir(): - import tempfile, os, shutil + import tempfile, os from pypy.module.test_lib_pypy.cffi_tests.udir import udir tmpdir = tempfile.mkdtemp(dir=str(udir)) ffi = FFI() @@ -1301,6 +1359,20 @@ assert os.listdir(tmpdir) assert lib.foo(100) == 142 +def test_relative_to(): + import tempfile, os + from pypy.module.test_lib_pypy.cffi_tests.udir import udir + tmpdir = tempfile.mkdtemp(dir=str(udir)) + ffi = FFI() + ffi.cdef("int foo(int);") + f = open(os.path.join(tmpdir, 'foo.h'), 'w') + print >> f, "int foo(int a) { return a + 42; }" + f.close() + lib = 
ffi.verify('#include "foo.h"', + include_dirs=['.'], + relative_to=os.path.join(tmpdir, 'x')) + assert lib.foo(100) == 142 + def test_bug1(): ffi = FFI() ffi.cdef(""" @@ -1677,7 +1749,7 @@ static int c_callback(int how_many, ...) { va_list ap; /* collect the "..." arguments into the values[] array */ - int i, *values = alloca(how_many * sizeof(int)); + int i, *values = alloca((size_t)how_many * sizeof(int)); va_start(ap, how_many); for (i=0; i + """, libraries=['Kernel32']) + outbuf = ffi.new("TCHAR[]", 200) + n = lib.GetModuleFileName(ffi.NULL, outbuf, 500) + assert 0 < n < 500 + for i in range(n): + print repr(outbuf[i]) + assert ord(outbuf[i]) != 0 + assert ord(outbuf[n]) == 0 + assert ord(outbuf[0]) < 128 # should be a letter, or '\' + +def test_use_local_dir(): + ffi = FFI() + lib = ffi.verify("", modulename="test_use_local_dir") + this_dir = os.path.dirname(__file__) + pycache_files = os.listdir(os.path.join(this_dir, '__pycache__')) + assert any('test_use_local_dir' in s for s in pycache_files) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py @@ -18,7 +18,7 @@ def teardown_class(self): if udir.isdir(): - udir.remove() + udir.remove(ignore_errors=True) def test_locate_engine_class(self): cls = _locate_engine_class(FFI(), self.generic) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py @@ -76,7 +76,7 @@ class TestZIntegration(object): def teardown_class(self): if udir.isdir(): - udir.remove() + udir.remove(ignore_errors=True) def test_infrastructure(self): run_setup_and_program('infrastructure', ''' From noreply at buildbot.pypy.org Fri Jan 9 11:57:42 2015 
From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 11:57:42 +0100 (CET) Subject: [pypy-commit] cffi default: improve the error message to match pypy's Message-ID: <20150109105742.039871C1148@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1626:c5beb414dbb8 Date: 2015-01-09 11:56 +0100 http://bitbucket.org/cffi/cffi/changeset/c5beb414dbb8/ Log: improve the error message to match pypy's diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4153,9 +4153,10 @@ return ffistruct; } else { + const char *place = is_result_type ? "return value" : "argument"; PyErr_Format(PyExc_NotImplementedError, - "ctype '%s' not supported as argument or return value", - ct->ct_name); + "ctype '%s' (size %zd) not supported as %s", + ct->ct_name, ct->ct_size, place); return NULL; } } diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -2077,13 +2077,18 @@ ffi.cdef("typedef union { int a; float b; } Data;" "typedef struct { int a:2; } MyStr;" "typedef void (*foofunc_t)(Data);" + "typedef Data (*bazfunc_t)(void);" "typedef MyStr (*barfunc_t)(void);") fooptr = ffi.cast("foofunc_t", 123) + bazptr = ffi.cast("bazfunc_t", 123) barptr = ffi.cast("barfunc_t", 123) # assert did not crash so far e = py.test.raises(NotImplementedError, fooptr, ffi.new("Data *")) assert str(e.value) == ( - "ctype 'Data' not supported as argument or return value") + "ctype 'Data' (size 4) not supported as argument") + e = py.test.raises(NotImplementedError, bazptr) + assert str(e.value) == ( + "ctype 'Data' (size 4) not supported as return value") e = py.test.raises(NotImplementedError, barptr) assert str(e.value) == ( "ctype 'MyStr' not supported as argument or return value " From noreply at buildbot.pypy.org Fri Jan 9 11:57:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 11:57:43 +0100 (CET) Subject: [pypy-commit] cffi default: typo 
Message-ID: <20150109105743.169D31C1148@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1627:ff0c3c90d309 Date: 2015-01-09 11:58 +0100 http://bitbucket.org/cffi/cffi/changeset/ff0c3c90d309/ Log: typo diff --git a/cffi/commontypes.py b/cffi/commontypes.py --- a/cffi/commontypes.py +++ b/cffi/commontypes.py @@ -243,7 +243,7 @@ "LPTSTR": "set-unicode-needed", "PTSTR": "set-unicode-needed", "PTBYTE": "set-unicode-needed", - "PTCHAR;": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", }) return result From noreply at buildbot.pypy.org Fri Jan 9 11:57:44 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 11:57:44 +0100 (CET) Subject: [pypy-commit] cffi default: fix test Message-ID: <20150109105744.2A5761C1148@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1628:a0e43b0f5743 Date: 2015-01-09 11:58 +0100 http://bitbucket.org/cffi/cffi/changeset/a0e43b0f5743/ Log: fix test diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -247,7 +247,8 @@ ct = win_common_types(maxsize) clear_all(ct) for key in sorted(ct): - resolve_common_type(key) + if ct[key] != 'set-unicode-needed': + resolve_common_type(key) # assert did not crash # now try to use e.g. 
WPARAM (-> UINT_PTR -> unsigned 32/64-bit) for maxsize in [2**32-1, 2**64-1]: From noreply at buildbot.pypy.org Fri Jan 9 11:59:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 11:59:53 +0100 (CET) Subject: [pypy-commit] pypy default: re-run import_cffi.py Message-ID: <20150109105953.DC9DB1C1148@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75271:7559d793c6b0 Date: 2015-01-09 11:58 +0100 http://bitbucket.org/pypy/pypy/changeset/7559d793c6b0/ Log: re-run import_cffi.py diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ b/lib_pypy/cffi/commontypes.py @@ -243,7 +243,7 @@ "LPTSTR": "set-unicode-needed", "PTSTR": "set-unicode-needed", "PTBYTE": "set-unicode-needed", - "PTCHAR;": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", }) return result diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py @@ -248,7 +248,8 @@ ct = win_common_types(maxsize) clear_all(ct) for key in sorted(ct): - resolve_common_type(key) + if ct[key] != 'set-unicode-needed': + resolve_common_type(key) # assert did not crash # now try to use e.g. WPARAM (-> UINT_PTR -> unsigned 32/64-bit) for maxsize in [2**32-1, 2**64-1]: diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -1221,6 +1221,8 @@ import platform if platform.machine().startswith('sparc'): py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') + elif platform.machine() == 'mips64' and sys.maxsize > 2**32: + py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. 
XXX think about something better :-( ffi = FFI() @@ -2076,13 +2078,18 @@ ffi.cdef("typedef union { int a; float b; } Data;" "typedef struct { int a:2; } MyStr;" "typedef void (*foofunc_t)(Data);" + "typedef Data (*bazfunc_t)(void);" "typedef MyStr (*barfunc_t)(void);") fooptr = ffi.cast("foofunc_t", 123) + bazptr = ffi.cast("bazfunc_t", 123) barptr = ffi.cast("barfunc_t", 123) # assert did not crash so far e = py.test.raises(NotImplementedError, fooptr, ffi.new("Data *")) assert str(e.value) == ( - "ctype 'Data' not supported as argument or return value") + "ctype 'Data' (size 4) not supported as argument") + e = py.test.raises(NotImplementedError, bazptr) + assert str(e.value) == ( + "ctype 'Data' (size 4) not supported as return value") e = py.test.raises(NotImplementedError, barptr) assert str(e.value) == ( "ctype 'MyStr' not supported as argument or return value " From noreply at buildbot.pypy.org Fri Jan 9 14:06:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 14:06:43 +0100 (CET) Subject: [pypy-commit] cffi default: Argh! Obscure bug tracked down to this test, which defines a Message-ID: <20150109130643.C09781D26C4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1629:1b5d326c49bd Date: 2015-01-09 14:07 +0100 http://bitbucket.org/cffi/cffi/changeset/1b5d326c49bd/ Log: Argh! Obscure bug tracked down to this test, which defines a variable "foo" with RTLD_GLOBAL. See comments. diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -2055,21 +2055,28 @@ assert ffi.getwinerror()[0] == n def test_verify_dlopen_flags(): + # Careful with RTLD_GLOBAL. If by chance the FFI is not deleted + # promptly, like on PyPy, then other tests may see the same + # exported symbols as well. So we must not export a simple name + # like 'foo'! 
ffi1 = FFI() - ffi1.cdef("int foo;") + ffi1.cdef("int foo_verify_dlopen_flags;") - lib1 = ffi1.verify("int foo;", flags=ffi1.RTLD_GLOBAL | ffi1.RTLD_LAZY) + lib1 = ffi1.verify("int foo_verify_dlopen_flags;", + flags=ffi1.RTLD_GLOBAL | ffi1.RTLD_LAZY) lib2 = get_second_lib() - lib1.foo = 42 - - assert lib2.foo == 42 + lib1.foo_verify_dlopen_flags = 42 + assert lib2.foo_verify_dlopen_flags == 42 + lib2.foo_verify_dlopen_flags += 1 + assert lib1.foo_verify_dlopen_flags == 43 def get_second_lib(): # Hack, using modulename makes the test fail ffi2 = FFI() - ffi2.cdef("int foo;") - lib2 = ffi2.verify("int foo;", flags=ffi2.RTLD_GLOBAL | ffi2.RTLD_LAZY) + ffi2.cdef("int foo_verify_dlopen_flags;") + lib2 = ffi2.verify("int foo_verify_dlopen_flags;", + flags=ffi2.RTLD_GLOBAL | ffi2.RTLD_LAZY) return lib2 def test_consider_not_implemented_function_type(): From noreply at buildbot.pypy.org Fri Jan 9 14:07:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Jan 2015 14:07:46 +0100 (CET) Subject: [pypy-commit] pypy default: re-run import_cffi.py Message-ID: <20150109130746.4E5151D26C4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75272:0cafb4f648ae Date: 2015-01-09 14:07 +0100 http://bitbucket.org/pypy/pypy/changeset/0cafb4f648ae/ Log: re-run import_cffi.py diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -2056,21 +2056,28 @@ assert ffi.getwinerror()[0] == n def test_verify_dlopen_flags(): + # Careful with RTLD_GLOBAL. If by chance the FFI is not deleted + # promptly, like on PyPy, then other tests may see the same + # exported symbols as well. So we must not export a simple name + # like 'foo'! 
ffi1 = FFI() - ffi1.cdef("int foo;") + ffi1.cdef("int foo_verify_dlopen_flags;") - lib1 = ffi1.verify("int foo;", flags=ffi1.RTLD_GLOBAL | ffi1.RTLD_LAZY) + lib1 = ffi1.verify("int foo_verify_dlopen_flags;", + flags=ffi1.RTLD_GLOBAL | ffi1.RTLD_LAZY) lib2 = get_second_lib() - lib1.foo = 42 - - assert lib2.foo == 42 + lib1.foo_verify_dlopen_flags = 42 + assert lib2.foo_verify_dlopen_flags == 42 + lib2.foo_verify_dlopen_flags += 1 + assert lib1.foo_verify_dlopen_flags == 43 def get_second_lib(): # Hack, using modulename makes the test fail ffi2 = FFI() - ffi2.cdef("int foo;") - lib2 = ffi2.verify("int foo;", flags=ffi2.RTLD_GLOBAL | ffi2.RTLD_LAZY) + ffi2.cdef("int foo_verify_dlopen_flags;") + lib2 = ffi2.verify("int foo_verify_dlopen_flags;", + flags=ffi2.RTLD_GLOBAL | ffi2.RTLD_LAZY) return lib2 def test_consider_not_implemented_function_type(): From noreply at buildbot.pypy.org Sat Jan 10 11:48:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Jan 2015 11:48:56 +0100 (CET) Subject: [pypy-commit] pypy default: Skip break_cycles(), silencing the test which fails occasionally. There is Message-ID: <20150110104856.228471C1148@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75273:f278784dd005 Date: 2015-01-10 11:48 +0100 http://bitbucket.org/pypy/pypy/changeset/f278784dd005/ Log: Skip break_cycles(), silencing the test which fails occasionally. There is no point any more, because that function is not used any more. diff --git a/rpython/tool/algo/graphlib.py b/rpython/tool/algo/graphlib.py --- a/rpython/tool/algo/graphlib.py +++ b/rpython/tool/algo/graphlib.py @@ -182,6 +182,8 @@ """Enumerates a reasonably minimal set of edges that must be removed to make the graph acyclic.""" + import py; py.test.skip("break_cycles() is not used any more") + # the approach is as follows: starting from each root, find some set # of cycles using a simple depth-first search. Then break the # edge that is part of the most cycles. Repeat. 
From noreply at buildbot.pypy.org Sat Jan 10 12:11:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Jan 2015 12:11:02 +0100 (CET) Subject: [pypy-commit] pypy default: gcc 4.4 on Linux 32 in debug mode seems to produce some sort of thunk Message-ID: <20150110111102.4B0381C1148@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75274:f91163d135d7 Date: 2015-01-10 11:11 +0000 http://bitbucket.org/pypy/pypy/changeset/f91163d135d7/ Log: gcc 4.4 on Linux 32 in debug mode seems to produce some sort of thunk function with just one instruction, which is never officially ended. diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -1526,6 +1526,9 @@ yield True, functionlines in_function = False functionlines = [] + if in_function and ".get_pc_thunk.bx" in functionlines[0]: + in_function = False # xxx? ignore this rare unclosed stub at + # the end of the file assert not in_function, ( "missed the end of the previous function") yield False, functionlines From noreply at buildbot.pypy.org Sat Jan 10 13:36:50 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:36:50 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_termios: A branch to move ll_termios to rlib. Message-ID: <20150110123650.822B41D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_termios Changeset: r75275:c277a357ccc4 Date: 2015-01-09 23:41 +0100 http://bitbucket.org/pypy/pypy/changeset/c277a357ccc4/ Log: A branch to move ll_termios to rlib. 
From noreply at buildbot.pypy.org Sat Jan 10 13:36:52 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:36:52 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_termios: termios.error is not used anymore in RPython Message-ID: <20150110123652.046B21D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_termios Changeset: r75276:c3cfdcd2fb78 Date: 2015-01-09 23:42 +0100 http://bitbucket.org/pypy/pypy/changeset/c3cfdcd2fb78/ Log: termios.error is not used anymore in RPython diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -451,11 +451,3 @@ pass else: FORCE_ATTRIBUTES_INTO_CLASSES[WindowsError] = {'winerror': SomeInteger()} - -try: - import termios -except ImportError: - pass -else: - FORCE_ATTRIBUTES_INTO_CLASSES[termios.error] = \ - {'args': SomeTuple([SomeInteger(), SomeString()])} From noreply at buildbot.pypy.org Sat Jan 10 13:36:53 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:36:53 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_termios: Move tcsetattr from ll_termios to rtermios Message-ID: <20150110123653.5C54A1D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_termios Changeset: r75277:25c15bf74988 Date: 2015-01-10 00:19 +0100 http://bitbucket.org/pypy/pypy/changeset/25c15bf74988/ Log: Move tcsetattr from ll_termios to rtermios diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -3,11 +3,54 @@ # returns list of mostly-strings of length one, but with few ints # inside, so we make sure it works -import termios -from termios import * +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.tool import rffi_platform +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from rpython.rlib import rposix + +eci = ExternalCompilationInfo( + includes 
= ['termios.h', 'unistd.h'] +) + +class CConfig: + _compilation_info_ = eci + NCCS = rffi_platform.DefinedConstantInteger('NCCS') + TCSANOW = rffi_platform.ConstantInteger('TCSANOW') + _HAVE_STRUCT_TERMIOS_C_ISPEED = rffi_platform.Defined( + '_HAVE_STRUCT_TERMIOS_C_ISPEED') + _HAVE_STRUCT_TERMIOS_C_OSPEED = rffi_platform.Defined( + '_HAVE_STRUCT_TERMIOS_C_OSPEED') + +c_config = rffi_platform.configure(CConfig) +NCCS = c_config['NCCS'] +TCSANOW = c_config['TCSANOW'] + +TCFLAG_T = rffi.UINT +CC_T = rffi.UCHAR +SPEED_T = rffi.UINT + +_add = [] +if c_config['_HAVE_STRUCT_TERMIOS_C_ISPEED']: + _add.append(('c_ispeed', SPEED_T)) +if c_config['_HAVE_STRUCT_TERMIOS_C_OSPEED']: + _add.append(('c_ospeed', SPEED_T)) +TERMIOSP = rffi.CStructPtr('termios', ('c_iflag', TCFLAG_T), ('c_oflag', TCFLAG_T), + ('c_cflag', TCFLAG_T), ('c_lflag', TCFLAG_T), + ('c_line', CC_T), + ('c_cc', lltype.FixedSizeArray(CC_T, NCCS)), *_add) + +def c_external(name, args, result): + return rffi.llexternal(name, args, result, compilation_info=eci) + +c_tcsetattr = c_external('tcsetattr', [rffi.INT, rffi.INT, TERMIOSP], rffi.INT) +c_cfsetispeed = c_external('cfsetispeed', [TERMIOSP, SPEED_T], rffi.INT) +c_cfsetospeed = c_external('cfsetospeed', [TERMIOSP, SPEED_T], rffi.INT) + def tcgetattr(fd): # NOT_RPYTHON + import termios try: lst = list(termios.tcgetattr(fd)) except termios.error, e: @@ -22,17 +65,23 @@ lst[-1] = next_cc return tuple(lst) -def tcsetattr(fd, when, mode): - # NOT_RPYTHON - # there are some bizarre requirements for that, stealing directly - # from cpython - mode_l = list(mode) - if mode_l[3] & termios.ICANON: - cc = mode_l[-1] - cc[termios.VMIN] = ord(cc[termios.VMIN]) - cc[termios.VTIME] = ord(cc[termios.VTIME]) - mode_l[-1] = cc - try: - return termios.tcsetattr(fd, when, mode_l) - except termios.error, e: - raise OSError(*e.args) + +# This function is not an exact replacement of termios.tcsetattr: +# the last attribute must be a list of chars. 
+def tcsetattr(fd, when, attributes): + with lltype.scoped_alloc(TERMIOSP.TO) as c_struct: + rffi.setintfield(c_struct, 'c_c_iflag', attributes[0]) + rffi.setintfield(c_struct, 'c_c_oflag', attributes[1]) + rffi.setintfield(c_struct, 'c_c_cflag', attributes[2]) + rffi.setintfield(c_struct, 'c_c_lflag', attributes[3]) + ispeed = attributes[4] + ospeed = attributes[5] + cc = attributes[6] + for i in range(NCCS): + c_struct.c_c_cc[i] = rffi.r_uchar(ord(cc[i][0])) + if c_cfsetispeed(c_struct, ispeed) < 0: + raise OSError(rposix.get_errno(), 'tcsetattr failed') + if c_cfsetospeed(c_struct, ospeed) < 0: + raise OSError(rposix.get_errno(), 'tcsetattr failed') + if c_tcsetattr(fd, when, c_struct) < 0: + raise OSError(rposix.get_errno(), 'tcsetattr failed') diff --git a/rpython/rtyper/module/ll_termios.py b/rpython/rtyper/module/ll_termios.py --- a/rpython/rtyper/module/ll_termios.py +++ b/rpython/rtyper/module/ll_termios.py @@ -50,11 +50,8 @@ def c_external(name, args, result): return rffi.llexternal(name, args, result, compilation_info=eci) -c_tcsetattr = c_external('tcsetattr', [INT, INT, TERMIOSP], INT) c_cfgetispeed = c_external('cfgetispeed', [TERMIOSP], SPEED_T) c_cfgetospeed = c_external('cfgetospeed', [TERMIOSP], SPEED_T) -c_cfsetispeed = c_external('cfsetispeed', [TERMIOSP, SPEED_T], INT) -c_cfsetospeed = c_external('cfsetospeed', [TERMIOSP, SPEED_T], INT) c_tcsendbreak = c_external('tcsendbreak', [INT, INT], INT) c_tcdrain = c_external('tcdrain', [INT], INT) c_tcflush = c_external('tcflush', [INT, INT], INT) @@ -81,32 +78,6 @@ register_external(rtermios.tcgetattr, [int], (int, int, int, int, int, int, [str]), llimpl=tcgetattr_llimpl, export_name='termios.tcgetattr') -def tcsetattr_llimpl(fd, when, attributes): - c_struct = lltype.malloc(TERMIOSP.TO, flavor='raw') - try: - c_struct.c_c_iflag = r_uint(attributes[0]) - c_struct.c_c_oflag = r_uint(attributes[1]) - c_struct.c_c_cflag = r_uint(attributes[2]) - c_struct.c_c_lflag = r_uint(attributes[3]) - ispeed = 
r_uint(attributes[4]) - ospeed = r_uint(attributes[5]) - cc = attributes[6] - for i in range(NCCS): - c_struct.c_c_cc[i] = rffi.r_uchar(ord(cc[i][0])) - if c_cfsetispeed(c_struct, ispeed) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') - if c_cfsetospeed(c_struct, ospeed) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') - if c_tcsetattr(fd, when, c_struct) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') - finally: - lltype.free(c_struct, flavor='raw') - -r_uint = rffi.r_uint -register_external(rtermios.tcsetattr, [int, int, (int, int, int, - int, int, int, [str])], llimpl=tcsetattr_llimpl, - export_name='termios.tcsetattr') - # a bit C-c C-v code follows... def tcsendbreak_llimpl(fd, duration): From noreply at buildbot.pypy.org Sat Jan 10 13:36:54 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:36:54 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_termios: Move tcgetattr Message-ID: <20150110123654.B88FC1D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_termios Changeset: r75278:bb1f713389f1 Date: 2015-01-10 00:24 +0100 http://bitbucket.org/pypy/pypy/changeset/bb1f713389f1/ Log: Move tcgetattr diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -8,6 +8,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib import rposix +from rpython.rlib.rarithmetic import intmask eci = ExternalCompilationInfo( includes = ['termios.h', 'unistd.h'] @@ -43,27 +44,25 @@ def c_external(name, args, result): return rffi.llexternal(name, args, result, compilation_info=eci) +c_tcgetattr = c_external('tcgetattr', [rffi.INT, TERMIOSP], rffi.INT) c_tcsetattr = c_external('tcsetattr', [rffi.INT, rffi.INT, TERMIOSP], rffi.INT) +c_cfgetispeed = c_external('cfgetispeed', [TERMIOSP], SPEED_T) +c_cfgetospeed = c_external('cfgetospeed', [TERMIOSP], SPEED_T) c_cfsetispeed = 
c_external('cfsetispeed', [TERMIOSP, SPEED_T], rffi.INT) c_cfsetospeed = c_external('cfsetospeed', [TERMIOSP, SPEED_T], rffi.INT) def tcgetattr(fd): - # NOT_RPYTHON - import termios - try: - lst = list(termios.tcgetattr(fd)) - except termios.error, e: - raise OSError(*e.args) - cc = lst[-1] - next_cc = [] - for c in cc: - if isinstance(c, int): - next_cc.append(chr(c)) - else: - next_cc.append(c) - lst[-1] = next_cc - return tuple(lst) + with lltype.scoped_alloc(TERMIOSP.TO) as c_struct: + if c_tcgetattr(fd, c_struct) < 0: + raise OSError(rposix.get_errno(), 'tcgetattr failed') + cc = [chr(c_struct.c_c_cc[i]) for i in range(NCCS)] + ispeed = c_cfgetispeed(c_struct) + ospeed = c_cfgetospeed(c_struct) + result = (intmask(c_struct.c_c_iflag), intmask(c_struct.c_c_oflag), + intmask(c_struct.c_c_cflag), intmask(c_struct.c_c_lflag), + intmask(ispeed), intmask(ospeed), cc) + return result # This function is not an exact replacement of termios.tcsetattr: diff --git a/rpython/rtyper/module/ll_termios.py b/rpython/rtyper/module/ll_termios.py --- a/rpython/rtyper/module/ll_termios.py +++ b/rpython/rtyper/module/ll_termios.py @@ -50,34 +50,11 @@ def c_external(name, args, result): return rffi.llexternal(name, args, result, compilation_info=eci) -c_cfgetispeed = c_external('cfgetispeed', [TERMIOSP], SPEED_T) -c_cfgetospeed = c_external('cfgetospeed', [TERMIOSP], SPEED_T) c_tcsendbreak = c_external('tcsendbreak', [INT, INT], INT) c_tcdrain = c_external('tcdrain', [INT], INT) c_tcflush = c_external('tcflush', [INT, INT], INT) c_tcflow = c_external('tcflow', [INT, INT], INT) -c_tcgetattr = c_external('tcgetattr', [INT, TERMIOSP], INT) - -def tcgetattr_llimpl(fd): - c_struct = lltype.malloc(TERMIOSP.TO, flavor='raw') - - try: - if c_tcgetattr(fd, c_struct) < 0: - raise OSError(rposix.get_errno(), 'tcgetattr failed') - cc = [chr(c_struct.c_c_cc[i]) for i in range(NCCS)] - ispeed = c_cfgetispeed(c_struct) - ospeed = c_cfgetospeed(c_struct) - result = (intmask(c_struct.c_c_iflag), 
intmask(c_struct.c_c_oflag), - intmask(c_struct.c_c_cflag), intmask(c_struct.c_c_lflag), - intmask(ispeed), intmask(ospeed), cc) - return result - finally: - lltype.free(c_struct, flavor='raw') - -register_external(rtermios.tcgetattr, [int], (int, int, int, int, int, int, [str]), - llimpl=tcgetattr_llimpl, export_name='termios.tcgetattr') - # a bit C-c C-v code follows... def tcsendbreak_llimpl(fd, duration): From noreply at buildbot.pypy.org Sat Jan 10 13:36:56 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:36:56 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_termios: Move all other functions. Message-ID: <20150110123656.0A3821D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_termios Changeset: r75279:42c4131556e2 Date: 2015-01-10 00:33 +0100 http://bitbucket.org/pypy/pypy/changeset/42c4131556e2/ Log: Move all other functions. ll_termios is now empty. diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -17,15 +17,23 @@ class CConfig: _compilation_info_ = eci NCCS = rffi_platform.DefinedConstantInteger('NCCS') - TCSANOW = rffi_platform.ConstantInteger('TCSANOW') _HAVE_STRUCT_TERMIOS_C_ISPEED = rffi_platform.Defined( '_HAVE_STRUCT_TERMIOS_C_ISPEED') _HAVE_STRUCT_TERMIOS_C_OSPEED = rffi_platform.Defined( '_HAVE_STRUCT_TERMIOS_C_OSPEED') + TCSANOW = rffi_platform.ConstantInteger('TCSANOW') + TCIOFLUSH = rffi_platform.ConstantInteger('TCIOFLUSH') + TCOON = rffi_platform.ConstantInteger('TCOON') + + + c_config = rffi_platform.configure(CConfig) NCCS = c_config['NCCS'] + TCSANOW = c_config['TCSANOW'] +TCIOFLUSH = c_config['TCIOFLUSH'] +TCOON = c_config['TCOON'] TCFLAG_T = rffi.UINT CC_T = rffi.UCHAR @@ -51,6 +59,11 @@ c_cfsetispeed = c_external('cfsetispeed', [TERMIOSP, SPEED_T], rffi.INT) c_cfsetospeed = c_external('cfsetospeed', [TERMIOSP, SPEED_T], rffi.INT) +c_tcsendbreak = c_external('tcsendbreak', [rffi.INT, rffi.INT], rffi.INT) 
+c_tcdrain = c_external('tcdrain', [rffi.INT], rffi.INT) +c_tcflush = c_external('tcflush', [rffi.INT, rffi.INT], rffi.INT) +c_tcflow = c_external('tcflow', [rffi.INT, rffi.INT], rffi.INT) + def tcgetattr(fd): with lltype.scoped_alloc(TERMIOSP.TO) as c_struct: @@ -84,3 +97,19 @@ raise OSError(rposix.get_errno(), 'tcsetattr failed') if c_tcsetattr(fd, when, c_struct) < 0: raise OSError(rposix.get_errno(), 'tcsetattr failed') + +def tcsendbreak(fd, duration): + if c_tcsendbreak(fd, duration) < 0: + raise OSError(rposix.get_errno(), 'tcsendbreak failed') + +def tcdrain(fd): + if c_tcdrain(fd) < 0: + raise OSError(rposix.get_errno(), 'tcdrain failed') + +def tcflush(fd, queue_selector): + if c_tcflush(fd, queue_selector) < 0: + raise OSError(rposix.get_errno(), 'tcflush failed') + +def tcflow(fd, action): + if c_tcflow(fd, action) < 0: + raise OSError(rposix.get_errno(), 'tcflow failed') diff --git a/rpython/rtyper/module/ll_termios.py b/rpython/rtyper/module/ll_termios.py --- a/rpython/rtyper/module/ll_termios.py +++ b/rpython/rtyper/module/ll_termios.py @@ -50,34 +50,6 @@ def c_external(name, args, result): return rffi.llexternal(name, args, result, compilation_info=eci) -c_tcsendbreak = c_external('tcsendbreak', [INT, INT], INT) -c_tcdrain = c_external('tcdrain', [INT], INT) -c_tcflush = c_external('tcflush', [INT, INT], INT) -c_tcflow = c_external('tcflow', [INT, INT], INT) # a bit C-c C-v code follows... 
-def tcsendbreak_llimpl(fd, duration): - if c_tcsendbreak(fd, duration): - raise OSError(rposix.get_errno(), 'tcsendbreak failed') -register_external(termios.tcsendbreak, [int, int], - llimpl=tcsendbreak_llimpl, - export_name='termios.tcsendbreak') - -def tcdrain_llimpl(fd): - if c_tcdrain(fd) < 0: - raise OSError(rposix.get_errno(), 'tcdrain failed') -register_external(termios.tcdrain, [int], llimpl=tcdrain_llimpl, - export_name='termios.tcdrain') - -def tcflush_llimpl(fd, queue_selector): - if c_tcflush(fd, queue_selector) < 0: - raise OSError(rposix.get_errno(), 'tcflush failed') -register_external(termios.tcflush, [int, int], llimpl=tcflush_llimpl, - export_name='termios.tcflush') - -def tcflow_llimpl(fd, action): - if c_tcflow(fd, action) < 0: - raise OSError(rposix.get_errno(), 'tcflow failed') -register_external(termios.tcflow, [int, int], llimpl=tcflow_llimpl, - export_name='termios.tcflow') diff --git a/rpython/rtyper/module/test/test_ll_termios.py b/rpython/rtyper/module/test/test_ll_termios.py --- a/rpython/rtyper/module/test/test_ll_termios.py +++ b/rpython/rtyper/module/test/test_ll_termios.py @@ -78,12 +78,12 @@ def test_tcrest(self): from rpython.translator.c.test.test_genc import compile from rpython.rtyper.module import ll_termios - import termios, time + from rpython.rlib import rtermios def runs_tcall(): - termios.tcsendbreak(2, 0) - termios.tcdrain(2) - termios.tcflush(2, termios.TCIOFLUSH) - termios.tcflow(2, termios.TCOON) + rtermios.tcsendbreak(2, 0) + rtermios.tcdrain(2) + rtermios.tcflush(2, rtermios.TCIOFLUSH) + rtermios.tcflow(2, rtermios.TCOON) print "ok" fn = compile(runs_tcall, [], backendopt=False) From noreply at buildbot.pypy.org Sat Jan 10 13:36:57 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:36:57 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_termios: Fix translation, and define all constants directly, without using the host Python. 
Message-ID: <20150110123657.67C201D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_termios Changeset: r75280:63d0ea0e1a2b Date: 2015-01-10 12:26 +0100 http://bitbucket.org/pypy/pypy/changeset/63d0ea0e1a2b/ Log: Fix translation, and define all constants directly, without using the host Python. diff --git a/pypy/module/termios/__init__.py b/pypy/module/termios/__init__.py --- a/pypy/module/termios/__init__.py +++ b/pypy/module/termios/__init__.py @@ -1,5 +1,7 @@ from pypy.interpreter.mixedmodule import MixedModule +from rpython.rlib import rtermios + class Module(MixedModule): "This module provides an interface to the Posix calls for tty I/O control.\n\ For a complete description of these calls, see the Posix or Unix manual\n\ @@ -23,10 +25,6 @@ 'error' : 'space.fromcache(interp_termios.Cache).w_error', } -# XXX this is extremaly not-portable, but how to prevent this? - -import termios -for i in dir(termios): - val = getattr(termios, i) - if i.isupper() and type(val) is int: - Module.interpleveldefs[i] = "space.wrap(%s)" % val + for name in rtermios.all_constants: + value = getattr(rtermios, name) + interpleveldefs[name] = "space.wrap(%s)" % value diff --git a/pypy/module/termios/interp_termios.py b/pypy/module/termios/interp_termios.py --- a/pypy/module/termios/interp_termios.py +++ b/pypy/module/termios/interp_termios.py @@ -6,7 +6,6 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import wrap_oserror, OperationError from rpython.rlib import rtermios -import termios class Cache: def __init__(self, space): @@ -52,9 +51,9 @@ l_w = [space.wrap(i) for i in [iflag, oflag, cflag, lflag, ispeed, ospeed]] # last one need to be chosen carefully cc_w = [space.wrap(i) for i in cc] - if lflag & termios.ICANON: - cc_w[termios.VMIN] = space.wrap(ord(cc[termios.VMIN][0])) - cc_w[termios.VTIME] = space.wrap(ord(cc[termios.VTIME][0])) + if lflag & rtermios.ICANON: + cc_w[rtermios.VMIN] = 
space.wrap(ord(cc[rtermios.VMIN][0])) + cc_w[rtermios.VTIME] = space.wrap(ord(cc[rtermios.VTIME][0])) w_cc = space.newlist(cc_w) l_w.append(w_cc) return space.newlist(l_w) @@ -63,14 +62,14 @@ def tcsendbreak(space, w_fd, duration): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcsendbreak(fd, duration) + rtermios.tcsendbreak(fd, duration) except OSError, e: raise convert_error(space, e) def tcdrain(space, w_fd): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcdrain(fd) + rtermios.tcdrain(fd) except OSError, e: raise convert_error(space, e) @@ -78,7 +77,7 @@ def tcflush(space, w_fd, queue): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcflush(fd, queue) + rtermios.tcflush(fd, queue) except OSError, e: raise convert_error(space, e) @@ -86,6 +85,6 @@ def tcflow(space, w_fd, action): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcflow(fd, action) + rtermios.tcflow(fd, action) except OSError, e: raise convert_error(space, e) diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -136,7 +136,7 @@ val = getattr(termios, name) if name.isupper() and type(val) is int: d[name] = val - assert d == self.orig_module_dict + assert sorted(d.items()) == sorted(self.orig_module_dict.items()) def test_error(self): import termios, errno, os diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -11,30 +11,95 @@ from rpython.rlib.rarithmetic import intmask eci = ExternalCompilationInfo( - includes = ['termios.h', 'unistd.h'] + includes = ['termios.h', 'unistd.h', 'sys/ioctl.h'] ) class CConfig: _compilation_info_ = eci - NCCS = rffi_platform.DefinedConstantInteger('NCCS') _HAVE_STRUCT_TERMIOS_C_ISPEED = rffi_platform.Defined( '_HAVE_STRUCT_TERMIOS_C_ISPEED') _HAVE_STRUCT_TERMIOS_C_OSPEED = rffi_platform.Defined( '_HAVE_STRUCT_TERMIOS_C_OSPEED') - TCSANOW 
= rffi_platform.ConstantInteger('TCSANOW') - TCIOFLUSH = rffi_platform.ConstantInteger('TCIOFLUSH') - TCOON = rffi_platform.ConstantInteger('TCOON') - - +CONSTANT_NAMES = ( + # cfgetospeed(), cfsetospeed() constants + """B0 B50 B75 B110 B134 B150 B200 B300 B600 B1200 B1800 B2400 B4800 B9600 + B19200 B38400 B57600 B115200 B230400 B460800 CBAUDEX + """ + # tcsetattr() constants + """TCSANOW TCSADRAIN TCSAFLUSH TCSASOFT + """ + # tcflush() constants + """TCIFLUSH TCOFLUSH TCIOFLUSH + """ + # tcflow() constants + """TCOOFF TCOON TCIOFF TCION + """ + # struct termios.c_iflag constants + """IGNBRK BRKINT IGNPAR PARMRK INPCK ISTRIP INLCR IGNCR ICRNL IUCLC + IXON IXANY IXOFF IMAXBEL + """ + # struct termios.c_oflag constants + """OPOST OLCUC ONLCR OCRNL ONOCR ONLRET OFILL OFDEL + NLDLY CRDLY TABDLY BSDLY VTDLY FFDLY + """ + # struct termios.c_oflag-related values (delay mask) + """NL0 NL1 CR0 CR1 CR2 CR3 TAB0 TAB1 TAB2 TAB3 XTABS + BS0 BS1 VT0 VT1 FF0 FF1 + """ + # struct termios.c_cflag constants + """CSIZE CSTOPB CREAD PARENB PARODD HUPCL CLOCAL CIBAUD CRTSCTS + """ + # struct termios.c_cflag-related values (character size) + """CS5 CS6 CS7 CS8 + """ + # struct termios.c_lflag constants + """ISIG ICANON XCASE ECHO ECHOE ECHOK ECHONL ECHOCTL ECHOPRT ECHOKE + FLUSHO NOFLSH TOSTOP PENDIN IEXTEN + """ + # indexes into the control chars array returned by tcgetattr() + """VINTR VQUIT VERASE VKILL VEOF VTIME VMIN VSWTC VSWTCH VSTART VSTOP + VSUSP VEOL VREPRINT VDISCARD VWERASE VLNEXT VEOL2 + """ + # Others? 
+ """CBAUD CDEL CDSUSP CEOF CEOL CEOL2 CEOT CERASE CESC CFLUSH CINTR CKILL + CLNEXT CNUL COMMON CQUIT CRPRNT CSTART CSTOP CSUSP CSWTCH CWERASE + EXTA EXTB + FIOASYNC FIOCLEX FIONBIO FIONCLEX FIONREAD + IBSHIFT INIT_C_CC IOCSIZE_MASK IOCSIZE_SHIFT + NCC NCCS NSWTCH N_MOUSE N_PPP N_SLIP N_STRIP N_TTY + TCFLSH TCGETA TCGETS TCSBRK TCSBRKP TCSETA TCSETAF TCSETAW TCSETS + TCSETSF TCSETSW TCXONC + TIOCCONS TIOCEXCL TIOCGETD TIOCGICOUNT TIOCGLCKTRMIOS TIOCGPGRP + TIOCGSERIAL TIOCGSOFTCAR TIOCGWINSZ TIOCINQ TIOCLINUX TIOCMBIC + TIOCMBIS TIOCMGET TIOCMIWAIT TIOCMSET TIOCM_CAR TIOCM_CD TIOCM_CTS + TIOCM_DSR TIOCM_DTR TIOCM_LE TIOCM_RI TIOCM_RNG TIOCM_RTS TIOCM_SR + TIOCM_ST TIOCNOTTY TIOCNXCL TIOCOUTQ TIOCPKT TIOCPKT_DATA + TIOCPKT_DOSTOP TIOCPKT_FLUSHREAD TIOCPKT_FLUSHWRITE TIOCPKT_NOSTOP + TIOCPKT_START TIOCPKT_STOP TIOCSCTTY TIOCSERCONFIG TIOCSERGETLSR + TIOCSERGETMULTI TIOCSERGSTRUCT TIOCSERGWILD TIOCSERSETMULTI + TIOCSERSWILD TIOCSER_TEMT TIOCSETD TIOCSLCKTRMIOS TIOCSPGRP + TIOCSSERIAL TIOCSSOFTCAR TIOCSTI TIOCSWINSZ TIOCTTYGSTRUCT + """).split() + +for name in CONSTANT_NAMES: + setattr(CConfig, name, rffi_platform.DefinedConstantInteger(name)) c_config = rffi_platform.configure(CConfig) -NCCS = c_config['NCCS'] -TCSANOW = c_config['TCSANOW'] -TCIOFLUSH = c_config['TCIOFLUSH'] -TCOON = c_config['TCOON'] +# Copy VSWTCH to VSWTC and vice-versa +if c_config['VSWTC'] is None: + c_config['VSWTC'] = c_config['VSWTCH'] +if c_config['VSWTCH'] is None: + c_config['VSWTCH'] = c_config['VSWTC'] +all_constants = {} +for name in CONSTANT_NAMES: + value = c_config[name] + if value is not None: + globals()[name] = value + all_constants[name] = value + TCFLAG_T = rffi.UINT CC_T = rffi.UCHAR SPEED_T = rffi.UINT From noreply at buildbot.pypy.org Sat Jan 10 13:36:58 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:36:58 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_termios: Finally remove ll_termios.py Message-ID: 
<20150110123658.A6AC71D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_termios Changeset: r75281:289505ce5487 Date: 2015-01-10 12:33 +0100 http://bitbucket.org/pypy/pypy/changeset/289505ce5487/ Log: Finally remove ll_termios.py diff --git a/rpython/rtyper/module/test/test_ll_termios.py b/rpython/rlib/test/test_rtermios.py rename from rpython/rtyper/module/test/test_ll_termios.py rename to rpython/rlib/test/test_rtermios.py --- a/rpython/rtyper/module/test/test_ll_termios.py +++ b/rpython/rlib/test/test_rtermios.py @@ -77,7 +77,6 @@ def test_tcrest(self): from rpython.translator.c.test.test_genc import compile - from rpython.rtyper.module import ll_termios from rpython.rlib import rtermios def runs_tcall(): rtermios.tcsendbreak(2, 0) diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -10,12 +10,6 @@ from rpython.rtyper.module import ll_os from rpython.rtyper.module import ll_time from rpython.rlib import rfloat -try: - import termios -except ImportError: - pass -else: - from rpython.rtyper.module import ll_termios # the following functions all take one float, return one float # and are part of math.h diff --git a/rpython/rtyper/module/ll_termios.py b/rpython/rtyper/module/ll_termios.py deleted file mode 100644 --- a/rpython/rtyper/module/ll_termios.py +++ /dev/null @@ -1,55 +0,0 @@ - -""" -The low-level implementation of termios module -note that this module should only be imported when -termios module is there -""" - -import termios -from rpython.rtyper.lltypesystem import rffi -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.extfunc import lazy_register, register_external -from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.annotator import model as annmodel -from rpython.rtyper import rclass -from rpython.rlib import rtermios, rposix 
-from rpython.rtyper.tool import rffi_platform -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -eci = ExternalCompilationInfo( - includes = ['termios.h', 'unistd.h'] -) - -class CConfig: - _compilation_info_ = eci - NCCS = rffi_platform.DefinedConstantInteger('NCCS') - _HAVE_STRUCT_TERMIOS_C_ISPEED = rffi_platform.Defined( - '_HAVE_STRUCT_TERMIOS_C_ISPEED') - _HAVE_STRUCT_TERMIOS_C_OSPEED = rffi_platform.Defined( - '_HAVE_STRUCT_TERMIOS_C_OSPEED') - -c_config = rffi_platform.configure(CConfig) -NCCS = c_config['NCCS'] - -TCFLAG_T = rffi.UINT -CC_T = rffi.UCHAR -SPEED_T = rffi.UINT -INT = rffi.INT - -_add = [] -if c_config['_HAVE_STRUCT_TERMIOS_C_ISPEED']: - _add.append(('c_ispeed', SPEED_T)) -if c_config['_HAVE_STRUCT_TERMIOS_C_OSPEED']: - _add.append(('c_ospeed', SPEED_T)) -TERMIOSP = rffi.CStructPtr('termios', ('c_iflag', TCFLAG_T), ('c_oflag', TCFLAG_T), - ('c_cflag', TCFLAG_T), ('c_lflag', TCFLAG_T), - ('c_line', CC_T), - ('c_cc', lltype.FixedSizeArray(CC_T, NCCS)), *_add) - -def c_external(name, args, result): - return rffi.llexternal(name, args, result, compilation_info=eci) - - -# a bit C-c C-v code follows... 
- From noreply at buildbot.pypy.org Sat Jan 10 13:36:59 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:36:59 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_termios: Close branch about to be merged Message-ID: <20150110123659.CB46F1D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_termios Changeset: r75282:36c47b1ba467 Date: 2015-01-10 13:32 +0100 http://bitbucket.org/pypy/pypy/changeset/36c47b1ba467/ Log: Close branch about to be merged From noreply at buildbot.pypy.org Sat Jan 10 13:37:01 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 13:37:01 +0100 (CET) Subject: [pypy-commit] pypy default: Move implementation of termios functions from ll_termios to rtermios.py Message-ID: <20150110123701.06A221D36E9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r75283:0c1f65b8adad Date: 2015-01-10 13:32 +0100 http://bitbucket.org/pypy/pypy/changeset/0c1f65b8adad/ Log: Move implementation of termios functions from ll_termios to rtermios.py diff --git a/pypy/module/termios/__init__.py b/pypy/module/termios/__init__.py --- a/pypy/module/termios/__init__.py +++ b/pypy/module/termios/__init__.py @@ -1,5 +1,7 @@ from pypy.interpreter.mixedmodule import MixedModule +from rpython.rlib import rtermios + class Module(MixedModule): "This module provides an interface to the Posix calls for tty I/O control.\n\ For a complete description of these calls, see the Posix or Unix manual\n\ @@ -23,10 +25,6 @@ 'error' : 'space.fromcache(interp_termios.Cache).w_error', } -# XXX this is extremaly not-portable, but how to prevent this? 
- -import termios -for i in dir(termios): - val = getattr(termios, i) - if i.isupper() and type(val) is int: - Module.interpleveldefs[i] = "space.wrap(%s)" % val + for name in rtermios.all_constants: + value = getattr(rtermios, name) + interpleveldefs[name] = "space.wrap(%s)" % value diff --git a/pypy/module/termios/interp_termios.py b/pypy/module/termios/interp_termios.py --- a/pypy/module/termios/interp_termios.py +++ b/pypy/module/termios/interp_termios.py @@ -6,7 +6,6 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import wrap_oserror, OperationError from rpython.rlib import rtermios -import termios class Cache: def __init__(self, space): @@ -52,9 +51,9 @@ l_w = [space.wrap(i) for i in [iflag, oflag, cflag, lflag, ispeed, ospeed]] # last one need to be chosen carefully cc_w = [space.wrap(i) for i in cc] - if lflag & termios.ICANON: - cc_w[termios.VMIN] = space.wrap(ord(cc[termios.VMIN][0])) - cc_w[termios.VTIME] = space.wrap(ord(cc[termios.VTIME][0])) + if lflag & rtermios.ICANON: + cc_w[rtermios.VMIN] = space.wrap(ord(cc[rtermios.VMIN][0])) + cc_w[rtermios.VTIME] = space.wrap(ord(cc[rtermios.VTIME][0])) w_cc = space.newlist(cc_w) l_w.append(w_cc) return space.newlist(l_w) @@ -63,14 +62,14 @@ def tcsendbreak(space, w_fd, duration): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcsendbreak(fd, duration) + rtermios.tcsendbreak(fd, duration) except OSError, e: raise convert_error(space, e) def tcdrain(space, w_fd): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcdrain(fd) + rtermios.tcdrain(fd) except OSError, e: raise convert_error(space, e) @@ -78,7 +77,7 @@ def tcflush(space, w_fd, queue): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcflush(fd, queue) + rtermios.tcflush(fd, queue) except OSError, e: raise convert_error(space, e) @@ -86,6 +85,6 @@ def tcflow(space, w_fd, action): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcflow(fd, action) + rtermios.tcflow(fd, action) except OSError, e: raise 
convert_error(space, e) diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -136,7 +136,7 @@ val = getattr(termios, name) if name.isupper() and type(val) is int: d[name] = val - assert d == self.orig_module_dict + assert sorted(d.items()) == sorted(self.orig_module_dict.items()) def test_error(self): import termios, errno, os diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -451,11 +451,3 @@ pass else: FORCE_ATTRIBUTES_INTO_CLASSES[WindowsError] = {'winerror': SomeInteger()} - -try: - import termios -except ImportError: - pass -else: - FORCE_ATTRIBUTES_INTO_CLASSES[termios.error] = \ - {'args': SomeTuple([SomeInteger(), SomeString()])} diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -3,36 +3,178 @@ # returns list of mostly-strings of length one, but with few ints # inside, so we make sure it works -import termios -from termios import * +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.tool import rffi_platform +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from rpython.rlib import rposix +from rpython.rlib.rarithmetic import intmask + +eci = ExternalCompilationInfo( + includes = ['termios.h', 'unistd.h', 'sys/ioctl.h'] +) + +class CConfig: + _compilation_info_ = eci + _HAVE_STRUCT_TERMIOS_C_ISPEED = rffi_platform.Defined( + '_HAVE_STRUCT_TERMIOS_C_ISPEED') + _HAVE_STRUCT_TERMIOS_C_OSPEED = rffi_platform.Defined( + '_HAVE_STRUCT_TERMIOS_C_OSPEED') + +CONSTANT_NAMES = ( + # cfgetospeed(), cfsetospeed() constants + """B0 B50 B75 B110 B134 B150 B200 B300 B600 B1200 B1800 B2400 B4800 B9600 + B19200 B38400 B57600 B115200 B230400 B460800 CBAUDEX + """ + # tcsetattr() constants + """TCSANOW TCSADRAIN 
TCSAFLUSH TCSASOFT + """ + # tcflush() constants + """TCIFLUSH TCOFLUSH TCIOFLUSH + """ + # tcflow() constants + """TCOOFF TCOON TCIOFF TCION + """ + # struct termios.c_iflag constants + """IGNBRK BRKINT IGNPAR PARMRK INPCK ISTRIP INLCR IGNCR ICRNL IUCLC + IXON IXANY IXOFF IMAXBEL + """ + # struct termios.c_oflag constants + """OPOST OLCUC ONLCR OCRNL ONOCR ONLRET OFILL OFDEL + NLDLY CRDLY TABDLY BSDLY VTDLY FFDLY + """ + # struct termios.c_oflag-related values (delay mask) + """NL0 NL1 CR0 CR1 CR2 CR3 TAB0 TAB1 TAB2 TAB3 XTABS + BS0 BS1 VT0 VT1 FF0 FF1 + """ + # struct termios.c_cflag constants + """CSIZE CSTOPB CREAD PARENB PARODD HUPCL CLOCAL CIBAUD CRTSCTS + """ + # struct termios.c_cflag-related values (character size) + """CS5 CS6 CS7 CS8 + """ + # struct termios.c_lflag constants + """ISIG ICANON XCASE ECHO ECHOE ECHOK ECHONL ECHOCTL ECHOPRT ECHOKE + FLUSHO NOFLSH TOSTOP PENDIN IEXTEN + """ + # indexes into the control chars array returned by tcgetattr() + """VINTR VQUIT VERASE VKILL VEOF VTIME VMIN VSWTC VSWTCH VSTART VSTOP + VSUSP VEOL VREPRINT VDISCARD VWERASE VLNEXT VEOL2 + """ + # Others? 
+ """CBAUD CDEL CDSUSP CEOF CEOL CEOL2 CEOT CERASE CESC CFLUSH CINTR CKILL + CLNEXT CNUL COMMON CQUIT CRPRNT CSTART CSTOP CSUSP CSWTCH CWERASE + EXTA EXTB + FIOASYNC FIOCLEX FIONBIO FIONCLEX FIONREAD + IBSHIFT INIT_C_CC IOCSIZE_MASK IOCSIZE_SHIFT + NCC NCCS NSWTCH N_MOUSE N_PPP N_SLIP N_STRIP N_TTY + TCFLSH TCGETA TCGETS TCSBRK TCSBRKP TCSETA TCSETAF TCSETAW TCSETS + TCSETSF TCSETSW TCXONC + TIOCCONS TIOCEXCL TIOCGETD TIOCGICOUNT TIOCGLCKTRMIOS TIOCGPGRP + TIOCGSERIAL TIOCGSOFTCAR TIOCGWINSZ TIOCINQ TIOCLINUX TIOCMBIC + TIOCMBIS TIOCMGET TIOCMIWAIT TIOCMSET TIOCM_CAR TIOCM_CD TIOCM_CTS + TIOCM_DSR TIOCM_DTR TIOCM_LE TIOCM_RI TIOCM_RNG TIOCM_RTS TIOCM_SR + TIOCM_ST TIOCNOTTY TIOCNXCL TIOCOUTQ TIOCPKT TIOCPKT_DATA + TIOCPKT_DOSTOP TIOCPKT_FLUSHREAD TIOCPKT_FLUSHWRITE TIOCPKT_NOSTOP + TIOCPKT_START TIOCPKT_STOP TIOCSCTTY TIOCSERCONFIG TIOCSERGETLSR + TIOCSERGETMULTI TIOCSERGSTRUCT TIOCSERGWILD TIOCSERSETMULTI + TIOCSERSWILD TIOCSER_TEMT TIOCSETD TIOCSLCKTRMIOS TIOCSPGRP + TIOCSSERIAL TIOCSSOFTCAR TIOCSTI TIOCSWINSZ TIOCTTYGSTRUCT + """).split() + +for name in CONSTANT_NAMES: + setattr(CConfig, name, rffi_platform.DefinedConstantInteger(name)) + +c_config = rffi_platform.configure(CConfig) + +# Copy VSWTCH to VSWTC and vice-versa +if c_config['VSWTC'] is None: + c_config['VSWTC'] = c_config['VSWTCH'] +if c_config['VSWTCH'] is None: + c_config['VSWTCH'] = c_config['VSWTC'] + +all_constants = {} +for name in CONSTANT_NAMES: + value = c_config[name] + if value is not None: + globals()[name] = value + all_constants[name] = value + +TCFLAG_T = rffi.UINT +CC_T = rffi.UCHAR +SPEED_T = rffi.UINT + +_add = [] +if c_config['_HAVE_STRUCT_TERMIOS_C_ISPEED']: + _add.append(('c_ispeed', SPEED_T)) +if c_config['_HAVE_STRUCT_TERMIOS_C_OSPEED']: + _add.append(('c_ospeed', SPEED_T)) +TERMIOSP = rffi.CStructPtr('termios', ('c_iflag', TCFLAG_T), ('c_oflag', TCFLAG_T), + ('c_cflag', TCFLAG_T), ('c_lflag', TCFLAG_T), + ('c_line', CC_T), + ('c_cc', lltype.FixedSizeArray(CC_T, NCCS)), *_add) 
+ +def c_external(name, args, result): + return rffi.llexternal(name, args, result, compilation_info=eci) + +c_tcgetattr = c_external('tcgetattr', [rffi.INT, TERMIOSP], rffi.INT) +c_tcsetattr = c_external('tcsetattr', [rffi.INT, rffi.INT, TERMIOSP], rffi.INT) +c_cfgetispeed = c_external('cfgetispeed', [TERMIOSP], SPEED_T) +c_cfgetospeed = c_external('cfgetospeed', [TERMIOSP], SPEED_T) +c_cfsetispeed = c_external('cfsetispeed', [TERMIOSP, SPEED_T], rffi.INT) +c_cfsetospeed = c_external('cfsetospeed', [TERMIOSP, SPEED_T], rffi.INT) + +c_tcsendbreak = c_external('tcsendbreak', [rffi.INT, rffi.INT], rffi.INT) +c_tcdrain = c_external('tcdrain', [rffi.INT], rffi.INT) +c_tcflush = c_external('tcflush', [rffi.INT, rffi.INT], rffi.INT) +c_tcflow = c_external('tcflow', [rffi.INT, rffi.INT], rffi.INT) + def tcgetattr(fd): - # NOT_RPYTHON - try: - lst = list(termios.tcgetattr(fd)) - except termios.error, e: - raise OSError(*e.args) - cc = lst[-1] - next_cc = [] - for c in cc: - if isinstance(c, int): - next_cc.append(chr(c)) - else: - next_cc.append(c) - lst[-1] = next_cc - return tuple(lst) + with lltype.scoped_alloc(TERMIOSP.TO) as c_struct: + if c_tcgetattr(fd, c_struct) < 0: + raise OSError(rposix.get_errno(), 'tcgetattr failed') + cc = [chr(c_struct.c_c_cc[i]) for i in range(NCCS)] + ispeed = c_cfgetispeed(c_struct) + ospeed = c_cfgetospeed(c_struct) + result = (intmask(c_struct.c_c_iflag), intmask(c_struct.c_c_oflag), + intmask(c_struct.c_c_cflag), intmask(c_struct.c_c_lflag), + intmask(ispeed), intmask(ospeed), cc) + return result -def tcsetattr(fd, when, mode): - # NOT_RPYTHON - # there are some bizarre requirements for that, stealing directly - # from cpython - mode_l = list(mode) - if mode_l[3] & termios.ICANON: - cc = mode_l[-1] - cc[termios.VMIN] = ord(cc[termios.VMIN]) - cc[termios.VTIME] = ord(cc[termios.VTIME]) - mode_l[-1] = cc - try: - return termios.tcsetattr(fd, when, mode_l) - except termios.error, e: - raise OSError(*e.args) + +# This function is not an 
exact replacement of termios.tcsetattr: +# the last attribute must be a list of chars. +def tcsetattr(fd, when, attributes): + with lltype.scoped_alloc(TERMIOSP.TO) as c_struct: + rffi.setintfield(c_struct, 'c_c_iflag', attributes[0]) + rffi.setintfield(c_struct, 'c_c_oflag', attributes[1]) + rffi.setintfield(c_struct, 'c_c_cflag', attributes[2]) + rffi.setintfield(c_struct, 'c_c_lflag', attributes[3]) + ispeed = attributes[4] + ospeed = attributes[5] + cc = attributes[6] + for i in range(NCCS): + c_struct.c_c_cc[i] = rffi.r_uchar(ord(cc[i][0])) + if c_cfsetispeed(c_struct, ispeed) < 0: + raise OSError(rposix.get_errno(), 'tcsetattr failed') + if c_cfsetospeed(c_struct, ospeed) < 0: + raise OSError(rposix.get_errno(), 'tcsetattr failed') + if c_tcsetattr(fd, when, c_struct) < 0: + raise OSError(rposix.get_errno(), 'tcsetattr failed') + +def tcsendbreak(fd, duration): + if c_tcsendbreak(fd, duration) < 0: + raise OSError(rposix.get_errno(), 'tcsendbreak failed') + +def tcdrain(fd): + if c_tcdrain(fd) < 0: + raise OSError(rposix.get_errno(), 'tcdrain failed') + +def tcflush(fd, queue_selector): + if c_tcflush(fd, queue_selector) < 0: + raise OSError(rposix.get_errno(), 'tcflush failed') + +def tcflow(fd, action): + if c_tcflow(fd, action) < 0: + raise OSError(rposix.get_errno(), 'tcflow failed') diff --git a/rpython/rtyper/module/test/test_ll_termios.py b/rpython/rlib/test/test_rtermios.py rename from rpython/rtyper/module/test/test_ll_termios.py rename to rpython/rlib/test/test_rtermios.py --- a/rpython/rtyper/module/test/test_ll_termios.py +++ b/rpython/rlib/test/test_rtermios.py @@ -77,13 +77,12 @@ def test_tcrest(self): from rpython.translator.c.test.test_genc import compile - from rpython.rtyper.module import ll_termios - import termios, time + from rpython.rlib import rtermios def runs_tcall(): - termios.tcsendbreak(2, 0) - termios.tcdrain(2) - termios.tcflush(2, termios.TCIOFLUSH) - termios.tcflow(2, termios.TCOON) + rtermios.tcsendbreak(2, 0) + 
rtermios.tcdrain(2) + rtermios.tcflush(2, rtermios.TCIOFLUSH) + rtermios.tcflow(2, rtermios.TCOON) print "ok" fn = compile(runs_tcall, [], backendopt=False) diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -10,12 +10,6 @@ from rpython.rtyper.module import ll_os from rpython.rtyper.module import ll_time from rpython.rlib import rfloat -try: - import termios -except ImportError: - pass -else: - from rpython.rtyper.module import ll_termios # the following functions all take one float, return one float # and are part of math.h diff --git a/rpython/rtyper/module/ll_termios.py b/rpython/rtyper/module/ll_termios.py deleted file mode 100644 --- a/rpython/rtyper/module/ll_termios.py +++ /dev/null @@ -1,135 +0,0 @@ - -""" -The low-level implementation of termios module -note that this module should only be imported when -termios module is there -""" - -import termios -from rpython.rtyper.lltypesystem import rffi -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.extfunc import lazy_register, register_external -from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.annotator import model as annmodel -from rpython.rtyper import rclass -from rpython.rlib import rtermios, rposix -from rpython.rtyper.tool import rffi_platform -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -eci = ExternalCompilationInfo( - includes = ['termios.h', 'unistd.h'] -) - -class CConfig: - _compilation_info_ = eci - NCCS = rffi_platform.DefinedConstantInteger('NCCS') - _HAVE_STRUCT_TERMIOS_C_ISPEED = rffi_platform.Defined( - '_HAVE_STRUCT_TERMIOS_C_ISPEED') - _HAVE_STRUCT_TERMIOS_C_OSPEED = rffi_platform.Defined( - '_HAVE_STRUCT_TERMIOS_C_OSPEED') - -c_config = rffi_platform.configure(CConfig) -NCCS = c_config['NCCS'] - -TCFLAG_T = rffi.UINT -CC_T = rffi.UCHAR -SPEED_T = rffi.UINT -INT = 
rffi.INT - -_add = [] -if c_config['_HAVE_STRUCT_TERMIOS_C_ISPEED']: - _add.append(('c_ispeed', SPEED_T)) -if c_config['_HAVE_STRUCT_TERMIOS_C_OSPEED']: - _add.append(('c_ospeed', SPEED_T)) -TERMIOSP = rffi.CStructPtr('termios', ('c_iflag', TCFLAG_T), ('c_oflag', TCFLAG_T), - ('c_cflag', TCFLAG_T), ('c_lflag', TCFLAG_T), - ('c_line', CC_T), - ('c_cc', lltype.FixedSizeArray(CC_T, NCCS)), *_add) - -def c_external(name, args, result): - return rffi.llexternal(name, args, result, compilation_info=eci) - -c_tcsetattr = c_external('tcsetattr', [INT, INT, TERMIOSP], INT) -c_cfgetispeed = c_external('cfgetispeed', [TERMIOSP], SPEED_T) -c_cfgetospeed = c_external('cfgetospeed', [TERMIOSP], SPEED_T) -c_cfsetispeed = c_external('cfsetispeed', [TERMIOSP, SPEED_T], INT) -c_cfsetospeed = c_external('cfsetospeed', [TERMIOSP, SPEED_T], INT) -c_tcsendbreak = c_external('tcsendbreak', [INT, INT], INT) -c_tcdrain = c_external('tcdrain', [INT], INT) -c_tcflush = c_external('tcflush', [INT, INT], INT) -c_tcflow = c_external('tcflow', [INT, INT], INT) - -c_tcgetattr = c_external('tcgetattr', [INT, TERMIOSP], INT) - -def tcgetattr_llimpl(fd): - c_struct = lltype.malloc(TERMIOSP.TO, flavor='raw') - - try: - if c_tcgetattr(fd, c_struct) < 0: - raise OSError(rposix.get_errno(), 'tcgetattr failed') - cc = [chr(c_struct.c_c_cc[i]) for i in range(NCCS)] - ispeed = c_cfgetispeed(c_struct) - ospeed = c_cfgetospeed(c_struct) - result = (intmask(c_struct.c_c_iflag), intmask(c_struct.c_c_oflag), - intmask(c_struct.c_c_cflag), intmask(c_struct.c_c_lflag), - intmask(ispeed), intmask(ospeed), cc) - return result - finally: - lltype.free(c_struct, flavor='raw') - -register_external(rtermios.tcgetattr, [int], (int, int, int, int, int, int, [str]), - llimpl=tcgetattr_llimpl, export_name='termios.tcgetattr') - -def tcsetattr_llimpl(fd, when, attributes): - c_struct = lltype.malloc(TERMIOSP.TO, flavor='raw') - try: - c_struct.c_c_iflag = r_uint(attributes[0]) - c_struct.c_c_oflag = r_uint(attributes[1]) - 
c_struct.c_c_cflag = r_uint(attributes[2]) - c_struct.c_c_lflag = r_uint(attributes[3]) - ispeed = r_uint(attributes[4]) - ospeed = r_uint(attributes[5]) - cc = attributes[6] - for i in range(NCCS): - c_struct.c_c_cc[i] = rffi.r_uchar(ord(cc[i][0])) - if c_cfsetispeed(c_struct, ispeed) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') - if c_cfsetospeed(c_struct, ospeed) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') - if c_tcsetattr(fd, when, c_struct) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') - finally: - lltype.free(c_struct, flavor='raw') - -r_uint = rffi.r_uint -register_external(rtermios.tcsetattr, [int, int, (int, int, int, - int, int, int, [str])], llimpl=tcsetattr_llimpl, - export_name='termios.tcsetattr') - -# a bit C-c C-v code follows... - -def tcsendbreak_llimpl(fd, duration): - if c_tcsendbreak(fd, duration): - raise OSError(rposix.get_errno(), 'tcsendbreak failed') -register_external(termios.tcsendbreak, [int, int], - llimpl=tcsendbreak_llimpl, - export_name='termios.tcsendbreak') - -def tcdrain_llimpl(fd): - if c_tcdrain(fd) < 0: - raise OSError(rposix.get_errno(), 'tcdrain failed') -register_external(termios.tcdrain, [int], llimpl=tcdrain_llimpl, - export_name='termios.tcdrain') - -def tcflush_llimpl(fd, queue_selector): - if c_tcflush(fd, queue_selector) < 0: - raise OSError(rposix.get_errno(), 'tcflush failed') -register_external(termios.tcflush, [int, int], llimpl=tcflush_llimpl, - export_name='termios.tcflush') - -def tcflow_llimpl(fd, action): - if c_tcflow(fd, action) < 0: - raise OSError(rposix.get_errno(), 'tcflow failed') -register_external(termios.tcflow, [int, int], llimpl=tcflow_llimpl, - export_name='termios.tcflow') From noreply at buildbot.pypy.org Sat Jan 10 17:29:49 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 17:29:49 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_time: A branch to move ll_time into rlib Message-ID: 
<20150110162949.675831C0417@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_time Changeset: r75284:049a0747bc77 Date: 2015-01-10 13:45 +0100 http://bitbucket.org/pypy/pypy/changeset/049a0747bc77/ Log: A branch to move ll_time into rlib From noreply at buildbot.pypy.org Sat Jan 10 17:29:50 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 17:29:50 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_time: Create rtime.py, move implmentation of time.time() Message-ID: <20150110162950.98DC81C0417@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_time Changeset: r75285:06a7386c87a3 Date: 2015-01-10 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/06a7386c87a3/ Log: Create rtime.py, move implmentation of time.time() diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -4,7 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rarithmetic import intmask -from rpython.rlib import rposix +from rpython.rlib import rposix, rtime from rpython.translator.tool.cbuild import ExternalCompilationInfo import os import sys @@ -359,7 +359,7 @@ # w_seconds can be a wrapped None (it will be automatically wrapped # in the callers, so we never get a real None here). if space.is_none(w_seconds): - seconds = pytime.time() + seconds = rtime.floattime() else: seconds = space.float_w(w_seconds) # @@ -396,7 +396,7 @@ raise OperationError(space.w_TypeError, space.wrap("tuple expected")) # default to the current local time - tt = rffi.r_time_t(int(pytime.time())) + tt = rffi.r_time_t(int(rtime.floattime())) t_ref = lltype.malloc(rffi.TIME_TP.TO, 1, flavor='raw') t_ref[0] = tt pbuf = c_localtime(t_ref) @@ -476,8 +476,7 @@ Return the current time in seconds since the Epoch. 
Fractions of a second may be present if the system clock provides them.""" - secs = pytime.time() - return space.wrap(secs) + return space.wrap(rtime.floattime()) if _WIN: class PCCache: diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -1,8 +1,3 @@ -# This are here only because it's always better safe than sorry. -# The issue is that from-time-to-time CPython's termios.tcgetattr -# returns list of mostly-strings of length one, but with few ints -# inside, so we make sure it works - from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rtime.py @@ -0,0 +1,118 @@ +import sys, time +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.tool import rffi_platform +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.extregistry import replacement_for + +from rpython.rlib.rarithmetic import intmask + +if sys.platform == 'win32': + TIME_H = 'time.h' + FTIME = '_ftime64' + STRUCT_TIMEB = 'struct __timeb64' + includes = ['winsock2.h', 'windows.h', + TIME_H, 'sys/types.h', 'sys/timeb.h'] + need_rusage = False +else: + TIME_H = 'sys/time.h' + FTIME = 'ftime' + STRUCT_TIMEB = 'struct timeb' + includes = [TIME_H, 'time.h', 'errno.h', 'sys/select.h', + 'sys/types.h', 'unistd.h', + 'sys/time.h', 'sys/resource.h'] + + if not sys.platform.startswith("openbsd"): + includes.append('sys/timeb.h') + + need_rusage = True + +eci = ExternalCompilationInfo( + includes=includes +) + +class CConfig: + _compilation_info_ = eci + TIMEVAL = rffi_platform.Struct('struct timeval', [('tv_sec', rffi.INT), + ('tv_usec', rffi.INT)]) + HAVE_GETTIMEOFDAY = rffi_platform.Has('gettimeofday') + HAVE_FTIME = rffi_platform.Has(FTIME) + if need_rusage: + 
RUSAGE = rffi_platform.Struct('struct rusage', [('ru_utime', TIMEVAL), + ('ru_stime', TIMEVAL)]) + + TIMEB = rffi_platform.Struct(STRUCT_TIMEB, [('time', rffi.INT), + ('millitm', rffi.INT)]) + +constant_names = ['RUSAGE_SELF', 'EINTR', 'CLOCK_PROCESS_CPUTIME_ID'] +for const in constant_names: + setattr(CConfig, const, rffi_platform.DefinedConstantInteger(const)) +defs_names = ['GETTIMEOFDAY_NO_TZ'] +for const in defs_names: + setattr(CConfig, const, rffi_platform.Defined(const)) + +globals().update(rffi_platform.configure(CConfig)) +TIMEVALP = lltype.Ptr(TIMEVAL) + +def external(name, args, result, **kwargs): + return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs) + +if HAVE_GETTIMEOFDAY: + if GETTIMEOFDAY_NO_TZ: + c_gettimeofday = external('gettimeofday', + [TIMEVALP], rffi.INT, + _nowrapper=True, releasegil=False) + else: + c_gettimeofday = external('gettimeofday', + [TIMEVALP, rffi.VOIDP], rffi.INT, + _nowrapper=True, releasegil=False) + +# On some systems (e.g. SCO ODT 3.0) gettimeofday() may +# fail, so we fall back on ftime() or time(). +if HAVE_FTIME: + c_ftime = external(FTIME, [lltype.Ptr(TIMEB)], + lltype.Void, + _nowrapper=True, releasegil=False) + +c_time = external('time', [rffi.VOIDP], rffi.TIME_T, + _nowrapper=True, releasegil=False) + + +def decode_timeval(t): + return (float(rffi.getintfield(t, 'c_tv_sec')) + + float(rffi.getintfield(t, 'c_tv_usec')) * 0.000001) + + + at replacement_for(time.time, sandboxed_name='ll_time.ll_time_time') +def floattime(): + # There are three ways to get the time: + # (1) gettimeofday() -- resolution in microseconds + # (2) ftime() -- resolution in milliseconds + # (3) time() -- resolution in seconds + # In all cases the return value is a float in seconds. + # Since on some systems (e.g. SCO ODT 3.0) gettimeofday() may + # fail, so we fall back on ftime() or time(). + # Note: clock resolution does not imply clock accuracy! 
+ + void = lltype.nullptr(rffi.VOIDP.TO) + result = -1.0 + if HAVE_GETTIMEOFDAY: + with lltype.scoped_alloc(TIMEVAL) as t: + errcode = -1 + if GETTIMEOFDAY_NO_TZ: + errcode = c_gettimeofday(t) + else: + errcode = c_gettimeofday(t, void) + + if intmask(errcode) == 0: + result = decode_timeval(t) + if result != -1: + return result + if HAVE_FTIME: + with lltype.scoped_alloc(TIMEB) as t: + c_ftime(t) + result = (float(intmask(t.c_time)) + + float(intmask(t.c_millitm)) * 0.001) + return result + else: + return float(c_time(void)) + diff --git a/rpython/rtyper/extregistry.py b/rpython/rtyper/extregistry.py --- a/rpython/rtyper/extregistry.py +++ b/rpython/rtyper/extregistry.py @@ -144,3 +144,21 @@ except KeyError: return False return True + +def replacement_for(replaced_function, sandboxed_name=None): + # The annotated function replaces calls to the given non-RPython + # function. + def wrap(func): + from rpython.rtyper.extregistry import ExtRegistryEntry + class ExtRegistry(ExtRegistryEntry): + _about_ = replaced_function + def compute_annotation(self): + if sandboxed_name: + config = self.bookkeeper.annotator.translator.config + if config.translation.sandbox: + func._sandbox_external_name = sandboxed_name + func._dont_inline_ = True + return self.bookkeeper.immutablevalue(func) + return func + return wrap + diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -84,67 +84,6 @@ self.configure(CConfig) self.TIMEVALP = lltype.Ptr(self.TIMEVAL) - @registering(time.time) - def register_time_time(self): - # Note: time.time() is used by the framework GC during collect(), - # which means that we have to be very careful about not allocating - # GC memory here. This is the reason for the _nowrapper=True. 
- - # AWFUL - if self.HAVE_GETTIMEOFDAY: - if self.GETTIMEOFDAY_NO_TZ: - c_gettimeofday = self.llexternal('gettimeofday', - [self.TIMEVALP], rffi.INT, - _nowrapper=True, releasegil=False) - else: - c_gettimeofday = self.llexternal('gettimeofday', - [self.TIMEVALP, rffi.VOIDP], rffi.INT, - _nowrapper=True, releasegil=False) - c_ftime = None # We have gettimeofday(2), so force ftime(3) OFF. - else: - c_gettimeofday = None - - # Only look for ftime(3) if gettimeofday(2) was not found. - if self.HAVE_FTIME: - self.configure(CConfigForFTime) - c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], - lltype.Void, - _nowrapper=True, releasegil=False) - else: - c_ftime = None # to not confuse the flow space - - c_time = self.llexternal('time', [rffi.VOIDP], rffi.TIME_T, - _nowrapper=True, releasegil=False) - - def time_time_llimpl(): - void = lltype.nullptr(rffi.VOIDP.TO) - result = -1.0 - if self.HAVE_GETTIMEOFDAY: - t = lltype.malloc(self.TIMEVAL, flavor='raw') - - errcode = -1 - if self.GETTIMEOFDAY_NO_TZ: - errcode = c_gettimeofday(t) - else: - errcode = c_gettimeofday(t, void) - - if rffi.cast(rffi.LONG, errcode) == 0: - result = decode_timeval(t) - lltype.free(t, flavor='raw') - if result != -1: - return result - else: # assume using ftime(3) - t = lltype.malloc(self.TIMEB, flavor='raw') - c_ftime(t) - result = (float(intmask(t.c_time)) + - float(intmask(t.c_millitm)) * 0.001) - lltype.free(t, flavor='raw') - return result - return float(c_time(void)) - - return extdef([], float, llimpl=time_time_llimpl, - export_name='ll_time.ll_time_time') - @registering(time.clock) def register_time_clock(self): if sys.platform == 'win32': diff --git a/rpython/rtyper/module/test/test_ll_time.py b/rpython/rtyper/module/test/test_ll_time.py --- a/rpython/rtyper/module/test/test_ll_time.py +++ b/rpython/rtyper/module/test/test_ll_time.py @@ -2,6 +2,8 @@ from rpython.rtyper.test.tool import BaseRtypingTest #from rpython.translator.c.test.test_genc import compile +from rpython.rlib 
import rtime # Register functions as side-effect + import time, sys class TestTime(BaseRtypingTest): From noreply at buildbot.pypy.org Sat Jan 10 17:29:51 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 17:29:51 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_time: Move time.clock(), and fix sanbox tests. Message-ID: <20150110162951.CDACF1C0417@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_time Changeset: r75286:5c699a5a1e58 Date: 2015-01-10 16:42 +0100 http://bitbucket.org/pypy/pypy/changeset/5c699a5a1e58/ Log: Move time.clock(), and fix sanbox tests. diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import intmask if sys.platform == 'win32': + _WIN32 = True TIME_H = 'time.h' FTIME = '_ftime64' STRUCT_TIMEB = 'struct __timeb64' @@ -14,6 +15,7 @@ TIME_H, 'sys/types.h', 'sys/timeb.h'] need_rusage = False else: + _WIN32 = False TIME_H = 'sys/time.h' FTIME = 'ftime' STRUCT_TIMEB = 'struct timeb' @@ -30,6 +32,13 @@ includes=includes ) +if sys.platform.startswith('freebsd') or sys.platform.startswith('netbsd'): + libraries = ['compat'] +elif sys.platform == 'linux2': + libraries = ['rt'] +else: + libraries = [] + class CConfig: _compilation_info_ = eci TIMEVAL = rffi_platform.Struct('struct timeval', [('tv_sec', rffi.INT), @@ -116,3 +125,66 @@ else: return float(c_time(void)) + +if _WIN32: + # hacking to avoid LARGE_INTEGER which is a union... 
+ A = lltype.FixedSizeArray(lltype.SignedLongLong, 1) + QueryPerformanceCounter = external( + 'QueryPerformanceCounter', [lltype.Ptr(A)], lltype.Void, + releasegil=False) + QueryPerformanceFrequency = external( + 'QueryPerformanceFrequency', [lltype.Ptr(A)], rffi.INT, + releasegil=False) + class ClockState(object): + divisor = 0.0 + counter_start = 0 + _clock_state = ClockState() +elif CLOCK_PROCESS_CPUTIME_ID is not None: + # Linux and other POSIX systems with clock_gettime() + class CConfigForClockGetTime: + _compilation_info_ = ExternalCompilationInfo( + includes=['time.h'], + libraries=libraries + ) + TIMESPEC = rffi_platform.Struct( + 'struct timespec', [ + ('tv_sec', rffi.LONG), + ('tv_nsec', rffi.LONG)]) + + cconfig = rffi_platform.configure(CConfigForClockGetTime) + TIMESPEC = cconfig['TIMESPEC'] + c_clock_gettime = external('clock_gettime', + [lltype.Signed, lltype.Ptr(TIMESPEC)], + rffi.INT, releasegil=False) +else: + c_getrusage = self.llexternal('getrusage', + [rffi.INT, lltype.Ptr(RUSAGE)], + lltype.Void, + releasegil=False) + + at replacement_for(time.clock, sandboxed_name='ll_time.ll_time_clock') +def clock(): + if _WIN32: + with lltype.static_alloc(A) as a: + if _clock_state.divisor == 0.0: + QueryPerformanceCounter(a) + _clock_state.counter_start = a[0] + QueryPerformanceFrequency(a) + _clock_state.divisor = float(a[0]) + QueryPerformanceCounter(a) + diff = a[0] - _clock_state.counter_start + return float(diff) / _clock_state.divisor + elif self.CLOCK_PROCESS_CPUTIME_ID is not None: + with lltype.static_alloc(TIMESPEC) as a: + c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) + result = (float(rffi.getintfield(a, 'c_tv_sec')) + + float(rffi.getintfield(a, 'c_tv_nsec')) * 0.000000001) + return result + else: + with lltype.static_alloc(RUSAGE) as a: + c_getrusage(RUSAGE_SELF, a) + result = (decode_timeval(a.c_ru_utime) + + decode_timeval(a.c_ru_stime)) + return result + + diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py 
--- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -2,6 +2,10 @@ from rpython.rtyper.extfunc import register_external +# Register functions as side-effect +from rpython.rlib import rtime +from rpython.rlib import rtermios + # ___________________________ # math functions diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -84,66 +84,6 @@ self.configure(CConfig) self.TIMEVALP = lltype.Ptr(self.TIMEVAL) - @registering(time.clock) - def register_time_clock(self): - if sys.platform == 'win32': - # hacking to avoid LARGE_INTEGER which is a union... - A = lltype.FixedSizeArray(lltype.SignedLongLong, 1) - QueryPerformanceCounter = self.llexternal( - 'QueryPerformanceCounter', [lltype.Ptr(A)], lltype.Void, - releasegil=False) - QueryPerformanceFrequency = self.llexternal( - 'QueryPerformanceFrequency', [lltype.Ptr(A)], rffi.INT, - releasegil=False) - class State(object): - pass - state = State() - state.divisor = 0.0 - state.counter_start = 0 - def time_clock_llimpl(): - a = lltype.malloc(A, flavor='raw') - if state.divisor == 0.0: - QueryPerformanceCounter(a) - state.counter_start = a[0] - QueryPerformanceFrequency(a) - state.divisor = float(a[0]) - QueryPerformanceCounter(a) - diff = a[0] - state.counter_start - lltype.free(a, flavor='raw') - return float(diff) / state.divisor - elif self.CLOCK_PROCESS_CPUTIME_ID is not None: - # Linux and other POSIX systems with clock_gettime() - self.configure(CConfigForClockGetTime) - TIMESPEC = self.TIMESPEC - CLOCK_PROCESS_CPUTIME_ID = self.CLOCK_PROCESS_CPUTIME_ID - c_clock_gettime = self.llexternal('clock_gettime', - [lltype.Signed, lltype.Ptr(TIMESPEC)], - rffi.INT, releasegil=False) - def time_clock_llimpl(): - a = lltype.malloc(TIMESPEC, flavor='raw') - c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) - result = (float(rffi.getintfield(a, 'c_tv_sec')) + - float(rffi.getintfield(a, 
'c_tv_nsec')) * 0.000000001) - lltype.free(a, flavor='raw') - return result - else: - RUSAGE = self.RUSAGE - RUSAGE_SELF = self.RUSAGE_SELF or 0 - c_getrusage = self.llexternal('getrusage', - [rffi.INT, lltype.Ptr(RUSAGE)], - lltype.Void, - releasegil=False) - def time_clock_llimpl(): - a = lltype.malloc(RUSAGE, flavor='raw') - c_getrusage(RUSAGE_SELF, a) - result = (decode_timeval(a.c_ru_utime) + - decode_timeval(a.c_ru_stime)) - lltype.free(a, flavor='raw') - return result - - return extdef([], float, llimpl=time_clock_llimpl, - export_name='ll_time.ll_time_clock') - @registering(time.sleep) def register_time_sleep(self): if sys.platform == 'win32': diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -385,6 +385,9 @@ return False if hasattr(fnobj, '_safe_not_sandboxed'): return not fnobj._safe_not_sandboxed + elif getattr(getattr(fnobj, '_callable', None), + '_sandbox_external_name', None): + return True else: return "if_external" diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -115,7 +115,11 @@ trampoline marshals its input arguments, dumps them to STDOUT, and waits for an answer on STDIN. 
""" - fnname = fnobj._name + if getattr(getattr(fnobj, '_callable', None), + '_sandbox_external_name', None): + fnname = fnobj._callable._sandbox_external_name + else: + fnname = fnobj._name if hasattr(fnobj, 'graph'): # get the annotation of the input arguments and the result graph = fnobj.graph From noreply at buildbot.pypy.org Sat Jan 10 17:29:53 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 17:29:53 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_time: Move time.sleep(), fix other tests Message-ID: <20150110162953.065241C0417@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_time Changeset: r75287:70a372733d5c Date: 2015-01-10 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/70a372733d5c/ Log: Move time.sleep(), fix other tests diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -1,10 +1,11 @@ -import sys, time +import sys, time, math from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.extregistry import replacement_for -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, UINT_MAX +from rpython.rlib import rposix if sys.platform == 'win32': _WIN32 = True @@ -157,15 +158,15 @@ [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, releasegil=False) else: - c_getrusage = self.llexternal('getrusage', - [rffi.INT, lltype.Ptr(RUSAGE)], - lltype.Void, - releasegil=False) + c_getrusage = external('getrusage', + [rffi.INT, lltype.Ptr(RUSAGE)], + lltype.Void, + releasegil=False) @replacement_for(time.clock, sandboxed_name='ll_time.ll_time_clock') def clock(): if _WIN32: - with lltype.static_alloc(A) as a: + with lltype.scoped_alloc(A) as a: if _clock_state.divisor == 0.0: QueryPerformanceCounter(a) _clock_state.counter_start = a[0] @@ -174,17 +175,45 @@ QueryPerformanceCounter(a) diff 
= a[0] - _clock_state.counter_start return float(diff) / _clock_state.divisor - elif self.CLOCK_PROCESS_CPUTIME_ID is not None: - with lltype.static_alloc(TIMESPEC) as a: + elif CLOCK_PROCESS_CPUTIME_ID is not None: + with lltype.scoped_alloc(TIMESPEC) as a: c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) result = (float(rffi.getintfield(a, 'c_tv_sec')) + float(rffi.getintfield(a, 'c_tv_nsec')) * 0.000000001) return result else: - with lltype.static_alloc(RUSAGE) as a: + with lltype.scoped_alloc(RUSAGE) as a: c_getrusage(RUSAGE_SELF, a) result = (decode_timeval(a.c_ru_utime) + decode_timeval(a.c_ru_stime)) return result +if _WIN32: + c_Sleep = external('Sleep', [rffi.ULONG], lltype.Void) +else: + c_select = external('select', [rffi.INT, rffi.VOIDP, + rffi.VOIDP, rffi.VOIDP, + TIMEVALP], rffi.INT) + + at replacement_for(time.sleep, sandboxed_name='ll_time.ll_time_sleep') +def sleep(secs): + # On windows, this call is not interruptible. + if _WIN32: + millisecs = secs * 1000.0 + while millisecs > UINT_MAX: + c_Sleep(UINT_MAX) + millisecs -= UINT_MAX + c_Sleep(rffi.cast(rffi.ULONG, int(millisecs))) + else: + void = lltype.nullptr(rffi.VOIDP.TO) + with lltype.scoped_alloc(TIMEVAL) as t: + frac = math.fmod(secs, 1.0) + rffi.setintfield(t, 'c_tv_sec', int(secs)) + rffi.setintfield(t, 'c_tv_usec', int(frac*1000000.0)) + + if rffi.cast(rffi.LONG, c_select(0, void, void, void, t)) != 0: + errno = rposix.get_errno() + if errno != EINTR: + raise OSError(errno, "Select failed") + diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -84,34 +84,3 @@ self.configure(CConfig) self.TIMEVALP = lltype.Ptr(self.TIMEVAL) - @registering(time.sleep) - def register_time_sleep(self): - if sys.platform == 'win32': - Sleep = self.llexternal('Sleep', [rffi.ULONG], lltype.Void) - def time_sleep_llimpl(secs): - millisecs = secs * 1000.0 - while millisecs > UINT_MAX: - Sleep(UINT_MAX) - 
millisecs -= UINT_MAX - Sleep(rffi.cast(rffi.ULONG, int(millisecs))) - else: - c_select = self.llexternal('select', [rffi.INT, rffi.VOIDP, - rffi.VOIDP, rffi.VOIDP, - self.TIMEVALP], rffi.INT) - def time_sleep_llimpl(secs): - void = lltype.nullptr(rffi.VOIDP.TO) - t = lltype.malloc(self.TIMEVAL, flavor='raw') - try: - frac = math.fmod(secs, 1.0) - rffi.setintfield(t, 'c_tv_sec', int(secs)) - rffi.setintfield(t, 'c_tv_usec', int(frac*1000000.0)) - - if rffi.cast(rffi.LONG, c_select(0, void, void, void, t)) != 0: - errno = rposix.get_errno() - if errno != EINTR: - raise OSError(rposix.get_errno(), "Select failed") - finally: - lltype.free(t, flavor='raw') - - return extdef([float], None, llimpl=time_sleep_llimpl, - export_name='ll_time.ll_time_sleep') From noreply at buildbot.pypy.org Sat Jan 10 17:29:54 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 10 Jan 2015 17:29:54 +0100 (CET) Subject: [pypy-commit] pypy kill_ll_time: Finally kill ll_time.py, move tests to rlib/ Message-ID: <20150110162954.3E1551C0417@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: kill_ll_time Changeset: r75288:7a213190810b Date: 2015-01-10 17:29 +0100 http://bitbucket.org/pypy/pypy/changeset/7a213190810b/ Log: Finally kill ll_time.py, move tests to rlib/ diff --git a/rpython/rtyper/module/test/test_ll_time.py b/rpython/rlib/test/test_rtime.py rename from rpython/rtyper/module/test/test_ll_time.py rename to rpython/rlib/test/test_rtime.py diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -12,7 +12,6 @@ import math from rpython.rtyper.lltypesystem.module import ll_math from rpython.rtyper.module import ll_os -from rpython.rtyper.module import ll_time from rpython.rlib import rfloat # the following functions all take one float, return one float diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py deleted file mode 100644 
--- a/rpython/rtyper/module/ll_time.py +++ /dev/null @@ -1,86 +0,0 @@ -""" -Low-level implementations for the external functions of the 'time' module. -""" - -import time, sys, math -from errno import EINTR -from rpython.rtyper.lltypesystem import rffi -from rpython.rtyper.tool import rffi_platform as platform -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.extfunc import BaseLazyRegistering, registering, extdef -from rpython.rlib import rposix -from rpython.rlib.rarithmetic import intmask, UINT_MAX -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -if sys.platform == 'win32': - TIME_H = 'time.h' - FTIME = '_ftime64' - STRUCT_TIMEB = 'struct __timeb64' - includes = ['winsock2.h', 'windows.h', - TIME_H, 'sys/types.h', 'sys/timeb.h'] - need_rusage = False -else: - TIME_H = 'sys/time.h' - FTIME = 'ftime' - STRUCT_TIMEB = 'struct timeb' - includes = [TIME_H, 'time.h', 'errno.h', 'sys/select.h', - 'sys/types.h', 'unistd.h', - 'sys/time.h', 'sys/resource.h'] - - if not sys.platform.startswith("openbsd"): - includes.append('sys/timeb.h') - - need_rusage = True - - -class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes=includes - ) - TIMEVAL = platform.Struct('struct timeval', [('tv_sec', rffi.INT), - ('tv_usec', rffi.INT)]) - HAVE_GETTIMEOFDAY = platform.Has('gettimeofday') - HAVE_FTIME = platform.Has(FTIME) - if need_rusage: - RUSAGE = platform.Struct('struct rusage', [('ru_utime', TIMEVAL), - ('ru_stime', TIMEVAL)]) - -if sys.platform.startswith('freebsd') or sys.platform.startswith('netbsd'): - libraries = ['compat'] -elif sys.platform == 'linux2': - libraries = ['rt'] -else: - libraries = [] - -class CConfigForFTime: - _compilation_info_ = ExternalCompilationInfo( - includes=[TIME_H, 'sys/timeb.h'], - libraries=libraries - ) - TIMEB = platform.Struct(STRUCT_TIMEB, [('time', rffi.INT), - ('millitm', rffi.INT)]) - -class CConfigForClockGetTime: - _compilation_info_ = ExternalCompilationInfo( - 
includes=['time.h'], - libraries=libraries - ) - TIMESPEC = platform.Struct('struct timespec', [('tv_sec', rffi.LONG), - ('tv_nsec', rffi.LONG)]) - -constant_names = ['RUSAGE_SELF', 'EINTR', 'CLOCK_PROCESS_CPUTIME_ID'] -for const in constant_names: - setattr(CConfig, const, platform.DefinedConstantInteger(const)) -defs_names = ['GETTIMEOFDAY_NO_TZ'] -for const in defs_names: - setattr(CConfig, const, platform.Defined(const)) - -def decode_timeval(t): - return (float(rffi.getintfield(t, 'c_tv_sec')) + - float(rffi.getintfield(t, 'c_tv_usec')) * 0.000001) - -class RegisterTime(BaseLazyRegistering): - def __init__(self): - self.configure(CConfig) - self.TIMEVALP = lltype.Ptr(self.TIMEVAL) - From noreply at buildbot.pypy.org Sat Jan 10 18:44:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Jan 2015 18:44:40 +0100 (CET) Subject: [pypy-commit] pypy default: workaround Message-ID: <20150110174440.C869C1C327D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75289:b5efdff2901b Date: 2015-01-10 17:44 +0000 http://bitbucket.org/pypy/pypy/changeset/b5efdff2901b/ Log: workaround diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -230,8 +230,11 @@ def make_len_gt(self, mode, descr, val): if self.lenbound: - assert self.lenbound.mode == mode - assert self.lenbound.descr == descr + if self.lenbound.mode != mode or self.lenbound.descr != descr: + # XXX a rare case? it seems to occur sometimes when + # running lib-python's test_io.py in PyPy on Linux 32... 
+ from rpython.jit.metainterp.optimize import InvalidLoop + raise InvalidLoop("bad mode/descr") self.lenbound.bound.make_gt(IntBound(val, val)) else: self.lenbound = LenBound(mode, descr, IntLowerBound(val + 1)) From noreply at buildbot.pypy.org Sat Jan 10 21:25:49 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 10 Jan 2015 21:25:49 +0100 (CET) Subject: [pypy-commit] pypy rewrite-unrolling: (fijal, arigo) start branch to rewrite unrolling from scratch Message-ID: <20150110202549.3D3481C31CF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75290:3087933208c6 Date: 2015-01-10 19:36 +0100 http://bitbucket.org/pypy/pypy/changeset/3087933208c6/ Log: (fijal, arigo) start branch to rewrite unrolling from scratch diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -31,7 +31,7 @@ assert ENABLE_ALL_OPTS == ALL_OPTS_NAMES, ( 'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,)) -def build_opt_chain(metainterp_sd, enable_opts): +def build_opt_chain(enable_opts): optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict for name, opt in unroll_all_opts: @@ -57,7 +57,7 @@ try: loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, loop.operations) - optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) + optimizations, unroll = build_opt_chain(enable_opts) if unroll: return optimize_unroll(metainterp_sd, jitdriver_sd, loop, optimizations, diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -16,24 +16,23 @@ names = [opt.__class__.__name__ for opt in chain] assert names == 
expected_names # - metainterp_sd = FakeMetaInterpStaticData(None) - chain, _ = build_opt_chain(metainterp_sd, "") + chain, _ = build_opt_chain("") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "") + chain, _ = build_opt_chain("") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "") + chain, _ = build_opt_chain("") check(chain, ["OptSimplify"]) # - chain, _ = build_opt_chain(metainterp_sd, "heap:intbounds") + chain, _ = build_opt_chain("heap:intbounds") check(chain, ["OptIntBounds", "OptHeap", "OptSimplify"]) # - chain, unroll = build_opt_chain(metainterp_sd, "unroll") + chain, unroll = build_opt_chain("unroll") check(chain, ["OptSimplify"]) assert unroll # - chain, _ = build_opt_chain(metainterp_sd, "aaa:bbb") + chain, _ = build_opt_chain("aaa:bbb") check(chain, ["OptSimplify"]) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -0,0 +1,37 @@ + +from rpython.jit.tool.oparser import parse +from rpython.jit.metainterp.optimizeopt import optimize_trace +from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest,\ + FakeMetaInterpStaticData +from rpython.jit.backend.llgraph import runner + +class TestUnrollDirect(BaseTest): + cpu = runner.LLGraphCPU(None) + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + + def optimize(self, loop, expected=None, export_state=False, start_state=None): + metainterp_sd = FakeMetaInterpStaticData(self.cpu) + loop = parse(loop) + state = optimize_trace(metainterp_sd, None, loop, self.enable_opts, + export_state=export_state, start_state=start_state) + if expected is not None: + expected = parse(expected) + self.assert_equal(loop, expected) + return state + + def test_basic_unroll(self): + preamble = """ + [i0] + label(i0) + i1 = int_add(i0, 1) + label(i1) + """ + 
exported_state = self.optimize(preamble, export_state=True) + loop = """ + [i0] + label(i0) + i1 = int_add(i0, 1) + jump(i1) + """ + self.optimize(loop, loop, start_state=exported_state) + diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -21,7 +21,7 @@ from rpython.jit.metainterp.counter import DeterministicJitCounter from rpython.config.translationoption import get_combined_translation_config from rpython.jit.metainterp.resoperation import rop, opname, ResOperation -from rpython.jit.metainterp.optimizeopt.unroll import Inliner +from rpython.jit.metainterp.inliner import Inliner def test_sort_descrs(): class PseudoDescr(AbstractDescr): diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll2.py rename from rpython/jit/metainterp/optimizeopt/unroll.py rename to rpython/jit/metainterp/optimizeopt/unroll2.py From noreply at buildbot.pypy.org Sat Jan 10 21:25:50 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 10 Jan 2015 21:25:50 +0100 (CET) Subject: [pypy-commit] pypy rewrite-unrolling: (arigo, fijal) progress pure opts Message-ID: <20150110202550.8BE971C31CF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75291:84ee4dbb2783 Date: 2015-01-10 21:25 +0100 http://bitbucket.org/pypy/pypy/changeset/84ee4dbb2783/ Log: (arigo, fijal) progress pure opts diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -48,8 +48,7 @@ return optimizations, unroll def optimize_trace(metainterp_sd, jitdriver_sd, loop, enable_opts, - inline_short_preamble=True, start_state=None, - export_state=True): + inline_short_preamble=True, 
unroller=None): """Optimize loop.operations to remove internal overheadish operations. """ @@ -61,8 +60,7 @@ if unroll: return optimize_unroll(metainterp_sd, jitdriver_sd, loop, optimizations, - inline_short_preamble, start_state, - export_state) + inline_short_preamble, unroller) else: optimizer = Optimizer(metainterp_sd, jitdriver_sd, loop, optimizations) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -1,19 +1,25 @@ from rpython.jit.tool.oparser import parse from rpython.jit.metainterp.optimizeopt import optimize_trace +from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer +from rpython.jit.metainterp.optimizeopt.unroll import Unroller from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest,\ FakeMetaInterpStaticData +from rpython.jit.metainterp.optimizeopt.pure import OptPure +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.history import ConstInt, BoxInt from rpython.jit.backend.llgraph import runner class TestUnrollDirect(BaseTest): cpu = runner.LLGraphCPU(None) enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" + metainterp_sd = FakeMetaInterpStaticData(cpu) - def optimize(self, loop, expected=None, export_state=False, start_state=None): - metainterp_sd = FakeMetaInterpStaticData(self.cpu) - loop = parse(loop) - state = optimize_trace(metainterp_sd, None, loop, self.enable_opts, - export_state=export_state, start_state=start_state) + def optimize(self, loop, expected=None, unroller=None): + if isinstance(loop, str): + loop = parse(loop) + state = optimize_trace(self.metainterp_sd, None, loop, self.enable_opts, + unroller=unroller) if expected is not None: expected = parse(expected) self.assert_equal(loop, expected) @@ -26,12 +32,34 @@ i1 = int_add(i0, 1) label(i1) """ - 
exported_state = self.optimize(preamble, export_state=True) + unroller = self.optimize(preamble) loop = """ [i0] label(i0) i1 = int_add(i0, 1) jump(i1) """ - self.optimize(loop, loop, start_state=exported_state) + self.optimize(loop, loop, unroller=unroller) + def test_pure_opts(self): + loop = parse(""" + [i0] + label(i0) + i1 = int_add(i0, 1) + escape(i1) + jump(i0) + """) + pure = OptPure() + i0 = loop.operations[1].getarg(0) + i1 = BoxInt() + unroller = Unroller() + unroller.optimizer = Optimizer(self.metainterp_sd, None, None, [pure]) + pure.optimizer = unroller.optimizer + pure.pure(rop.INT_ADD, [i0, ConstInt(1)], i1) + expected = """ + [i0, i1] + label(i0, i1) + escape(i1) + jump(i0, i1) + """ + self.optimize(loop, expected, unroller=unroller) From noreply at buildbot.pypy.org Sat Jan 10 21:52:27 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 10 Jan 2015 21:52:27 +0100 (CET) Subject: [pypy-commit] pypy rewrite-unrolling: (arigo) Forgot to add this file Message-ID: <20150110205227.E88BA1C327D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75292:ca6b329c8e25 Date: 2015-01-10 21:52 +0100 http://bitbucket.org/pypy/pypy/changeset/ca6b329c8e25/ Log: (arigo) Forgot to add this file diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -0,0 +1,61 @@ + +from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer, OptValue + + +def optimize_unroll(metainterp_sd, jitdriver_sd, loop, optimizations, + inline_short_preamble=True, unroller=None): + if unroller is None: + unroller = Unroller() + unroller.set_optimizer(Optimizer(metainterp_sd, jitdriver_sd, + loop, optimizations)) + unroller.propagate(inline_short_preamble) + return unroller + + +class OptPureValue(OptValue): + _attrs_ = ('unroller', 'keybox') + box = None + + def __init__(self, unroller, keybox): + 
self.unroller = unroller + self.keybox = keybox + + def force_box(self, ignored): + if self.box is None: + self.box = self.keybox + self.unroller.reuse_pure_result(self.box) + return self.box + + +class Unroller(object): + optimizer = None + + def set_optimizer(self, optimizer): + old_optimizer = self.optimizer + self.optimizer = optimizer + if old_optimizer is not None: + self.import_state_from_optimizer(old_optimizer) + + def propagate(self, inline_short_preamble): + self.optimizer.propagate_all_forward() + + def import_state_from_optimizer(self, old_optimizer): + old_optpure = old_optimizer.optpure + if old_optpure: + # import all pure operations from the old optimizer + new_optpure = self.optimizer.optpure + for opargs, value in old_optpure.pure_operations.items(): + if not value.is_virtual(): + pure_value = OptPureValue(self, value.box) + new_optpure.pure_operations[opargs] = pure_value + + def reuse_pure_result(self, box): + label1_op = self.optimizer.loop.operations[0] + label1_args = label1_op.getarglist() + label2_op = self.optimizer.loop.operations[-1] + label2_args = label2_op.getarglist() + assert len(label1_args) == len(self.optimizer.loop.inputargs) + assert len(label2_args) == len(self.optimizer.loop.inputargs) + self.optimizer.loop.inputargs.append(box) + label1_args.append(box) + label2_args.append(box) From noreply at buildbot.pypy.org Sat Jan 10 22:55:33 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 10 Jan 2015 22:55:33 +0100 (CET) Subject: [pypy-commit] pypy ufuncapi: test broadcasting, add necessary but failing nditer itershape tests Message-ID: <20150110215533.73B7C1C327D@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r75293:a249b7e1b6ae Date: 2015-01-10 23:55 +0200 http://bitbucket.org/pypy/pypy/changeset/a249b7e1b6ae/ Log: test broadcasting, add necessary but failing nditer itershape tests diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- 
a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -167,7 +167,8 @@ exc = raises(TypeError, nditer, a, op_dtypes=['complex']) assert str(exc.value).startswith("Iterator operand required copying or buffering") exc = raises(ValueError, nditer, a, op_flags=['copy'], op_dtypes=['complex128']) - assert str(exc.value) == "None of the iterator flags READWRITE, READONLY, or WRITEONLY were specified for an operand" + assert str(exc.value) == "None of the iterator flags READWRITE," \ + " READONLY, or WRITEONLY were specified for an operand" r = [] for x in nditer(a, op_flags=['readonly','copy'], op_dtypes=['complex128']): @@ -320,3 +321,34 @@ assert res == [(0, (0, 0)), (1, (0, 1)), (2, (0, 2)), (3, (1, 0)), (4, (1, 1)), (5, (1, 2))] + + def test_itershape(self): + # Check that allocated outputs work with a specified shape + from numpy import nditer, arange + a = arange(6, dtype='i2').reshape(2,3) + i = nditer([a, None], [], [['readonly'], ['writeonly','allocate']], + op_axes=[[0,1,None], None], + itershape=(-1,-1,4)) + assert_equal(i.operands[1].shape, (2,3,4)) + assert_equal(i.operands[1].strides, (24,8,2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly','allocate']], + op_axes=[[0,1,None], None], + itershape=(-1,-1,4)) + assert_equal(i.operands[1].shape, (3,2,4)) + assert_equal(i.operands[1].strides, (8,24,2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly','allocate']], + order='F', + op_axes=[[0,1,None], None], + itershape=(-1,-1,4)) + assert_equal(i.operands[1].shape, (3,2,4)) + assert_equal(i.operands[1].strides, (2,6,12)) + + # If we specify 1 in the itershape, it shouldn't allow broadcasting + # of that dimension to a bigger value + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly','allocate']], + op_axes=[[0,1,None], None], + itershape=(-1,1,4)) + diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- 
a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -245,6 +245,31 @@ ai2 = ufunc(ai) assert (ai2 == ai * 2).all() + def test_frompyfunc_sig_broadcast(self): + def sum_along_0(in_array, out_array): + out_array[...] = in_array.sum(axis=0) + + def add_two(in0, in1, out): + out[...] = in0 + in1 + + from numpy import frompyfunc, dtype, arange + ufunc_add = frompyfunc(add_two, 2, 1, + signature='(m,n),(m,n)->(m,n)', + dtypes=[dtype(int), dtype(int), dtype(int)], + stack_inputs=True, + ) + ufunc_sum = frompyfunc([sum_along_0], 1, 1, + signature='(m,n)->(n)', + dtypes=[dtype(int), dtype(int)], + stack_inputs=True, + ) + ai = arange(18, dtype=int).reshape(3,2,3) + aout = ufunc_add(ai, ai[0,:,:]) + assert aout.shape == (3, 2, 3) + aout = ufunc_sum(ai) + assert aout.shape == (3, 3) + + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -869,7 +869,7 @@ self.name, name, _i, j, x, y) iter_shape[offset + j] = max(x, y) #print 'Find or verify signature ixs',self.core_dim_ixs, - #print 'starting',dim_offset,'n',num_dims,'matching',dims_to_match + #print 'starting',dim_offset,'n',n,'num_dims',num_dims,'matching',dims_to_match for j in range(num_dims): core_dim_index = self.core_dim_ixs[dim_offset + j] if core_dim_index > len(dims_to_match): @@ -886,7 +886,13 @@ self.name, name, _i, j, self.signature, matched_dims[core_dim_index], dims_to_match[core_dim_index]) - arg_shapes.append(iter_shape + dims_to_match) + #print 'adding',iter_shape,'+',dims_to_match,'to arg_shapes' + if n < len(iter_shape): + #Broadcast over the len(iter_shape) - n dims of iter_shape + broadcast_dims = len(iter_shape) - n + arg_shapes.append(iter_shape[:-broadcast_dims] + [1] * broadcast_dims + dims_to_match) + else: + arg_shapes.append(iter_shape + 
dims_to_match) # TODO once we support obejct dtypes, # FAIL with NotImplementedError if the other object has # the __r__ method and has a higher priority than From noreply at buildbot.pypy.org Sat Jan 10 22:58:10 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Jan 2015 22:58:10 +0100 (CET) Subject: [pypy-commit] pypy default: issue #1959: print(file=None) should print to sys.stdout. Message-ID: <20150110215810.3474D1C3354@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75294:d0ccb94ead87 Date: 2015-01-10 22:58 +0100 http://bitbucket.org/pypy/pypy/changeset/d0ccb94ead87/ Log: issue #1959: print(file=None) should print to sys.stdout. diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -86,9 +86,9 @@ def print_(*args, **kwargs): """The new-style print function from py3k.""" - fp = kwargs.pop("file", sys.stdout) + fp = kwargs.pop("file", None) if fp is None: - return + fp = sys.stdout def write(data): if not isinstance(data, basestring): data = str(data) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -651,9 +651,10 @@ out = sys.stdout = StringIO.StringIO() try: pr("Hello,", "person!") + pr("2nd line", file=None) finally: sys.stdout = save - assert out.getvalue() == "Hello, person!\n" + assert out.getvalue() == "Hello, person!\n2nd line\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" @@ -668,7 +669,6 @@ result = out.getvalue() assert isinstance(result, unicode) assert result == u"Hello, person!\n" - pr("Hello", file=None) # This works. 
out = StringIO.StringIO() pr(None, file=out) assert out.getvalue() == "None\n" From noreply at buildbot.pypy.org Sat Jan 10 23:54:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Jan 2015 23:54:57 +0100 (CET) Subject: [pypy-commit] pypy default: If sys.stdout is None, the print() function does nothing, Message-ID: <20150110225457.7892A1C1148@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75295:5953b9989bd3 Date: 2015-01-10 23:54 +0100 http://bitbucket.org/pypy/pypy/changeset/5953b9989bd3/ Log: If sys.stdout is None, the print() function does nothing, for some reason, instead of crashing. diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -89,6 +89,8 @@ fp = kwargs.pop("file", None) if fp is None: fp = sys.stdout + if fp is None: + return def write(data): if not isinstance(data, basestring): data = str(data) From noreply at buildbot.pypy.org Sun Jan 11 00:03:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Jan 2015 00:03:29 +0100 (CET) Subject: [pypy-commit] pypy default: test for 5953b9989bd3 Message-ID: <20150110230329.80AC71C1148@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75296:256f8ea1be83 Date: 2015-01-11 00:02 +0100 http://bitbucket.org/pypy/pypy/changeset/256f8ea1be83/ Log: test for 5953b9989bd3 diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -652,6 +652,8 @@ try: pr("Hello,", "person!") pr("2nd line", file=None) + sys.stdout = None + pr("nowhere") finally: sys.stdout = save assert out.getvalue() == "Hello, person!\n2nd line\n" From noreply at buildbot.pypy.org Sun Jan 11 11:22:20 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 11 Jan 2015 11:22:20 +0100 (CET) Subject: [pypy-commit] pypy 
rewrite-unrolling: shuffle stuff around so we don't need unroller as an attribute Message-ID: <20150111102220.5F8761D22CD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75297:2c65ae7ccd02 Date: 2015-01-11 12:22 +0200 http://bitbucket.org/pypy/pypy/changeset/2c65ae7ccd02/ Log: shuffle stuff around so we don't need unroller as an attribute diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -847,6 +847,20 @@ op.getopnum(), argboxes, op.getdescr()) return resbox.constbox() + # for unrolling + def reuse_pure_result(self, box): + #if box in self.short_boxes_seen: + # return + label1_op = self.loop.operations[0] + label1_args = label1_op.getarglist() + label2_op = self.loop.operations[-1] + label2_args = label2_op.getarglist() + assert len(label1_args) == len(self.loop.inputargs) + assert len(label2_args) == len(self.loop.inputargs) + self.loop.inputargs.append(box) + label1_args.append(box) + label2_args.append(box) + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -388,8 +388,7 @@ assert equaloplists(optimized.operations, expected.operations, False, remap, text_right) - def _do_optimize_loop(self, loop, call_pure_results, start_state=None, - export_state=False): + def _do_optimize_loop(self, loop, call_pure_results, unroller=None): from rpython.jit.metainterp.optimizeopt import optimize_trace from rpython.jit.metainterp.optimizeopt.util import args_dict @@ -406,8 +405,7 @@ # return optimize_trace(metainterp_sd, None, loop, 
self.enable_opts, - start_state=start_state, - export_state=export_state) + unroller=unroller) def unroll_and_optimize(self, loop, call_pure_results=None): self.add_guard_future_condition(loop) @@ -427,8 +425,7 @@ preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ operations + \ [ResOperation(rop.LABEL, jump_args, None, descr=token)] - start_state = self._do_optimize_loop(preamble, call_pure_results, - export_state=True) + start_state = self._do_optimize_loop(preamble, call_pure_results) assert preamble.operations[-1].getopnum() == rop.LABEL @@ -442,8 +439,7 @@ assert loop.operations[0].getopnum() == rop.LABEL loop.inputargs = loop.operations[0].getarglist() - self._do_optimize_loop(loop, call_pure_results, start_state, - export_state=False) + self._do_optimize_loop(loop, call_pure_results, unroller=start_state) extra_same_as = [] while loop.operations[0].getopnum() != rop.LABEL: extra_same_as.append(loop.operations[0]) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -13,17 +13,17 @@ class OptPureValue(OptValue): - _attrs_ = ('unroller', 'keybox') + _attrs_ = ('keybox',) box = None def __init__(self, unroller, keybox): self.unroller = unroller self.keybox = keybox - def force_box(self, ignored): + def force_box(self, optforce): if self.box is None: self.box = self.keybox - self.unroller.reuse_pure_result(self.box) + optforce.optimizer.reuse_pure_result(self.box) return self.box @@ -48,14 +48,3 @@ if not value.is_virtual(): pure_value = OptPureValue(self, value.box) new_optpure.pure_operations[opargs] = pure_value - - def reuse_pure_result(self, box): - label1_op = self.optimizer.loop.operations[0] - label1_args = label1_op.getarglist() - label2_op = self.optimizer.loop.operations[-1] - label2_args = label2_op.getarglist() - assert len(label1_args) == 
len(self.optimizer.loop.inputargs) - assert len(label2_args) == len(self.optimizer.loop.inputargs) - self.optimizer.loop.inputargs.append(box) - label1_args.append(box) - label2_args.append(box) From noreply at buildbot.pypy.org Sun Jan 11 15:04:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Jan 2015 15:04:14 +0100 (CET) Subject: [pypy-commit] cffi default: Fix (thanks gkcn on irc): in cdef() we can say "#define FOO 42", but Message-ID: <20150111140414.69E841D2398@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1630:932dc0fe2e16 Date: 2015-01-11 15:04 +0100 http://bitbucket.org/cffi/cffi/changeset/932dc0fe2e16/ Log: Fix (thanks gkcn on irc): in cdef() we can say "#define FOO 42", but this declaration is completely ignored in verify(). Instead, we need to check that the real value is 42, and store the name FOO on the returned library object. diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -229,12 +229,18 @@ pyvalue = int(int_str, 0) self._add_constants(key, pyvalue) + self._declare('macro ' + key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: - raise api.CDefError('only supports the syntax "#define ' - '%s ..." (literally) or "#define ' - '%s 0x1FF" for now' % (key, key)) + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... 
(literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) def _parse_decl(self, decl): node = decl.type diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -592,7 +592,8 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True, size_too=False): + vartp=None, delayed=True, size_too=False, + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -604,6 +605,9 @@ else: assert category == 'const' # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # if not is_int: if category == 'var': realexpr = '&' + name @@ -651,6 +655,27 @@ # ---------- # enums + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -667,25 +692,8 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue <= 0: - prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) <= 0 || (unsigned 
long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) <= 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %s, ' - 'not %s",') - prnt(' "%s", "%s", buf, "%d");' % ( - name, enumerator, enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') @@ -709,8 +717,11 @@ # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): - assert tp == '...' - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -355,11 +355,20 @@ # ---------- # constants, likely declared with '#define' - def _generate_gen_const(self, is_int, name, tp=None, category='const'): + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) - if is_int: + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') @@ -368,6 +377,7 @@ prnt('}') else: assert tp is not None + assert check_value is None prnt(tp.get_c_name(' 
%s(void)' % funcname, name),) prnt('{') if category == 'var': @@ -384,9 +394,13 @@ _loading_gen_constant = _loaded_noop - def _load_constant(self, is_int, tp, name, module): + def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name - if is_int: + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) @@ -397,6 +411,7 @@ BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: + assert check_value is None BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() @@ -411,6 +426,36 @@ # ---------- # enums + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise ffiplatform.VerificationError(error) + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = 
name.replace('$', '___D_') @@ -428,24 +473,7 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue <= 0: - prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) <= 0)' % enumerator) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' sprintf(out_error,' - ' "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % ( - enumerator[:100], enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() @@ -457,16 +485,8 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = self._enum_funcname(prefix, name) - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise ffiplatform.VerificationError(error) + self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): @@ -477,13 +497,21 @@ # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): - assert tp == '...' 
- self._generate_gen_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): - value = self._load_constant(True, tp, name, module) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -163,8 +163,12 @@ ffi = FFI(backend=FakeBackend()) e = py.test.raises(CDefError, ffi.cdef, '#define FOO "blah"') assert str(e.value) == ( - 'only supports the syntax "#define FOO ..." (literally)' - ' or "#define FOO 0x1FF" for now') + 'only supports one of the following syntax:\n' + ' #define FOO ... (literally dot-dot-dot)\n' + ' #define FOO NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define FOO "blah"') def test_unnamed_struct(): ffi = FFI(backend=FakeBackend()) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -2139,3 +2139,15 @@ this_dir = os.path.dirname(__file__) pycache_files = os.listdir(os.path.join(this_dir, '__pycache__')) assert any('test_use_local_dir' in s for s in pycache_files) + +def test_define_known_value(): + ffi = FFI() + ffi.cdef("#define FOO 0x123") + lib = ffi.verify("#define FOO 0x123") + assert lib.FOO == 0x123 + +def test_define_wrong_value(): + ffi = FFI() + ffi.cdef("#define FOO 123") + e = py.test.raises(VerificationError, ffi.verify, "#define FOO 124") + assert str(e.value).endswith("FOO has the real value 124, not 123") From noreply at buildbot.pypy.org Sun Jan 11 22:50:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Jan 2015 22:50:29 +0100 
(CET) Subject: [pypy-commit] cffi default: Change again ffi.offsetof() and ffi.addressof(), generalizing them. Message-ID: <20150111215029.E155B1C1033@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1631:6acfa48521dd Date: 2015-01-11 22:23 +0100 http://bitbucket.org/cffi/cffi/changeset/6acfa48521dd/ Log: Change again ffi.offsetof() and ffi.addressof(), generalizing them. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4836,27 +4836,19 @@ CTypeDescrObject *ct; CFieldObject *cf; Py_ssize_t offset; - - if (!PyArg_ParseTuple(args, "O!O:typeoffsetof", - &CTypeDescr_Type, &ct, &fieldname)) + int following = 0; + + if (!PyArg_ParseTuple(args, "O!O|i:typeoffsetof", + &CTypeDescr_Type, &ct, &fieldname, &following)) return NULL; - if (fieldname == Py_None) { - if (!(ct->ct_flags & (CT_STRUCT|CT_UNION))) { - PyErr_SetString(PyExc_TypeError, - "expected a struct or union ctype"); - return NULL; - } - res = (PyObject *)ct; - offset = 0; - } - else { - if (ct->ct_flags & CT_POINTER) + if (PyTextAny_Check(fieldname)) { + if (!following && (ct->ct_flags & CT_POINTER)) ct = ct->ct_itemdescr; if (!(ct->ct_flags & (CT_STRUCT|CT_UNION)) || ct->ct_stuff == NULL) { PyErr_SetString(PyExc_TypeError, - "expected an initialized struct or union ctype, " - "or a pointer to one"); + "with a field name argument, expected an " + "initialized struct or union ctype"); return NULL; } cf = (CFieldObject *)PyDict_GetItem(ct->ct_stuff, fieldname); @@ -4871,6 +4863,29 @@ res = (PyObject *)cf->cf_type; offset = cf->cf_offset; } + else { + ssize_t index = PyInt_AsSsize_t(fieldname); + if (index < 0 && PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "field name or array index expected"); + return NULL; + } + + if (!(ct->ct_flags & (CT_ARRAY|CT_POINTER)) || + ct->ct_itemdescr->ct_size < 0) { + PyErr_SetString(PyExc_TypeError, "with an integer argument, " + "expected an array ctype or a " + "pointer to non-opaque"); + return 
NULL; + } + res = (PyObject *)ct->ct_itemdescr; + offset = index * ct->ct_itemdescr->ct_size; + if ((offset / ct->ct_itemdescr->ct_size) != index) { + PyErr_SetString(PyExc_OverflowError, + "array offset would overflow a Py_ssize_t"); + return NULL; + } + } return Py_BuildValue("(On)", res, offset); } @@ -4878,15 +4893,17 @@ { CTypeDescrObject *ct; CDataObject *cd; - Py_ssize_t offset = 0; - - if (!PyArg_ParseTuple(args, "O!O!|n:rawaddressof", + Py_ssize_t offset; + int accepted_flags; + + if (!PyArg_ParseTuple(args, "O!O!n:rawaddressof", &CTypeDescr_Type, &ct, &CData_Type, &cd, &offset)) return NULL; - if ((cd->c_type->ct_flags & (CT_STRUCT|CT_UNION|CT_IS_PTR_TO_OWNED)) == 0) { + accepted_flags = CT_STRUCT | CT_UNION | CT_ARRAY | CT_POINTER; + if ((cd->c_type->ct_flags & accepted_flags) == 0) { PyErr_SetString(PyExc_TypeError, "expected a 'cdata struct-or-union' object"); return NULL; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2538,13 +2538,31 @@ ('a2', BChar, -1), ('a3', BChar, -1)]) py.test.raises(TypeError, typeoffsetof, BStructPtr, None) - assert typeoffsetof(BStruct, None) == (BStruct, 0) + py.test.raises(TypeError, typeoffsetof, BStruct, None) assert typeoffsetof(BStructPtr, 'a1') == (BChar, 0) assert typeoffsetof(BStruct, 'a1') == (BChar, 0) assert typeoffsetof(BStructPtr, 'a2') == (BChar, 1) assert typeoffsetof(BStruct, 'a3') == (BChar, 2) + assert typeoffsetof(BStructPtr, 'a2', 0) == (BChar, 1) + py.test.raises(TypeError, typeoffsetof, BStructPtr, 'a2', 1) py.test.raises(KeyError, typeoffsetof, BStructPtr, 'a4') py.test.raises(KeyError, typeoffsetof, BStruct, 'a5') + py.test.raises(TypeError, typeoffsetof, BStruct, 42) + py.test.raises(TypeError, typeoffsetof, BChar, 'a1') + +def test_typeoffsetof_array(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + py.test.raises(TypeError, typeoffsetof, BArray, None) + py.test.raises(TypeError, typeoffsetof, BArray, 
'a1') + assert typeoffsetof(BArray, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BIntP, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BArray, -51) == (BInt, -51 * size_of_int()) + MAX = sys.maxsize // size_of_int() + assert typeoffsetof(BArray, MAX) == (BInt, MAX * size_of_int()) + assert typeoffsetof(BIntP, MAX) == (BInt, MAX * size_of_int()) + py.test.raises(OverflowError, typeoffsetof, BArray, MAX + 1) def test_typeoffsetof_no_bitfield(): BInt = new_primitive_type("int") @@ -2564,14 +2582,14 @@ assert repr(p) == "" s = p[0] assert repr(s) == "" - a = rawaddressof(BStructPtr, s) + a = rawaddressof(BStructPtr, s, 0) assert repr(a).startswith(". - If 'field' is specified, return the address of this field. - The field may be 'x.y.z' in case of nested structures. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. """ ctype = self._backend.typeof(cdata) - ctype, offset = self._typeoffsetof(ctype, field) + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) - def _typeoffsetof(self, ctype, field): - if field is not None and '.' 
in field: - offset = 0 - for field1 in field.split('.'): - ctype, offset1 = self._backend.typeoffsetof(ctype, field1) - offset += offset1 - return ctype, offset - return self._backend.typeoffsetof(ctype, field) + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -169,6 +169,7 @@ class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False + kind = "pointer" @classmethod def _newp(cls, init): @@ -370,10 +371,12 @@ (CTypesPrimitive, type(source).__name__)) return source # + kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name + kind = kind1 def __init__(self, value): self._value = value @@ -703,12 +706,13 @@ class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) - _kind = kind + _kind = kind = kind1 # CTypesStructOrUnion._fix_class() return CTypesStructOrUnion @@ -994,27 +998,42 @@ def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) - def typeoffsetof(self, BType, fieldname): - if fieldname is not None and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - if fieldname is None: - return (BType, 0) - else: + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): 
+ if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) - def rawaddressof(self, BTypePtr, cdata, offset): + def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a ") - if offset != 0: + if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1301,11 +1301,15 @@ **ffi.alignof("C type")**: return the alignment of the C type. Corresponds to the ``__alignof__`` operator in GCC. -**ffi.offsetof("C struct type", "fieldname")**: return the offset within -the struct of the given field. Corresponds to ``offsetof()`` in C. +**ffi.offsetof("C struct or array type", *fields_or_indexes)**: return the +offset within the struct of the given field. Corresponds to ``offsetof()`` +in C. .. 
versionchanged:: 0.9 - ``"fieldname"`` can be ``"x.y"`` in case of nested structures. + You can give several field names in case of nested structures. You + can also give numeric values which correspond to array items, in case + of a pointer or array type. For example, ``ffi.offsetof("int[5]", 2)`` + is equal to the size of two integers, as is ``ffi.offsetof("int *", 2)``. **ffi.getctype("C type" or , extra="")**: return the string representation of the given C type. If non-empty, the "extra" string is @@ -1358,21 +1362,26 @@ .. "versionadded:: 0.7" --- inlined in the previous paragraph -**ffi.addressof(cdata, field=None)**: from a cdata whose type is -``struct foo_s``, return its "address", as a cdata whose type is -``struct foo_s *``. Also works on unions, but not on any other type. -(It would be difficult because only structs and unions are internally -stored as an indirect pointer to the data. If you need a C int whose -address can be taken, use ``ffi.new("int[1]")`` in the first place; -similarly, if it's a C pointer, use ``ffi.new("foo_t *[1]")``.) -If ``field`` is given, -returns the address of that field in the structure. The returned -pointer is only valid as long as the original ``cdata`` object is; be -sure to keep it alive if it was obtained directly from ``ffi.new()``. -*New in version 0.4.* +**ffi.addressof(cdata, *fields_or_indexes)**: equivalent to the C +expression ``&cdata`` or ``&cdata.field`` or ``&cdata->field`` or +``&cdata[index]`` (or any combination of fields and indexes). Works +with the same ctypes where one of the above expressions would work in +C, with one exception: if no ``fields_or_indexes`` is specified, it +cannot be used to take the address of a primitive or pointer (it would +be difficult to implement because only structs and unions and arrays +are internally stored as an indirect pointer to the data. 
If you need +a C int whose address can be taken, use ``ffi.new("int[1]")`` in the +first place; similarly, for a pointer, use ``ffi.new("foo_t *[1]")``.) + +The returned pointer is only valid as long as the original ``cdata`` +object is; be sure to keep it alive if it was obtained directly from +``ffi.new()``. *New in version 0.4.* .. versionchanged:: 0.9 - ``"field"`` can be ``"x.y"`` in case of nested structures. + You can give several field names in case of nested structures, and + you can give numeric values for array items. Note that + ``&cdata[index]`` can also be expressed as simply ``cdata + index``, + both in C and in CFFI. .. "versionadded:: 0.4" --- inlined in the previous paragraph diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -964,9 +964,19 @@ ffi.cdef("struct foo { int a, b, c; };" "struct bar { struct foo d, e; };") assert ffi.offsetof("struct bar", "e") == 12 - assert ffi.offsetof("struct bar", "e.a") == 12 - assert ffi.offsetof("struct bar", "e.b") == 16 - assert ffi.offsetof("struct bar", "e.c") == 20 + py.test.raises(KeyError, ffi.offsetof, "struct bar", "e.a") + assert ffi.offsetof("struct bar", "e", "a") == 12 + assert ffi.offsetof("struct bar", "e", "b") == 16 + assert ffi.offsetof("struct bar", "e", "c") == 20 + + def test_offsetof_array(self): + ffi = FFI(backend=self.Backend()) + assert ffi.offsetof("int[]", 51) == 51 * ffi.sizeof("int") + assert ffi.offsetof("int *", 51) == 51 * ffi.sizeof("int") + ffi.cdef("struct bar { int a, b; int c[99]; };") + assert ffi.offsetof("struct bar", "c") == 2 * ffi.sizeof("int") + assert ffi.offsetof("struct bar", "c", 0) == 2 * ffi.sizeof("int") + assert ffi.offsetof("struct bar", "c", 51) == 53 * ffi.sizeof("int") def test_alignof(self): ffi = FFI(backend=self.Backend()) @@ -1500,8 +1510,10 @@ p = ffi.new("struct foo_s *") a = ffi.addressof(p[0]) assert repr(a).startswith(" Author: Armin Rigo Branch: Changeset: 
r1632:a211e3ed8ba7 Date: 2015-01-11 22:48 +0100 http://bitbucket.org/cffi/cffi/changeset/a211e3ed8ba7/ Log: Another few tests, and fix the error message diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4905,7 +4905,7 @@ accepted_flags = CT_STRUCT | CT_UNION | CT_ARRAY | CT_POINTER; if ((cd->c_type->ct_flags & accepted_flags) == 0) { PyErr_SetString(PyExc_TypeError, - "expected a 'cdata struct-or-union' object"); + "expected a cdata struct/union/array/pointer object"); return NULL; } if ((ct->ct_flags & CT_POINTER) == 0) { diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2544,6 +2544,7 @@ assert typeoffsetof(BStructPtr, 'a2') == (BChar, 1) assert typeoffsetof(BStruct, 'a3') == (BChar, 2) assert typeoffsetof(BStructPtr, 'a2', 0) == (BChar, 1) + assert typeoffsetof(BStruct, u+'a3') == (BChar, 2) py.test.raises(TypeError, typeoffsetof, BStructPtr, 'a2', 1) py.test.raises(KeyError, typeoffsetof, BStructPtr, 'a4') py.test.raises(KeyError, typeoffsetof, BStruct, 'a5') @@ -2593,6 +2594,15 @@ # d = rawaddressof(BCharP, s, 1) assert d == cast(BCharP, p) + 1 + # + e = cast(BCharP, 109238) + f = rawaddressof(BCharP, e, 42) + assert f == e + 42 + # + BCharA = new_array_type(BCharP, None) + e = newp(BCharA, 50) + f = rawaddressof(BCharP, e, 42) + assert f == e + 42 def test_newp_signed_unsigned_char(): BCharArray = new_array_type( From noreply at buildbot.pypy.org Sun Jan 11 22:52:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Jan 2015 22:52:40 +0100 (CET) Subject: [pypy-commit] pypy default: update to cffi/a211e3ed8ba7 Message-ID: <20150111215240.DE56E1C1033@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75298:6aab59162052 Date: 2015-01-11 22:51 +0100 http://bitbucket.org/pypy/pypy/changeset/6aab59162052/ Log: update to cffi/a211e3ed8ba7 diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- 
a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -107,6 +107,9 @@ return self.space.w_None return W_CTypePtrOrArray._fget(self, attrchar) + def typeoffsetof_index(self, index): + return self.ctptr.typeoffsetof_index(index) + class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -142,12 +142,14 @@ raise oefmt(space.w_ValueError, "ctype '%s' is of unknown alignment", self.name) - def typeoffsetof(self, fieldname): + def typeoffsetof_field(self, fieldname, following): space = self.space - if fieldname is None: - msg = "expected a struct or union ctype" - else: - msg = "expected a struct or union ctype, or a pointer to one" + msg = "with a field name argument, expected a struct or union ctype" + raise OperationError(space.w_TypeError, space.wrap(msg)) + + def typeoffsetof_index(self, index): + space = self.space + msg = "with an integer argument, expected an array or pointer ctype" raise OperationError(space.w_TypeError, space.wrap(msg)) def rawaddressof(self, cdata, offset): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -308,24 +308,36 @@ def getcfield(self, attr): return self.ctitem.getcfield(attr) - def typeoffsetof(self, fieldname): - if fieldname is None: - return W_CTypePtrBase.typeoffsetof(self, fieldname) - else: - return self.ctitem.typeoffsetof(fieldname) + def typeoffsetof_field(self, fieldname, following): + if following == 0: + return self.ctitem.typeoffsetof_field(fieldname, -1) + return W_CTypePtrBase.typeoffsetof_field(self, fieldname, following) + + def typeoffsetof_index(self, index): + space = self.space + ctitem = self.ctitem + if ctitem.size < 0: + raise 
OperationError(space.w_TypeError, + space.wrap("pointer to opaque")) + try: + offset = ovfcheck(index * ctitem.size) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array offset would overflow a ssize_t")) + return ctitem, offset def rawaddressof(self, cdata, offset): from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and - isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): + isinstance(ctype2, W_CTypePtrOrArray)): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: raise OperationError(space.w_TypeError, - space.wrap("expected a 'cdata struct-or-union' object")) + space.wrap("expected a cdata struct/union/array/pointer" + " object")) def _fget(self, attrchar): if attrchar == 'i': # item diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -65,9 +65,7 @@ keepalive_until_here(ob) return ob - def typeoffsetof(self, fieldname): - if fieldname is None: - return (self, 0) + def typeoffsetof_field(self, fieldname, following): self.check_complete() space = self.space try: diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -48,13 +48,22 @@ align = w_ctype.alignof() return space.wrap(align) - at unwrap_spec(w_ctype=ctypeobj.W_CType, fieldname="str_or_None") -def typeoffsetof(space, w_ctype, fieldname): - ctype, offset = w_ctype.typeoffsetof(fieldname) + at unwrap_spec(w_ctype=ctypeobj.W_CType, following=int) +def typeoffsetof(space, w_ctype, w_field_or_index, following=0): + try: + fieldname = space.str_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + 
raise + index = space.int_w(w_field_or_index) + ctype, offset = w_ctype.typeoffsetof_index(index) + else: + ctype, offset = w_ctype.typeoffsetof_field(fieldname, following) + # return space.newtuple([space.wrap(ctype), space.wrap(offset)]) @unwrap_spec(w_ctype=ctypeobj.W_CType, w_cdata=cdataobj.W_CData, offset=int) -def rawaddressof(space, w_ctype, w_cdata, offset=0): +def rawaddressof(space, w_ctype, w_cdata, offset): return w_ctype.rawaddressof(w_cdata, offset) # ____________________________________________________________ diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2527,13 +2527,32 @@ ('a2', BChar, -1), ('a3', BChar, -1)]) py.test.raises(TypeError, typeoffsetof, BStructPtr, None) - assert typeoffsetof(BStruct, None) == (BStruct, 0) + py.test.raises(TypeError, typeoffsetof, BStruct, None) assert typeoffsetof(BStructPtr, 'a1') == (BChar, 0) assert typeoffsetof(BStruct, 'a1') == (BChar, 0) assert typeoffsetof(BStructPtr, 'a2') == (BChar, 1) assert typeoffsetof(BStruct, 'a3') == (BChar, 2) + assert typeoffsetof(BStructPtr, 'a2', 0) == (BChar, 1) + assert typeoffsetof(BStruct, u+'a3') == (BChar, 2) + py.test.raises(TypeError, typeoffsetof, BStructPtr, 'a2', 1) py.test.raises(KeyError, typeoffsetof, BStructPtr, 'a4') py.test.raises(KeyError, typeoffsetof, BStruct, 'a5') + py.test.raises(TypeError, typeoffsetof, BStruct, 42) + py.test.raises(TypeError, typeoffsetof, BChar, 'a1') + +def test_typeoffsetof_array(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + py.test.raises(TypeError, typeoffsetof, BArray, None) + py.test.raises(TypeError, typeoffsetof, BArray, 'a1') + assert typeoffsetof(BArray, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BIntP, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BArray, -51) 
== (BInt, -51 * size_of_int()) + MAX = sys.maxsize // size_of_int() + assert typeoffsetof(BArray, MAX) == (BInt, MAX * size_of_int()) + assert typeoffsetof(BIntP, MAX) == (BInt, MAX * size_of_int()) + py.test.raises(OverflowError, typeoffsetof, BArray, MAX + 1) def test_typeoffsetof_no_bitfield(): BInt = new_primitive_type("int") @@ -2553,17 +2572,26 @@ assert repr(p) == "" s = p[0] assert repr(s) == "" - a = rawaddressof(BStructPtr, s) + a = rawaddressof(BStructPtr, s, 0) assert repr(a).startswith(" Author: mattip Branch: win32-ownlib Changeset: r1633:2668dddde371 Date: 2015-01-11 22:47 +0200 http://bitbucket.org/cffi/cffi/changeset/2668dddde371/ Log: enable tests on win32 in ownlib, 3 fail diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ demo/__pycache__ __pycache__ _cffi_backend*.so +_cffi_backend.pyd doc/build build dist diff --git a/testing/test_ownlib.py b/testing/test_ownlib.py --- a/testing/test_ownlib.py +++ b/testing/test_ownlib.py @@ -7,33 +7,61 @@ SOURCE = """\ #include -int test_getting_errno(void) { +#ifdef _WIN32 +#define EXPORT __declspec(dllexport) +#else +#define EXPORT export +#endif + +EXPORT int test_getting_errno(void) { errno = 123; return -1; } -int test_setting_errno(void) { +EXPORT int test_setting_errno(void) { return errno; } -int my_array[7] = {0, 1, 2, 3, 4, 5, 6}; +EXPORT int my_array[7] = {0, 1, 2, 3, 4, 5, 6}; """ class TestOwnLib(object): Backend = CTypesBackend def setup_class(cls): - if sys.platform == 'win32': - return + cls.module = None from testing.udir import udir udir.join('testownlib.c').write(SOURCE) - subprocess.check_call( - 'gcc testownlib.c -shared -fPIC -o testownlib.so', - cwd=str(udir), shell=True) - cls.module = str(udir.join('testownlib.so')) + if sys.platform == 'win32': + import os + # did we already build it? 
+ if os.path.exists(str(udir.join('testownlib.dll'))): + cls.module = str(udir.join('testownlib.dll')) + return + # try (not too hard) to find the version used to compile this python + # no mingw + from distutils.msvc9compiler import get_build_version + version = get_build_version() + toolskey = "VS%0.f0COMNTOOLS" % version + toolsdir = os.environ.get(toolskey, None) + if toolsdir is None: + return + productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC") + productdir = os.path.abspath(productdir) + vcvarsall = os.path.join(productdir, "vcvarsall.bat") + if os.path.isfile(vcvarsall): + cmd = '"%s"' % vcvarsall + ' & cl testownlib.c /D_USRDLL /D_WINDLL' \ + ' /LD /OUT:testownlib.dll' + subprocess.check_call(cmd, cwd = str(udir), shell=True) + cls.module = str(udir.join('testownlib.dll')) + else: + subprocess.check_call( + 'gcc testownlib.c -shared -fPIC -o testownlib.so', + cwd=str(udir), shell=True) + cls.module = str(udir.join('testownlib.so')) def test_getting_errno(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -45,7 +73,7 @@ assert ffi.errno == 123 def test_setting_errno(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") if self.Backend is CTypesBackend and '__pypy__' in sys.modules: py.test.skip("XXX errno issue with ctypes on pypy?") @@ -60,7 +88,7 @@ assert ffi.errno == 42 def test_my_array_7(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -80,7 +108,7 @@ assert ownlib.my_array[i] == i def test_my_array_no_length(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") if self.Backend is CTypesBackend: py.test.skip("not supported by the ctypes backend") @@ -100,7 
+128,7 @@ assert ownlib.my_array[i] == i def test_keepalive_lib(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -118,7 +146,7 @@ assert res == -1 def test_keepalive_ffi(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" From noreply at buildbot.pypy.org Sun Jan 11 22:54:11 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 11 Jan 2015 22:54:11 +0100 (CET) Subject: [pypy-commit] cffi win32-ownlib: add test for a function with many pointer, value structs Message-ID: <20150111215411.3716D1C1033@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-ownlib Changeset: r1634:a463e80f2463 Date: 2015-01-11 23:54 +0200 http://bitbucket.org/cffi/cffi/changeset/a463e80f2463/ Log: add test for a function with many pointer,value structs diff --git a/testing/test_ownlib.py b/testing/test_ownlib.py --- a/testing/test_ownlib.py +++ b/testing/test_ownlib.py @@ -20,6 +20,75 @@ EXPORT int test_setting_errno(void) { return errno; +}; + +typedef struct { + long x; + long y; +} POINT; + +typedef struct { + long left; + long top; + long right; + long bottom; +} RECT; + + +EXPORT int PointInRect(RECT *prc, POINT pt) +{ + if (pt.x < prc->left) + return 0; + if (pt.x > prc->right) + return 0; + if (pt.y < prc->top) + return 0; + if (pt.y > prc->bottom) + return 0; + return 1; +} + +EXPORT long left = 10; +EXPORT long top = 20; +EXPORT long right = 30; +EXPORT long bottom = 40; + +EXPORT RECT ReturnRect(int i, RECT ar, RECT* br, POINT cp, RECT dr, + RECT *er, POINT fp, RECT gr) +{ + /*Check input */ + if (ar.left + br->left + dr.left + er->left + gr.left != left * 5) + { + ar.left = 100; + return ar; + } + if (ar.right + br->right + dr.right + er->right + gr.right != right * 5) + { + ar.right = 100; + return ar; + } + if (cp.x != fp.x) + 
{ + ar.left = -100; + } + if (cp.y != fp.y) + { + ar.left = -200; + } + switch(i) + { + case 0: + return ar; + break; + case 1: + return dr; + break; + case 2: + return gr; + break; + + } + return ar; } EXPORT int my_array[7] = {0, 1, 2, 3, 4, 5, 6}; @@ -50,8 +119,8 @@ productdir = os.path.abspath(productdir) vcvarsall = os.path.join(productdir, "vcvarsall.bat") if os.path.isfile(vcvarsall): - cmd = '"%s"' % vcvarsall + ' & cl testownlib.c /D_USRDLL /D_WINDLL' \ - ' /LD /OUT:testownlib.dll' + cmd = '"%s"' % vcvarsall + ' & cl.exe testownlib.c ' \ + ' /LD /Fetestownlib.dll' subprocess.check_call(cmd, cwd = str(udir), shell=True) cls.module = str(udir.join('testownlib.dll')) else: @@ -163,3 +232,44 @@ res = func() assert res == -1 assert ffi.errno == 123 + + def test_struct_by_value(self): + if self.module is None: + py.test.skip("fix the auto-generation of the tiny test lib") + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + typedef struct { + long x; + long y; + } POINT; + + typedef struct { + long left; + long top; + long right; + long bottom; + } RECT; + + long left, top, right, bottom; + + RECT ReturnRect(int i, RECT ar, RECT* br, POINT cp, RECT dr, + RECT *er, POINT fp, RECT gr); + """) + ownlib = ffi.dlopen(self.module) + + rect = ffi.new('RECT[1]') + pt = ffi.new('POINT[1]') + pt[0].x = 15 + pt[0].y = 25 + rect[0].left = ownlib.left + rect[0].right = ownlib.right + rect[0].top = ownlib.top + rect[0].bottom = ownlib.bottom + + for i in range(4): + ret = ownlib.ReturnRect(i, rect[0], rect, pt[0], rect[0], + rect[0], pt[0], rect[0]) + assert ret.left == ownlib.left + assert ret.right == ownlib.right + assert ret.top == ownlib.top + assert ret.bottom == ownlib.bottom From noreply at buildbot.pypy.org Sun Jan 11 22:57:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Jan 2015 22:57:02 +0100 (CET) Subject: [pypy-commit] pypy default: fix error message Message-ID: <20150111215702.C1F631C1033@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
Changeset: r75299:230d81716c11 Date: 2015-01-11 22:56 +0100 http://bitbucket.org/pypy/pypy/changeset/230d81716c11/ Log: fix error message diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -55,7 +55,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - index = space.int_w(w_field_or_index) + try: + index = space.int_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, + space.wrap("field name or array index expected")) ctype, offset = w_ctype.typeoffsetof_index(index) else: ctype, offset = w_ctype.typeoffsetof_field(fieldname, following) From noreply at buildbot.pypy.org Sun Jan 11 22:57:04 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Jan 2015 22:57:04 +0100 (CET) Subject: [pypy-commit] pypy default: run pypy/tool/import_cffi.py Message-ID: <20150111215704.3AF591C1033@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75300:d0f031cb06c8 Date: 2015-01-11 22:56 +0100 http://bitbucket.org/pypy/pypy/changeset/d0f031cb06c8/ Log: run pypy/tool/import_cffi.py diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -191,14 +191,16 @@ cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) - def offsetof(self, cdecl, fieldname): + def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given - structure, which must be given as a C type name. The field - may be 'x.y.z' in case of nested structures. + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. + You can also give numeric values which correspond to array + items, in case of an array type. 
""" if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) - return self._typeoffsetof(cdecl, fieldname)[1] + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and @@ -383,24 +385,28 @@ with self._lock: return model.pointer_cache(self, ctype) - def addressof(self, cdata, field=None): + def addressof(self, cdata, *fields_or_indexes): """Return the address of a . - If 'field' is specified, return the address of this field. - The field may be 'x.y.z' in case of nested structures. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. """ ctype = self._backend.typeof(cdata) - ctype, offset = self._typeoffsetof(ctype, field) + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) - def _typeoffsetof(self, ctype, field): - if field is not None and '.' 
in field: - offset = 0 - for field1 in field.split('.'): - ctype, offset1 = self._backend.typeoffsetof(ctype, field1) - offset += offset1 - return ctype, offset - return self._backend.typeoffsetof(ctype, field) + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -169,6 +169,7 @@ class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False + kind = "pointer" @classmethod def _newp(cls, init): @@ -370,10 +371,12 @@ (CTypesPrimitive, type(source).__name__)) return source # + kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name + kind = kind1 def __init__(self, value): self._value = value @@ -703,12 +706,13 @@ class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) - _kind = kind + _kind = kind = kind1 # CTypesStructOrUnion._fix_class() return CTypesStructOrUnion @@ -994,27 +998,42 @@ def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) - def typeoffsetof(self, BType, fieldname): - if fieldname is not None and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - if fieldname is None: - return (BType, 0) - else: + def typeoffsetof(self, BType, fieldname, 
num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) - def rawaddressof(self, BTypePtr, cdata, offset): + def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a ") - if offset != 0: + if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -229,12 +229,18 @@ pyvalue = int(int_str, 0) self._add_constants(key, pyvalue) + self._declare('macro ' + key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: - raise api.CDefError('only supports the syntax "#define ' - '%s ..." (literally) or "#define ' - '%s 0x1FF" for now' % (key, key)) + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... 
(literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) def _parse_decl(self, decl): node = decl.type diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -592,7 +592,8 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True, size_too=False): + vartp=None, delayed=True, size_too=False, + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -604,6 +605,9 @@ else: assert category == 'const' # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # if not is_int: if category == 'var': realexpr = '&' + name @@ -651,6 +655,27 @@ # ---------- # enums + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -667,25 +692,8 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue <= 0: - prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - 
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) <= 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %s, ' - 'not %s",') - prnt(' "%s", "%s", buf, "%d");' % ( - name, enumerator, enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') @@ -709,8 +717,11 @@ # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): - assert tp == '...' - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -355,11 +355,20 @@ # ---------- # constants, likely declared with '#define' - def _generate_gen_const(self, is_int, name, tp=None, category='const'): + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) - if is_int: + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') @@ -368,6 +377,7 @@ prnt('}') else: assert 
tp is not None + assert check_value is None prnt(tp.get_c_name(' %s(void)' % funcname, name),) prnt('{') if category == 'var': @@ -384,9 +394,13 @@ _loading_gen_constant = _loaded_noop - def _load_constant(self, is_int, tp, name, module): + def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name - if is_int: + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) @@ -397,6 +411,7 @@ BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: + assert check_value is None BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() @@ -411,6 +426,36 @@ # ---------- # enums + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise ffiplatform.VerificationError(error) + def _enum_funcname(self, 
prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -428,24 +473,7 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue <= 0: - prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) <= 0)' % enumerator) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' sprintf(out_error,' - ' "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % ( - enumerator[:100], enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() @@ -457,16 +485,8 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = self._enum_funcname(prefix, name) - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise ffiplatform.VerificationError(error) + self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): @@ -477,13 +497,21 @@ # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): - assert tp == '...' 
- self._generate_gen_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): - value = self._load_constant(True, tp, name, module) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) diff --git a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py @@ -965,9 +965,19 @@ ffi.cdef("struct foo { int a, b, c; };" "struct bar { struct foo d, e; };") assert ffi.offsetof("struct bar", "e") == 12 - assert ffi.offsetof("struct bar", "e.a") == 12 - assert ffi.offsetof("struct bar", "e.b") == 16 - assert ffi.offsetof("struct bar", "e.c") == 20 + py.test.raises(KeyError, ffi.offsetof, "struct bar", "e.a") + assert ffi.offsetof("struct bar", "e", "a") == 12 + assert ffi.offsetof("struct bar", "e", "b") == 16 + assert ffi.offsetof("struct bar", "e", "c") == 20 + + def test_offsetof_array(self): + ffi = FFI(backend=self.Backend()) + assert ffi.offsetof("int[]", 51) == 51 * ffi.sizeof("int") + assert ffi.offsetof("int *", 51) == 51 * ffi.sizeof("int") + ffi.cdef("struct bar { int a, b; int c[99]; };") + assert ffi.offsetof("struct bar", "c") == 2 * ffi.sizeof("int") + assert ffi.offsetof("struct bar", "c", 0) == 2 * ffi.sizeof("int") + assert ffi.offsetof("struct bar", "c", 51) == 53 * ffi.sizeof("int") def test_alignof(self): ffi = FFI(backend=self.Backend()) @@ -1501,8 +1511,10 @@ p = ffi.new("struct foo_s *") a = ffi.addressof(p[0]) assert repr(a).startswith(" Author: Armin Rigo Branch: Changeset: r75301:7cb0d2884762 Date: 2015-01-12 11:30 
+0100 http://bitbucket.org/pypy/pypy/changeset/7cb0d2884762/ Log: try to fix the issue of spaces in directory names diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -293,7 +293,7 @@ m.comment('automatically generated makefile') definitions = [ - ('RPYDIR', rpydir), + ('RPYDIR', '"%s"' % rpydir), ('TARGET', target_name), ('DEFAULT_TARGET', exe_name.basename), ('SOURCES', rel_cfiles), From noreply at buildbot.pypy.org Mon Jan 12 12:31:49 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Jan 2015 12:31:49 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Add reversed_dict() in RPython Message-ID: <20150112113149.76B621C3CF4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75302:fc9fe2200019 Date: 2015-01-12 12:12 +0100 http://bitbucket.org/pypy/pypy/changeset/fc9fe2200019/ Log: Add reversed_dict() in RPython diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -753,6 +753,14 @@ dict._prepare_dict_update(n_elements) # ^^ call an extra method that doesn't exist before translation + at specialize.call_location() +def reversed_dict(d): + """Equivalent to reversed(ordered_dict), but works also for + regular dicts.""" + if not we_are_translated() and type(d) is dict: + d = list(d) + return reversed(d) + # ____________________________________________________________ diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -341,6 +341,21 @@ res = self.interpret(g, [3]) assert res == 42 # "did not crash" + def test_reversed_dict(self): + d1 = {2:3, 4:5, 6:7} + def g(): + n1 = 0 + for key in d1: + n1 = n1 * 10 + key + n2 = 0 + for key in reversed_dict(d1): + n2 = n2 * 10 + key 
+ return n1 * 10000 + n2 + got = str(g()) + assert len(got) == 7 and got[3] == '0' and got[:3] == got[6:3:-1] + got = str(self.interpret(g, [])) + assert len(got) == 7 and got[3] == '0' and got[:3] == got[6:3:-1] + def test_compute_hash(self): class Foo(object): pass diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -988,8 +988,12 @@ self.r_dict = r_dict self.variant = variant self.lowleveltype = get_ll_dictiter(r_dict.lowleveltype) - self.ll_dictiter = ll_dictiter - self._ll_dictnext = _ll_dictnext + if variant == 'reversed': + self.ll_dictiter = ll_dictiter_reversed + self._ll_dictnext = _ll_dictnext_reversed + else: + self.ll_dictiter = ll_dictiter + self._ll_dictnext = _ll_dictnext def ll_dictiter(ITERPTR, d): @@ -1019,6 +1023,26 @@ iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) raise StopIteration +def ll_dictiter_reversed(ITERPTR, d): + iter = lltype.malloc(ITERPTR.TO) + iter.dict = d + iter.index = d.num_ever_used_items + return iter + +def _ll_dictnext_reversed(iter): + dict = iter.dict + if dict: + entries = dict.entries + index = iter.index - 1 + while index >= 0: + if entries.valid(index): + iter.index = index + return index + index = index - 1 + # clear the reference to the dict and prevent restarts + iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) + raise StopIteration + # _____________________________________________________________ # methods diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -98,12 +98,12 @@ c_key = hop.inputconst(lltype.Void, 'key') v_key = hop.genop('getinteriorfield', [v_entries, v_index, c_key], resulttype=KEY) - if variant != 'keys': + if variant != 'keys' and variant != 'reversed': VALUE = ENTRIES.TO.OF.value c_value = hop.inputconst(lltype.Void, 'value') v_value = 
hop.genop('getinteriorfield', [v_entries,v_index,c_value], resulttype=VALUE) - if variant == 'keys': + if variant == 'keys' or variant == 'reversed': return self.r_dict.recast_key(hop.llops, v_key) elif variant == 'values': return self.r_dict.recast_value(hop.llops, v_value) From noreply at buildbot.pypy.org Mon Jan 12 14:28:27 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Jan 2015 14:28:27 +0100 (CET) Subject: [pypy-commit] pypy rewrite-unrolling: bam reap apart the part in pure.py responsible for old unrolling Message-ID: <20150112132827.F33021C10AB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75303:55b1de6dc730 Date: 2015-01-11 12:25 +0200 http://bitbucket.org/pypy/pypy/changeset/55b1de6dc730/ Log: bam reap apart the part in pure.py responsible for old unrolling diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -7,7 +7,6 @@ def __init__(self): self.postponed_op = None self.pure_operations = args_dict() - self.emitted_pure_operations = {} def propagate_forward(self, op): dispatch_opt(self, op) @@ -26,7 +25,6 @@ nextop = None args = None - remember = None if canfold: for i in range(op.numargs()): if self.get_constant_box(op.getarg(i)) is None: @@ -45,8 +43,6 @@ if oldvalue is not None: self.optimizer.make_equal_to(op.result, oldvalue, True) return - else: - remember = op # otherwise, the operation remains self.emit_operation(op) @@ -56,8 +52,6 @@ self.emit_operation(nextop) if args is not None: self.pure_operations[args] = self.getvalue(op.result) - if remember: - self.remember_emitting_pure(remember) def optimize_CALL_PURE(self, op): # Step 1: check if all arguments are constant @@ -85,7 +79,6 @@ args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, op.getdescr())) - self.remember_emitting_pure(op) def 
optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: @@ -114,14 +107,5 @@ def get_pure_result(self, key): return self.pure_operations.get(key, None) - def remember_emitting_pure(self, op): - if self.optimizer.exporting_state: - op = self.optimizer.get_op_replacement(op) - self.emitted_pure_operations[op] = True - - def produce_potential_short_preamble_ops(self, sb): - for op in self.emitted_pure_operations: - sb.add_potential(op) - dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', default=OptPure.optimize_default) From noreply at buildbot.pypy.org Mon Jan 12 14:28:29 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Jan 2015 14:28:29 +0100 (CET) Subject: [pypy-commit] pypy default: Assuming quite a few ops are pure, remove the need to store a crazy dict Message-ID: <20150112132829.350261C10AB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r75304:99d27b9b9c8a Date: 2015-01-12 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/99d27b9b9c8a/ Log: Assuming quite a few ops are pure, remove the need to store a crazy dict of "emitted_pure_operations" diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -7,7 +7,7 @@ def __init__(self): self.postponed_op = None self.pure_operations = args_dict() - self.emitted_pure_operations = {} + self.call_pure_positions = [] def propagate_forward(self, op): dispatch_opt(self, op) @@ -26,7 +26,6 @@ nextop = None args = None - remember = None if canfold: for i in range(op.numargs()): if self.get_constant_box(op.getarg(i)) is None: @@ -45,8 +44,6 @@ if oldvalue is not None: self.optimizer.make_equal_to(op.result, oldvalue, True) return - else: - remember = op # otherwise, the operation remains self.emit_operation(op) @@ -56,8 +53,6 @@ self.emit_operation(nextop) if args is not None: self.pure_operations[args] = 
self.getvalue(op.result) - if remember: - self.remember_emitting_pure(remember) def optimize_CALL_PURE(self, op): # Step 1: check if all arguments are constant @@ -85,7 +80,7 @@ args = op.getarglist() self.emit_operation(ResOperation(rop.CALL, args, op.result, op.getdescr())) - self.remember_emitting_pure(op) + self.call_pure_positions.append(len(self.optimizer._newoperations) - 1) def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: @@ -114,13 +109,17 @@ def get_pure_result(self, key): return self.pure_operations.get(key, None) - def remember_emitting_pure(self, op): - if self.optimizer.exporting_state: - op = self.optimizer.get_op_replacement(op) - self.emitted_pure_operations[op] = True - def produce_potential_short_preamble_ops(self, sb): - for op in self.emitted_pure_operations: + ops = sb.optimizer._newoperations + for i, op in enumerate(ops): + if op.is_always_pure(): + sb.add_potential(op) + if op.is_ovf() and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: + sb.add_potential(op) + for i in self.call_pure_positions: + op = ops[i] + assert op.getopnum() == rop.CALL + op = op.copy_and_change(rop.CALL_PURE) sb.add_potential(op) dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5527,6 +5527,5 @@ """ self.optimize_loop(ops, ops) - class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass From noreply at buildbot.pypy.org Mon Jan 12 15:09:23 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Jan 2015 15:09:23 +0100 (CET) Subject: [pypy-commit] pypy rewrite-unrolling: merge default Message-ID: <20150112140923.276221C056E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75305:101fa4a6ae54 
Date: 2015-01-12 15:29 +0200 http://bitbucket.org/pypy/pypy/changeset/101fa4a6ae54/ Log: merge default diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -191,14 +191,16 @@ cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) - def offsetof(self, cdecl, fieldname): + def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given - structure, which must be given as a C type name. The field - may be 'x.y.z' in case of nested structures. + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. + You can also give numeric values which correspond to array + items, in case of an array type. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) - return self._typeoffsetof(cdecl, fieldname)[1] + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and @@ -383,24 +385,28 @@ with self._lock: return model.pointer_cache(self, ctype) - def addressof(self, cdata, field=None): + def addressof(self, cdata, *fields_or_indexes): """Return the address of a . - If 'field' is specified, return the address of this field. - The field may be 'x.y.z' in case of nested structures. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. """ ctype = self._backend.typeof(cdata) - ctype, offset = self._typeoffsetof(ctype, field) + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) - def _typeoffsetof(self, ctype, field): - if field is not None and '.' 
in field: - offset = 0 - for field1 in field.split('.'): - ctype, offset1 = self._backend.typeoffsetof(ctype, field1) - offset += offset1 - return ctype, offset - return self._backend.typeoffsetof(ctype, field) + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -169,6 +169,7 @@ class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False + kind = "pointer" @classmethod def _newp(cls, init): @@ -370,10 +371,12 @@ (CTypesPrimitive, type(source).__name__)) return source # + kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name + kind = kind1 def __init__(self, value): self._value = value @@ -703,12 +706,13 @@ class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) - _kind = kind + _kind = kind = kind1 # CTypesStructOrUnion._fix_class() return CTypesStructOrUnion @@ -994,27 +998,42 @@ def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) - def typeoffsetof(self, BType, fieldname): - if fieldname is not None and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - if fieldname is None: - return (BType, 0) - else: + def typeoffsetof(self, BType, fieldname, 
num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) - def rawaddressof(self, BTypePtr, cdata, offset): + def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a ") - if offset != 0: + if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -229,12 +229,18 @@ pyvalue = int(int_str, 0) self._add_constants(key, pyvalue) + self._declare('macro ' + key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: - raise api.CDefError('only supports the syntax "#define ' - '%s ..." (literally) or "#define ' - '%s 0x1FF" for now' % (key, key)) + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... 
(literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) def _parse_decl(self, decl): node = decl.type diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -592,7 +592,8 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True, size_too=False): + vartp=None, delayed=True, size_too=False, + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -604,6 +605,9 @@ else: assert category == 'const' # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # if not is_int: if category == 'var': realexpr = '&' + name @@ -651,6 +655,27 @@ # ---------- # enums + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -667,25 +692,8 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue <= 0: - prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - 
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) <= 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %s, ' - 'not %s",') - prnt(' "%s", "%s", buf, "%d");' % ( - name, enumerator, enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') @@ -709,8 +717,11 @@ # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): - assert tp == '...' - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -355,11 +355,20 @@ # ---------- # constants, likely declared with '#define' - def _generate_gen_const(self, is_int, name, tp=None, category='const'): + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) - if is_int: + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') @@ -368,6 +377,7 @@ prnt('}') else: assert 
tp is not None + assert check_value is None prnt(tp.get_c_name(' %s(void)' % funcname, name),) prnt('{') if category == 'var': @@ -384,9 +394,13 @@ _loading_gen_constant = _loaded_noop - def _load_constant(self, is_int, tp, name, module): + def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name - if is_int: + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) @@ -397,6 +411,7 @@ BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: + assert check_value is None BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() @@ -411,6 +426,36 @@ # ---------- # enums + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise ffiplatform.VerificationError(error) + def _enum_funcname(self, 
prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -428,24 +473,7 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue <= 0: - prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) <= 0)' % enumerator) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' sprintf(out_error,' - ' "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % ( - enumerator[:100], enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() @@ -457,16 +485,8 @@ tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = self._enum_funcname(prefix, name) - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise ffiplatform.VerificationError(error) + self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): @@ -477,13 +497,21 @@ # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): - assert tp == '...' 
- self._generate_gen_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): - value = self._load_constant(True, tp, name, module) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -86,9 +86,11 @@ def print_(*args, **kwargs): """The new-style print function from py3k.""" - fp = kwargs.pop("file", sys.stdout) + fp = kwargs.pop("file", None) if fp is None: - return + fp = sys.stdout + if fp is None: + return def write(data): if not isinstance(data, basestring): data = str(data) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -651,9 +651,12 @@ out = sys.stdout = StringIO.StringIO() try: pr("Hello,", "person!") + pr("2nd line", file=None) + sys.stdout = None + pr("nowhere") finally: sys.stdout = save - assert out.getvalue() == "Hello, person!\n" + assert out.getvalue() == "Hello, person!\n2nd line\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" @@ -668,7 +671,6 @@ result = out.getvalue() assert isinstance(result, unicode) assert result == u"Hello, person!\n" - pr("Hello", file=None) # This works. 
out = StringIO.StringIO() pr(None, file=out) assert out.getvalue() == "None\n" diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -107,6 +107,9 @@ return self.space.w_None return W_CTypePtrOrArray._fget(self, attrchar) + def typeoffsetof_index(self, index): + return self.ctptr.typeoffsetof_index(index) + class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -142,12 +142,14 @@ raise oefmt(space.w_ValueError, "ctype '%s' is of unknown alignment", self.name) - def typeoffsetof(self, fieldname): + def typeoffsetof_field(self, fieldname, following): space = self.space - if fieldname is None: - msg = "expected a struct or union ctype" - else: - msg = "expected a struct or union ctype, or a pointer to one" + msg = "with a field name argument, expected a struct or union ctype" + raise OperationError(space.w_TypeError, space.wrap(msg)) + + def typeoffsetof_index(self, index): + space = self.space + msg = "with an integer argument, expected an array or pointer ctype" raise OperationError(space.w_TypeError, space.wrap(msg)) def rawaddressof(self, cdata, offset): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -308,24 +308,36 @@ def getcfield(self, attr): return self.ctitem.getcfield(attr) - def typeoffsetof(self, fieldname): - if fieldname is None: - return W_CTypePtrBase.typeoffsetof(self, fieldname) - else: - return self.ctitem.typeoffsetof(fieldname) + def typeoffsetof_field(self, fieldname, following): + if following == 0: + return self.ctitem.typeoffsetof_field(fieldname, -1) + return 
W_CTypePtrBase.typeoffsetof_field(self, fieldname, following) + + def typeoffsetof_index(self, index): + space = self.space + ctitem = self.ctitem + if ctitem.size < 0: + raise OperationError(space.w_TypeError, + space.wrap("pointer to opaque")) + try: + offset = ovfcheck(index * ctitem.size) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array offset would overflow a ssize_t")) + return ctitem, offset def rawaddressof(self, cdata, offset): from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and - isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): + isinstance(ctype2, W_CTypePtrOrArray)): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: raise OperationError(space.w_TypeError, - space.wrap("expected a 'cdata struct-or-union' object")) + space.wrap("expected a cdata struct/union/array/pointer" + " object")) def _fget(self, attrchar): if attrchar == 'i': # item diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -65,9 +65,7 @@ keepalive_until_here(ob) return ob - def typeoffsetof(self, fieldname): - if fieldname is None: - return (self, 0) + def typeoffsetof_field(self, fieldname, following): self.check_complete() space = self.space try: diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -48,13 +48,28 @@ align = w_ctype.alignof() return space.wrap(align) - at unwrap_spec(w_ctype=ctypeobj.W_CType, fieldname="str_or_None") -def typeoffsetof(space, w_ctype, fieldname): - ctype, offset = w_ctype.typeoffsetof(fieldname) + at unwrap_spec(w_ctype=ctypeobj.W_CType, following=int) +def 
typeoffsetof(space, w_ctype, w_field_or_index, following=0): + try: + fieldname = space.str_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + try: + index = space.int_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, + space.wrap("field name or array index expected")) + ctype, offset = w_ctype.typeoffsetof_index(index) + else: + ctype, offset = w_ctype.typeoffsetof_field(fieldname, following) + # return space.newtuple([space.wrap(ctype), space.wrap(offset)]) @unwrap_spec(w_ctype=ctypeobj.W_CType, w_cdata=cdataobj.W_CData, offset=int) -def rawaddressof(space, w_ctype, w_cdata, offset=0): +def rawaddressof(space, w_ctype, w_cdata, offset): return w_ctype.rawaddressof(w_cdata, offset) # ____________________________________________________________ diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2527,13 +2527,32 @@ ('a2', BChar, -1), ('a3', BChar, -1)]) py.test.raises(TypeError, typeoffsetof, BStructPtr, None) - assert typeoffsetof(BStruct, None) == (BStruct, 0) + py.test.raises(TypeError, typeoffsetof, BStruct, None) assert typeoffsetof(BStructPtr, 'a1') == (BChar, 0) assert typeoffsetof(BStruct, 'a1') == (BChar, 0) assert typeoffsetof(BStructPtr, 'a2') == (BChar, 1) assert typeoffsetof(BStruct, 'a3') == (BChar, 2) + assert typeoffsetof(BStructPtr, 'a2', 0) == (BChar, 1) + assert typeoffsetof(BStruct, u+'a3') == (BChar, 2) + py.test.raises(TypeError, typeoffsetof, BStructPtr, 'a2', 1) py.test.raises(KeyError, typeoffsetof, BStructPtr, 'a4') py.test.raises(KeyError, typeoffsetof, BStruct, 'a5') + py.test.raises(TypeError, typeoffsetof, BStruct, 42) + py.test.raises(TypeError, typeoffsetof, BChar, 'a1') + +def test_typeoffsetof_array(): + BInt = 
new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + py.test.raises(TypeError, typeoffsetof, BArray, None) + py.test.raises(TypeError, typeoffsetof, BArray, 'a1') + assert typeoffsetof(BArray, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BIntP, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BArray, -51) == (BInt, -51 * size_of_int()) + MAX = sys.maxsize // size_of_int() + assert typeoffsetof(BArray, MAX) == (BInt, MAX * size_of_int()) + assert typeoffsetof(BIntP, MAX) == (BInt, MAX * size_of_int()) + py.test.raises(OverflowError, typeoffsetof, BArray, MAX + 1) def test_typeoffsetof_no_bitfield(): BInt = new_primitive_type("int") @@ -2553,17 +2572,26 @@ assert repr(p) == "" s = p[0] assert repr(s) == "" - a = rawaddressof(BStructPtr, s) + a = rawaddressof(BStructPtr, s, 0) assert repr(a).startswith(" Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75306:62e5eb68ec00 Date: 2015-01-12 15:44 +0200 http://bitbucket.org/pypy/pypy/changeset/62e5eb68ec00/ Log: fix the merge enough to pass tests diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -95,11 +95,13 @@ def setup(self): self.optimizer.optpure = self - def pure(self, opnum, args, result): + def pure(self, opnum, args, result, value=None): op = ResOperation(opnum, args, result) key = self.optimizer.make_args_key(op) if key not in self.pure_operations: - self.pure_operations[key] = self.getvalue(result) + if value is None: + value = self.getvalue(result) + self.pure_operations[key] = value def has_pure_result(self, opnum, args, descr): op = ResOperation(opnum, args, None, descr) @@ -109,19 +111,5 @@ def get_pure_result(self, key): return self.pure_operations.get(key, None) - def produce_potential_short_preamble_ops(self, sb): - xxx - ops = sb.optimizer._newoperations - for i, op in 
enumerate(ops): - if op.is_always_pure(): - sb.add_potential(op) - if op.is_ovf() and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: - sb.add_potential(op) - for i in self.call_pure_positions: - op = ops[i] - assert op.getopnum() == rop.CALL - op = op.copy_and_change(rop.CALL_PURE) - sb.add_potential(op) - dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', default=OptPure.optimize_default) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -6,7 +6,7 @@ from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest,\ FakeMetaInterpStaticData from rpython.jit.metainterp.optimizeopt.pure import OptPure -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.history import ConstInt, BoxInt from rpython.jit.backend.llgraph import runner @@ -54,8 +54,10 @@ i1 = BoxInt() unroller = Unroller() unroller.optimizer = Optimizer(self.metainterp_sd, None, None, [pure]) + unroller.optimizer._newoperations = [ + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1) + ] pure.optimizer = unroller.optimizer - pure.pure(rop.INT_ADD, [i0, ConstInt(1)], i1) expected = """ [i0, i1] label(i0, i1) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -44,7 +44,26 @@ if old_optpure: # import all pure operations from the old optimizer new_optpure = self.optimizer.optpure - for opargs, value in old_optpure.pure_operations.items(): - if not value.is_virtual(): - pure_value = OptPureValue(self, value.box) - new_optpure.pure_operations[opargs] = pure_value + old_ops = old_optimizer._newoperations + for op in old_ops: + if op.is_always_pure(): + 
pure_value = OptPureValue(self, op.result) + new_optpure.pure(op.getopnum(), op.getarglist(), + op.result, pure_value) + # for opargs, value in old_optpure.pure_operations.items(): + # if not value.is_virtual(): + # pure_value = OptPureValue(self, value.box) + # new_optpure.pure_operations[opargs] = pure_value + + # def produce_potential_short_preamble_ops(self, sb): + # ops = sb.optimizer._newoperations + # for i, op in enumerate(ops): + # if op.is_always_pure(): + # sb.add_potential(op) + # if op.is_ovf() and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: + # sb.add_potential(op) + # for i in self.call_pure_positions: + # op = ops[i] + # assert op.getopnum() == rop.CALL + # op = op.copy_and_change(rop.CALL_PURE) + # sb.add_potential(op) From noreply at buildbot.pypy.org Mon Jan 12 15:09:25 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Jan 2015 15:09:25 +0100 (CET) Subject: [pypy-commit] pypy default: shuffle around sythesising of pure reverse ops (why we don't do more???) Message-ID: <20150112140925.8A0A31C056E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r75307:b1bb8c9381a6 Date: 2015-01-12 16:06 +0200 http://bitbucket.org/pypy/pypy/changeset/b1bb8c9381a6/ Log: shuffle around sythesising of pure reverse ops (why we don't do more???) 
diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -847,6 +847,27 @@ op.getopnum(), argboxes, op.getdescr()) return resbox.constbox() + def pure_reverse(self, op): + if self.optpure is None: + return + optpure = self.optpure + if op.getopnum() == rop.INT_ADD: + optpure.pure(rop.INT_ADD, [op.getarg(1), op.getarg(0)], op.result) + # Synthesize the reverse op for optimize_default to reuse + optpure.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) + optpure.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) + elif op.getopnum() == rop.INT_SUB: + optpure.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) + optpure.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) + elif op.getopnum() == rop.FLOAT_MUL: + optpure.pure(rop.FLOAT_MUL, [op.getarg(1), op.getarg(0)], op.result) + elif op.getopnum() == rop.FLOAT_NEG: + optpure.pure(rop.FLOAT_NEG, [op.result], op.getarg(0)) + elif op.getopnum() == rop.CAST_INT_TO_PTR: + optpure.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) + elif op.getopnum() == rop.CAST_PTR_TO_INT: + optpure.pure(rop.CAST_INT_TO_PTR, [op.result], op.getarg(0)) + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -117,9 +117,7 @@ self.make_constant_int(op.result, 0) else: self.emit_operation(op) - # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) - self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) + self.optimizer.pure_reverse(op) def optimize_INT_ADD(self, op): v1 = 
self.getvalue(op.getarg(0)) @@ -132,10 +130,7 @@ self.make_equal_to(op.result, v1) else: self.emit_operation(op) - self.pure(rop.INT_ADD, [op.getarg(1), op.getarg(0)], op.result) - # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) - self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) + self.optimizer.pure_reverse(op) def optimize_INT_MUL(self, op): v1 = self.getvalue(op.getarg(0)) @@ -222,7 +217,7 @@ )) return self.emit_operation(op) - self.pure(rop.FLOAT_MUL, [arg2, arg1], op.result) + self.optimizer.pure_reverse(op) def optimize_FLOAT_TRUEDIV(self, op): arg1 = op.getarg(0) @@ -244,9 +239,8 @@ self.emit_operation(op) def optimize_FLOAT_NEG(self, op): - v1 = op.getarg(0) self.emit_operation(op) - self.pure(rop.FLOAT_NEG, [op.result], v1) + self.optimizer.pure_reverse(op) def optimize_guard(self, op, constbox, emit_operation=True): value = self.getvalue(op.getarg(0)) @@ -583,11 +577,11 @@ self.emit_operation(op) def optimize_CAST_PTR_TO_INT(self, op): - self.pure(rop.CAST_INT_TO_PTR, [op.result], op.getarg(0)) + self.optimizer.pure_reverse(op) self.emit_operation(op) def optimize_CAST_INT_TO_PTR(self, op): - self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) + self.optimizer.pure_reverse(op) self.emit_operation(op) def optimize_SAME_AS(self, op): From noreply at buildbot.pypy.org Mon Jan 12 16:45:28 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 12 Jan 2015 16:45:28 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: add short description about how to save memory in the future Message-ID: <20150112154528.67C771C056E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1515:be35498ceb30 Date: 2015-01-12 13:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/be35498ceb30/ Log: add short description about how to save memory in the future diff --git a/c8/stm/pages.h b/c8/stm/pages.h --- a/c8/stm/pages.h +++ b/c8/stm/pages.h @@ 
-6,6 +6,14 @@ Each virtual page is either accessible, or PAGE_NO_ACCESS (and then has no underlying memory). + + TODO: one way to save memory is to re-share pages during major GC. + The pages are mapped MAP_PRIVATE in all segments. We could use an + extra segment that is mapped SHARED to underlying file pages so + we can map PRIVATE pages from segments to it. The idea is that + a major GC first validates all segments (incl. the extra seg.), + then re-maps all PRIVATE, unmodified pages to the SHARED (unmodified) + page. Thus, we get "free" copy-on-write supported by the kernel. */ #define PAGE_FLAG_START END_NURSERY_PAGE From noreply at buildbot.pypy.org Mon Jan 12 16:45:29 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 12 Jan 2015 16:45:29 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: WIP: start fixing compilation everywhere Message-ID: <20150112154529.7EBDB1C056E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1516:21374c6583cf Date: 2015-01-12 14:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/21374c6583cf/ Log: WIP: start fixing compilation everywhere diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -130,20 +130,12 @@ assert(get_page_status_in(my_segnum, pagenum) == PAGE_NO_ACCESS); /* find who has the most recent revision of our page */ - int shared_page_holder = -1; - int shared_ref_count = 0; int copy_from_segnum = -1; uint64_t most_recent_rev = 0; for (i = 0; i < NB_SEGMENTS; i++) { if (i == my_segnum) continue; - if (get_page_status_in(i, pagenum) == PAGE_SHARED) { - /* mostly for debugging now: */ - shared_page_holder = i; - shared_ref_count++; - } - struct stm_commit_log_entry_s *log_entry; log_entry = get_priv_segment(i)->last_commit_log_entry; if (get_page_status_in(i, pagenum) != PAGE_NO_ACCESS @@ -152,17 +144,11 @@ most_recent_rev = log_entry->rev_num; } } - OPT_ASSERT(shared_page_holder != -1); OPT_ASSERT(copy_from_segnum != -1 && copy_from_segnum != 
my_segnum); - /* XXX: for now, we don't try to get the single shared page. We simply - regard it as private for its holder. */ - /* this assert should be true for now... */ - assert(shared_ref_count == 1); - /* make our page private */ - page_privatize_in(my_segnum, pagenum); - assert(get_page_status_in(my_segnum, pagenum) == PAGE_PRIVATE); + page_mark_accessible(my_segnum, pagenum); + assert(get_page_status_in(my_segnum, pagenum) == PAGE_ACCESSIBLE); /* before copying anything, acquire modification locks from our and the other segment */ @@ -279,7 +265,7 @@ } bool needs_abort = false; - + while(1) { /* retry IF: */ /* if at the time of "HERE" (s.b.) there happen to be diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -148,10 +148,8 @@ #ifndef STM_TESTS static char *stm_object_pages; -static char *stm_file_pages; #else char *stm_object_pages; -char *stm_file_pages; #endif static int stm_object_pages_fd; static stm_thread_local_t *stm_all_thread_locals = NULL; diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -5,8 +5,8 @@ static void setup_gcpage(void) { - //uninitialized_page_start = stm_file_pages; - //uninitialized_page_stop = stm_file_pages + NB_SHARED_PAGES * 4096UL; + uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; + uninitialized_page_stop = uninitialized_page_start + NB_SHARED_PAGES * 4096UL; } static void teardown_gcpage(void) @@ -15,15 +15,17 @@ static void setup_N_pages(char *pages_addr, uint64_t num) { - /* initialize to |S|N|N|N| */ + /* initialize to |N|P|N|N| */ long i; for (i = 0; i < NB_SEGMENTS; i++) { acquire_privatization_lock(i); } - pages_initialize_shared_for( - STM_SEGMENT->segment_num, - get_page_of_file_page((pages_addr - stm_file_pages) / 4096UL), - num); + + uintptr_t p = (pages_addr - stm_object_pages) / 4096UL; + while (num-->0) { + page_mark_accessible(STM_SEGMENT->segment_num, p + num); + } + for (i = NB_SEGMENTS-1; i >= 0; i--) { 
release_privatization_lock(i); } @@ -58,7 +60,7 @@ (uintptr_t)addr / 4096UL + END_NURSERY_PAGE)); spinlock_release(lock_growth_large); - return (stm_char*)(addr - stm_file_pages + END_NURSERY_PAGE * 4096UL); + return (stm_char*)(addr - stm_object_pages); } object_t *_stm_allocate_old(ssize_t size_rounded_up) diff --git a/c8/stm/gcpage.h b/c8/stm/gcpage.h --- a/c8/stm/gcpage.h +++ b/c8/stm/gcpage.h @@ -2,8 +2,8 @@ /* Granularity when grabbing more unused pages: take 50 at a time */ #define GCPAGE_NUM_PAGES 50 -//static char *uninitialized_page_start; /* within segment 0 */ -//static char *uninitialized_page_stop; +static char *uninitialized_page_start; /* within segment 0 */ +static char *uninitialized_page_stop; static void setup_gcpage(void); static void teardown_gcpage(void); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -91,13 +91,14 @@ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size = stmcb_size_rounded_up((struct object_s *)realobj); - if (size > GC_LAST_SMALL_SIZE) { + if (true /*size > GC_LAST_SMALL_SIZE*/) { /* case 1: object is not small enough. Ask gcpage.c for an allocation via largemalloc. 
*/ nobj = (object_t *)allocate_outside_nursery_large(size); } else { /* case "small enough" */ + abort(); nobj = (object_t *)allocate_outside_nursery_small(size); } diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -70,7 +70,6 @@ assert(stm_object_pages_fd); assert(stm_object_pages); - assert(stm_file_pages); setup_protection_settings(); setup_signal_handler(); diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -21,7 +21,7 @@ static fpsz_t *get_fpsz(char *smallpage) { - uintptr_t pagenum = (((char *)smallpage) - stm_file_pages) / 4096; + uintptr_t pagenum = (((char *)smallpage) - END_NURSERY_PAGE * 4096UL - stm_object_pages) / 4096; assert(PAGE_SMSIZE_START <= pagenum && pagenum < PAGE_SMSIZE_END); return &full_pages_object_size[pagenum - PAGE_SMSIZE_START]; } @@ -60,7 +60,7 @@ goto out_of_memory; uninitialized_page_stop -= decrease_by; - first_small_uniform_loc = uninitialized_page_stop - stm_file_pages + END_NURSERY_PAGE * 4096UL; + first_small_uniform_loc = uninitialized_page_stop - stm_object_pages; /* XXX: */ /* char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; */ @@ -163,11 +163,11 @@ if (UNLIKELY(result == NULL)) return (stm_char*) - (_allocate_small_slowpath(size) - stm_file_pages + END_NURSERY_PAGE * 4096UL); + (_allocate_small_slowpath(size) - stm_object_pages); *fl = result->next; return (stm_char*) - ((char *)result - stm_file_pages + END_NURSERY_PAGE * 4096UL); + ((char *)result - stm_object_pages); } object_t *_stm_allocate_old_small(ssize_t size_rounded_up) @@ -193,8 +193,7 @@ #ifdef STM_TESTS if (_stm_smallmalloc_keep != NULL) { // test wants a TLPREFIXd address - return _stm_smallmalloc_keep( - p - stm_file_pages + (char*)(END_NURSERY_PAGE * 4096UL)); + return _stm_smallmalloc_keep((char*)(p - stm_object_pages)); } #endif abort(); From noreply at buildbot.pypy.org Mon Jan 12 16:45:30 2015 From: noreply at buildbot.pypy.org (Raemi) Date: 
Mon, 12 Jan 2015 16:45:30 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix compilation, test_basic passes (surprisingly) Message-ID: <20150112154530.874E71C056E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1517:f5c8d19deeb4 Date: 2015-01-12 14:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/f5c8d19deeb4/ Log: fix compilation, test_basic passes (surprisingly) diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -154,8 +154,8 @@ the other segment */ uint64_t to_lock = (1UL << copy_from_segnum)| (1UL << my_segnum); acquire_modification_lock_set(to_lock); - pagecopy((char*)(get_virt_page_of(my_segnum, pagenum) * 4096UL), - (char*)(get_virt_page_of(copy_from_segnum, pagenum) * 4096UL)); + pagecopy(get_virtual_page(my_segnum, pagenum), + get_virtual_page(copy_from_segnum, pagenum)); /* if there were modifications in the page, revert them. */ copy_bk_objs_in_page_from(copy_from_segnum, pagenum, false); @@ -505,13 +505,10 @@ uintptr_t page; for (page = first_page; page <= end_page; page++) { - /* check if our page is private or we are the only shared-page holder */ - switch (get_page_status_in(my_segnum, page)) { - - case PAGE_PRIVATE: - continue; - - case PAGE_NO_ACCESS: + if (get_page_status_in(my_segnum, page) == PAGE_NO_ACCESS) { + /* should not happen right now, since we do not make other + segment's pages NO_ACCESS anymore (later maybe in GC safe points) */ + abort(); /* happens if there is a concurrent WB between us making the backup and acquiring the locks */ release_all_privatization_locks(); @@ -520,34 +517,10 @@ *dummy; /* force segfault */ goto retry; - - case PAGE_SHARED: - break; - - default: - assert(0); - } - /* make sure all the others are NO_ACCESS - choosing to make us PRIVATE is harder because then nobody must ever - update the shared page in stm_validate() except if it is the sole - reader of it. But then we don't actually know which revision the page is at. 
*/ - /* XXX this is a temporary solution I suppose */ - int i; - for (i = 0; i < NB_SEGMENTS; i++) { - if (i == my_segnum) - continue; - - if (get_page_status_in(i, page) == PAGE_SHARED) { - /* xxx: unmap? */ - set_page_status_in(i, page, PAGE_NO_ACCESS); - mprotect((char*)(get_virt_page_of(i, page) * 4096UL), 4096UL, PROT_NONE); - dprintf(("NO_ACCESS in seg %d page %lu\n", i, page)); - } } } - /* all pages are either private or we were the first to write to a shared - page and therefore got it as our private one */ - + /* all pages are private to us and we hold the privatization_locks so + we are allowed to modify them */ /* phew, now add the obj to the write-set and register the backup copy. */ diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -151,7 +151,6 @@ #else char *stm_object_pages; #endif -static int stm_object_pages_fd; static stm_thread_local_t *stm_all_thread_locals = NULL; diff --git a/c8/stm/pages.c b/c8/stm/pages.c --- a/c8/stm/pages.c +++ b/c8/stm/pages.c @@ -28,6 +28,7 @@ set_page_status_in(segnum, pagenum, PAGE_ACCESSIBLE); } +__attribute__((unused)) static void page_mark_inaccessible(long segnum, uintptr_t pagenum) { assert(get_page_status_in(segnum, pagenum) == PAGE_ACCESSIBLE); diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -68,7 +68,6 @@ setup_mmap("initial stm_object_pages mmap()"); - assert(stm_object_pages_fd); assert(stm_object_pages); setup_protection_settings(); diff --git a/c8/stm/setup.h b/c8/stm/setup.h --- a/c8/stm/setup.h +++ b/c8/stm/setup.h @@ -1,5 +1,4 @@ static void setup_mmap(char *reason); -static void close_fd_mmap(int map_fd); static void setup_protection_settings(void); static pthread_t *_get_cpth(stm_thread_local_t *); From noreply at buildbot.pypy.org Mon Jan 12 16:45:31 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 12 Jan 2015 16:45:31 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix Message-ID: 
<20150112154531.885BA1C056E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1518:5fa8d7347e41 Date: 2015-01-12 16:37 +0100 http://bitbucket.org/pypy/stmgc/changeset/5fa8d7347e41/ Log: fix diff --git a/c8/stm/pages.c b/c8/stm/pages.c --- a/c8/stm/pages.c +++ b/c8/stm/pages.c @@ -34,7 +34,7 @@ assert(get_page_status_in(segnum, pagenum) == PAGE_ACCESSIBLE); dprintf(("page_mark_inaccessible(%lu) in seg:%ld\n", pagenum, segnum)); - set_page_status_in(segnum, pagenum, PAGE_ACCESSIBLE); + set_page_status_in(segnum, pagenum, PAGE_NO_ACCESS); char *addr = get_virtual_page(segnum, pagenum); madvise(get_virtual_page(segnum, pagenum), 4096, MADV_DONTNEED); From noreply at buildbot.pypy.org Mon Jan 12 18:35:09 2015 From: noreply at buildbot.pypy.org (dmalcolm) Date: Mon, 12 Jan 2015 18:35:09 +0100 (CET) Subject: [pypy-commit] pypy libgccjit-backend: notes.rst: gcc r219480 onwards should have everything needed by the backend Message-ID: <20150112173509.D03621D2312@cobra.cs.uni-duesseldorf.de> Author: David Malcolm Branch: libgccjit-backend Changeset: r75308:eddd1a835153 Date: 2015-01-12 12:43 -0500 http://bitbucket.org/pypy/pypy/changeset/eddd1a835153/ Log: notes.rst: gcc r219480 onwards should have everything needed by the backend diff --git a/rpython/jit/backend/libgccjit/notes.rst b/rpython/jit/backend/libgccjit/notes.rst --- a/rpython/jit/backend/libgccjit/notes.rst +++ b/rpython/jit/backend/libgccjit/notes.rst @@ -1,18 +1,18 @@ This is an experiment, and merely a work-in-progress. 
-In particular, this needs some changes to libgccjit that are currently -only in my local repo: +In particular, this needs some late-breaking changes to gcc 5's +libgccjit; gcc r219480 onwards (2015-01-12) should have everything: * new API entrypoints: - * :c:func:`gcc_jit_result_get_global` + * :c:func:`gcc_jit_result_get_global` (added in gcc r219480) - * :c:func:`gcc_jit_context_new_rvalue_from_long` + * :c:func:`gcc_jit_context_new_rvalue_from_long` (added in gcc r219401) - * :c:func:`gcc_jit_context_get_last_error` + * :c:func:`gcc_jit_context_get_last_error` (added in gcc r219363) * a new value :c:macro:`GCC_JIT_UNARY_OP_ABS` within - :c:type:`enum gcc_jit_unary_op`. + :c:type:`enum gcc_jit_unary_op` (added in r219321) * an extra param to :c:func:`gcc_jit_context_new_global` - (enum gcc_jit_global_kind). + (enum gcc_jit_global_kind; added in gcc r219480). From noreply at buildbot.pypy.org Mon Jan 12 19:04:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Jan 2015 19:04:20 +0100 (CET) Subject: [pypy-commit] pypy default: Issue #1925: tentative fix for the degenerating complexity Message-ID: <20150112180420.4CD841C01E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75309:f176f55e3cb7 Date: 2015-01-12 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/f176f55e3cb7/ Log: Issue #1925: tentative fix for the degenerating complexity diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -773,11 +773,17 @@ raise MemoryError # # If somebody calls this function a lot, we must eventually - # force a full collection. XXX make this more incremental! - if (float(self.get_total_memory_used()) + raw_malloc_usage(totalsize) > - self.next_major_collection_threshold): - self.gc_step_until(STATE_SWEEPING) - self.gc_step_until(STATE_FINALIZING, raw_malloc_usage(totalsize)) + # force a collection. XXX make this more incremental! 
For now + # the logic is to first do a minor GC only, and check if that + # was enough to free a bunch of large young objects. If not, + # we do a complete major GC. + if self.get_total_memory_free() < raw_malloc_usage(totalsize): + self.minor_collection() + if self.get_total_memory_free() < (raw_malloc_usage(totalsize) + + self.nursery_size // 2): + self.gc_step_until(STATE_SWEEPING) + self.gc_step_until(STATE_FINALIZING, + raw_malloc_usage(totalsize)) # # Check if the object would fit in the ArenaCollection. if raw_malloc_usage(totalsize) <= self.small_request_threshold: @@ -1054,6 +1060,10 @@ """ return self.ac.total_memory_used + self.rawmalloced_total_size + def get_total_memory_free(self): + return (self.next_major_collection_threshold - + float(self.get_total_memory_used())) + def card_marking_words_for_length(self, length): # --- Unoptimized version: #num_bits = ((length-1) >> self.card_page_shift) + 1 From noreply at buildbot.pypy.org Mon Jan 12 21:48:05 2015 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 12 Jan 2015 21:48:05 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: merge default into branch Message-ID: <20150112204805.138731C0976@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: stdlib-2.7.9 Changeset: r75310:619bddd17b1a Date: 2015-01-12 22:48 +0200 http://bitbucket.org/pypy/pypy/changeset/619bddd17b1a/ Log: merge default into branch diff too long, truncating to 2000 out of 9281 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -58,7 +58,7 @@ executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], + 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1597,7 +1597,7 @@ 'copyfile' in caller.f_globals): dest_dir = sys.pypy_resolvedirof(target_executable) src_dir = sys.pypy_resolvedirof(sys.executable) - for libname in ['libpypy-c.so']: + for libname in ['libpypy-c.so', 'libpypy-c.dylib']: dest_library = os.path.join(dest_dir, libname) src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): diff --git a/lib-python/2.7/test/test_collections.py b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -1117,6 +1117,16 @@ od.popitem() self.assertEqual(len(od), 0) + def test_popitem_first(self): + pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] + shuffle(pairs) + od = OrderedDict(pairs) + while pairs: + self.assertEqual(od.popitem(last=False), pairs.pop(0)) + with self.assertRaises(KeyError): + od.popitem(last=False) + self.assertEqual(len(od), 0) + def test_pop(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) @@ -1188,7 +1198,11 @@ od = OrderedDict(pairs) # yaml.dump(od) --> # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n' - self.assertTrue(all(type(pair)==list 
for pair in od.__reduce__()[1])) + + # PyPy bug fix: added [0] at the end of this line, because the + # test is really about the 2-tuples that need to be 2-lists + # inside the list of 6 of them + self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1][0])) def test_reduce_not_too_fat(self): # do not save instance dictionary if not needed @@ -1198,6 +1212,16 @@ od.x = 10 self.assertEqual(len(od.__reduce__()), 3) + def test_reduce_exact_output(self): + # PyPy: test that __reduce__() produces the exact same answer as + # CPython does, even though in the 'all_ordered_dicts' branch we + # have to emulate it. + pairs = [['c', 1], ['b', 2], ['d', 4]] + od = OrderedDict(pairs) + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,))) + od.x = 10 + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,), {'x': 10})) + def test_repr(self): od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py --- a/lib-python/2.7/test/test_xml_etree.py +++ b/lib-python/2.7/test/test_xml_etree.py @@ -225,9 +225,9 @@ >>> element.remove(subelement) >>> serialize(element) # 5 '' - >>> element.remove(subelement) + >>> element.remove(subelement) # doctest: +ELLIPSIS Traceback (most recent call last): - ValueError: list.remove(x): x not in list + ValueError: list.remove(... >>> serialize(element) # 6 '' >>> element[0:0] = [subelement, subelement, subelement] diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -7,7 +7,7 @@ 1. check out the branch vendor/stdlib 2. upgrade the files there -3. update stdlib-versions.txt with the output of hg -id from the cpython repo +3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit 5. update to default/py3k 6. 
create a integration branch for the new stdlib diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -9,7 +9,10 @@ of the given arguments and keywords. """ - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -6,3 +6,8 @@ __version__ = "0.8.6" __version_info__ = (0, 8, 6) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. +__version_verifier_modules__ = "0.8.6" diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -69,6 +69,7 @@ self._function_caches = [] self._libraries = [] self._cdefsources = [] + self._windows_unicode = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -77,6 +78,7 @@ # with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -189,13 +191,16 @@ cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) - def offsetof(self, cdecl, fieldname): + def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given - structure, which must be given as a C type name. + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. 
+ You can also give numeric values which correspond to array + items, in case of an array type. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) - return self._backend.typeoffsetof(cdecl, fieldname)[1] + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and @@ -264,6 +269,16 @@ """ return self._backend.buffer(cdata, size) + def from_buffer(self, python_buffer): + """Return a that points to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types str, + unicode, or bytearray (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. + """ + return self._backend.from_buffer(self.BCharA, python_buffer) + def callback(self, cdecl, python_callable=None, error=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. @@ -335,9 +350,23 @@ which requires binary compatibility in the signatures. """ from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations + if self._windows_unicode: + self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Veritier(). tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). 
self._libraries.append(lib) return lib @@ -356,15 +385,29 @@ with self._lock: return model.pointer_cache(self, ctype) - def addressof(self, cdata, field=None): + def addressof(self, cdata, *fields_or_indexes): """Return the address of a . - If 'field' is specified, return the address of this field. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. """ ctype = self._backend.typeof(cdata) - ctype, offset = self._backend.typeoffsetof(ctype, field) + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset + def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined in another FFI instance. Usage is similar to a #include in C, @@ -387,6 +430,44 @@ def from_handle(self, x): return self._backend.from_handle(x) + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'enabled_flag' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. 
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -169,6 +169,7 @@ class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False + kind = "pointer" @classmethod def _newp(cls, init): @@ -370,10 +371,12 @@ (CTypesPrimitive, type(source).__name__)) return source # + kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name + kind = kind1 def __init__(self, value): self._value = value @@ -703,12 +706,13 @@ class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) - _kind = kind + _kind = kind = kind1 # CTypesStructOrUnion._fix_class() 
return CTypesStructOrUnion @@ -994,27 +998,42 @@ def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) - def typeoffsetof(self, BType, fieldname): - if fieldname is not None and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - if fieldname is None: - return (BType, 0) - else: + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) - def rawaddressof(self, BTypePtr, cdata, offset): + def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a ") - if offset != 0: + if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ 
b/lib_pypy/cffi/commontypes.py @@ -29,6 +29,9 @@ result = model.PointerType(resolve_common_type(result[:-2])) elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) + elif result == 'set-unicode-needed': + raise api.FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) else: if commontype == result: raise api.FFIError("Unsupported type: %r. Please file a bug " @@ -86,8 +89,6 @@ "ULONGLONG": "unsigned long long", "WCHAR": "wchar_t", "SHORT": "short", - "TBYTE": "WCHAR", - "TCHAR": "WCHAR", "UCHAR": "unsigned char", "UINT": "unsigned int", "UINT8": "unsigned char", @@ -157,14 +158,12 @@ "LPCVOID": model.const_voidp_type, "LPCWSTR": "const WCHAR *", - "LPCTSTR": "LPCWSTR", "LPDWORD": "DWORD *", "LPHANDLE": "HANDLE *", "LPINT": "int *", "LPLONG": "long *", "LPSTR": "CHAR *", "LPWSTR": "WCHAR *", - "LPTSTR": "LPWSTR", "LPVOID": model.voidp_type, "LPWORD": "WORD *", "LRESULT": "LONG_PTR", @@ -173,7 +172,6 @@ "PBYTE": "BYTE *", "PCHAR": "CHAR *", "PCSTR": "const CHAR *", - "PCTSTR": "LPCWSTR", "PCWSTR": "const WCHAR *", "PDWORD": "DWORD *", "PDWORDLONG": "DWORDLONG *", @@ -200,9 +198,6 @@ "PSIZE_T": "SIZE_T *", "PSSIZE_T": "SSIZE_T *", "PSTR": "CHAR *", - "PTBYTE": "TBYTE *", - "PTCHAR": "TCHAR *", - "PTSTR": "LPWSTR", "PUCHAR": "UCHAR *", "PUHALF_PTR": "UHALF_PTR *", "PUINT": "UINT *", @@ -240,6 +235,15 @@ "USN": "LONGLONG", "VOID": model.void_type, "WPARAM": "UINT_PTR", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", }) return result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -1,4 +1,3 @@ - from . 
import api, model from .commontypes import COMMON_TYPES, resolve_common_type try: @@ -209,6 +208,8 @@ def _add_constants(self, key, val): if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations raise api.FFIError( "multiple declarations of constant: %s" % (key,)) self._int_constants[key] = val @@ -228,12 +229,18 @@ pyvalue = int(int_str, 0) self._add_constants(key, pyvalue) + self._declare('macro ' + key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: - raise api.CDefError('only supports the syntax "#define ' - '%s ..." (literally) or "#define ' - '%s 0x1FF" for now' % (key, key)) + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... (literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) def _parse_decl(self, decl): node = decl.type @@ -460,6 +467,8 @@ elif kind == 'union': tp = model.UnionType(explicit_name, None, None, None) elif kind == 'enum': + if explicit_name == '__dotdotdot__': + raise CDefError("Enums cannot be declared with ...") tp = self._build_enum_type(explicit_name, type.values) else: raise AssertionError("kind = %r" % (kind,)) @@ -532,9 +541,24 @@ def _parse_constant(self, exprnode, partial_length_ok=False): # for now, limited to expressions that are an immediate number - # or negative number + # or positive/negative number if isinstance(exprnode, pycparser.c_ast.Constant): - return int(exprnode.value, 0) + s = exprnode.value + if s.startswith('0'): + if s.startswith('0x') or s.startswith('0X'): + return int(s, 16) + return int(s, 8) + elif '1' <= s[0] <= '9': + return int(s, 10) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise api.CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + 
exprnode.op == '+'): + return self._parse_constant(exprnode.expr) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -11,6 +11,9 @@ """ +LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', + 'extra_objects', 'depends'] + def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -235,6 +235,8 @@ BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) +char_array_type = ArrayType(PrimitiveType('char'), None) + class StructOrUnionOrEnum(BaseTypeByIdentity): _attrs_ = ('name',) @@ -478,7 +480,7 @@ try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: - raise NotImplementedError("%r: %s" % (srctype, e)) + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -65,7 +65,7 @@ # The following two 'chained_list_constants' items contains # the head of these two chained lists, as a string that gives the # call to do, if any. - self._chained_list_constants = ['0', '0'] + self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] # prnt = self._prnt # first paste some standard set of lines that are mostly '#define' @@ -138,15 +138,22 @@ prnt() prnt('#endif') - def load_library(self): + def load_library(self, flags=None): # XXX review all usages of 'self' here! 
# import it as a new extension module + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) module = imp.load_dynamic(self.verifier.get_module_name(), self.verifier.modulefilename) except ImportError as e: error = "importing %r: %s" % (self.verifier.modulefilename, e) raise ffiplatform.VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler @@ -228,7 +235,8 @@ converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: - converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) errvalue = '-1' # elif isinstance(tp, model.PointerType): @@ -267,8 +275,8 @@ self._prnt(' if (datasize != 0) {') self._prnt(' if (datasize < 0)') self._prnt(' %s;' % errcode) - self._prnt(' %s = alloca(datasize);' % (tovar,)) - self._prnt(' memset((void *)%s, 0, datasize);' % (tovar,)) + self._prnt(' %s = alloca((size_t)datasize);' % (tovar,)) + self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,)) self._prnt(' if (_cffi_convert_array_from_object(' '(char *)%s, _cffi_type(%d), %s) < 0)' % ( tovar, self._gettypenum(tp), fromvar)) @@ -336,7 +344,7 @@ prnt = self._prnt numargs = len(tp.args) if numargs == 0: - argname = 'no_arg' + argname = 'noarg' elif numargs == 1: argname = 'arg0' else: @@ -386,6 +394,9 @@ prnt(' Py_END_ALLOW_THREADS') prnt() # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') if result_code: prnt(' return %s;' % self._convert_expr_from_c(tp.result, 'result', 'result type')) @@ -452,6 +463,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in 
tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -482,6 +494,8 @@ prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') prnt(' return _cffi_get_struct_layout(nums);') prnt(' /* the next line is not executed, but compiled */') prnt(' %s(0);' % (checkfuncname,)) @@ -578,7 +592,8 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True, size_too=False): + vartp=None, delayed=True, size_too=False, + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -590,6 +605,9 @@ else: assert category == 'const' # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # if not is_int: if category == 'var': realexpr = '&' + name @@ -637,6 +655,27 @@ # ---------- # enums + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -653,25 +692,8 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) 
{' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %s, ' - 'not %s",') - prnt(' "%s", "%s", buf, "%d");' % ( - name, enumerator, enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') @@ -695,8 +717,11 @@ # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): - assert tp == '...' - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing @@ -783,6 +808,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif 
@@ -828,12 +871,15 @@ PyLong_FromLongLong((long long)(x))) #define _cffi_from_c_int(x, type) \ - (((type)-1) > 0 ? /* unsigned */ \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) \ - : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x))) + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) #define _cffi_to_c_int(o, type) \ (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ @@ -844,7 +890,7 @@ : (type)_cffi_to_c_i32(o)) : \ sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ : (type)_cffi_to_c_i64(o)) : \ - (Py_FatalError("unsupported size for type " #type), 0)) + (Py_FatalError("unsupported size for type " #type), (type)0)) #define _cffi_to_c_i8 \ ((int(*)(PyObject *))_cffi_exports[1]) @@ -907,6 +953,7 @@ { PyObject *library; int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -58,12 +58,12 @@ modname = self.verifier.get_module_name() prnt("void %s%s(void) { }\n" % (prefix, modname)) - def load_library(self): + def load_library(self, flags=0): # import it with the CFFI backend backend = self.ffi._backend # needs to make a path that contains '/', on Posix filename = os.path.join(os.curdir, self.verifier.modulefilename) - module = backend.load_library(filename) + module = backend.load_library(filename, flags) # # call loading_gen_struct() to get the struct layout 
inferred by # the C compiler @@ -235,6 +235,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -354,11 +355,20 @@ # ---------- # constants, likely declared with '#define' - def _generate_gen_const(self, is_int, name, tp=None, category='const'): + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) - if is_int: + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') @@ -367,6 +377,7 @@ prnt('}') else: assert tp is not None + assert check_value is None prnt(tp.get_c_name(' %s(void)' % funcname, name),) prnt('{') if category == 'var': @@ -383,9 +394,13 @@ _loading_gen_constant = _loaded_noop - def _load_constant(self, is_int, tp, name, module): + def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name - if is_int: + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) @@ -396,6 +411,7 @@ BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: + assert check_value is None BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() @@ 
-410,6 +426,36 @@ # ---------- # enums + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise ffiplatform.VerificationError(error) + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -427,24 +473,7 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' sprintf(out_error,' - ' "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % ( - enumerator[:100], enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() @@ -456,16 +485,8 @@ 
tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = self._enum_funcname(prefix, name) - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise ffiplatform.VerificationError(error) + self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): @@ -476,13 +497,21 @@ # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): - assert tp == '...' - self._generate_gen_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): - value = self._load_constant(True, tp, name, module) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) @@ -565,6 +594,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned 
__int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -1,12 +1,23 @@ -import sys, os, binascii, imp, shutil -from . import __version__ +import sys, os, binascii, shutil +from . import __version_verifier_modules__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): def __init__(self, ffi, preamble, tmpdir=None, modulename=None, - ext_package=None, tag='', force_generic_engine=False, **kwds): + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): self.ffi = ffi self.preamble = preamble if not modulename: @@ -14,14 +25,15 @@ vengine_class = _locate_engine_class(ffi, force_generic_engine) self._vengine = vengine_class(self) self._vengine.patch_extension_kwds(kwds) - self.kwds = kwds + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) # if modulename: if tag: raise TypeError("can't specify both 'modulename' and 'tag'") else: - key = '\x00'.join([sys.version[:3], __version__, preamble, - flattened_kwds] + + key = '\x00'.join([sys.version[:3], __version_verifier_modules__, + preamble, flattened_kwds] + ffi._cdefsources) if sys.version_info >= (3,): key = key.encode('utf-8') @@ -33,7 +45,7 @@ k1, k2) suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() - self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) self.ext_package = ext_package 
self._has_source = False @@ -97,6 +109,20 @@ def generates_python_module(self): return self._vengine._gen_python_module + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + # ---------- def _locate_module(self): @@ -148,7 +174,10 @@ def _load_library(self): assert self._has_module - return self._vengine.load_library() + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() # ____________________________________________________________ @@ -181,6 +210,9 @@ def _caller_dir_pycache(): if _TMPDIR: return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result filename = sys._getframe(2).f_code.co_filename return os.path.abspath(os.path.join(os.path.dirname(filename), '__pycache__')) @@ -222,11 +254,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. Occurs on pypy without cpyext if sys.platform == 'win32': diff --git a/lib_pypy/readline.py b/lib_pypy/readline.py --- a/lib_pypy/readline.py +++ b/lib_pypy/readline.py @@ -6,4 +6,11 @@ are only stubs at the moment. 
""" -from pyrepl.readline import * +try: + from pyrepl.readline import * +except ImportError: + import sys + if sys.platform == 'win32': + raise ImportError("the 'readline' module is not available on Windows" + " (on either PyPy or CPython)") + raise diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -97,12 +97,18 @@ return res; } -If we save it as ``x.c`` now, compile it and run it with:: +If we save it as ``x.c`` now, compile it and run it (on linux) with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy +on OSX it is necessary to set the rpath of the binary if one wants to link to it:: + + gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl, at executable_path + ./x + hello from pypy + Worked! .. note:: If the compilation fails because of missing PyPy.h header file, diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -38,14 +38,13 @@ and not move the binary there, else PyPy would not be able to find its library. -If you want to install 3rd party libraries, the most convenient way is to -install distribute_ and pip_: +If you want to install 3rd party libraries, the most convenient way is +to install pip_ (unless you want to install virtualenv as explained +below; then you can directly use pip inside virtualenvs): .. code-block:: console - $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.1/bin/pypy distribute_setup.py + $ curl -O https://bootstrap.pypa.io/get-pip.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example @@ -69,7 +68,6 @@ Note that bin/python is now a symlink to bin/pypy. -.. _distribute: http://www.python-distribute.org/ .. 
_pip: http://pypi.python.org/pypi/pip diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -234,8 +234,7 @@ enable_translationmodules(config) config.translation.suggest(check_str_without_nul=True) - if sys.platform.startswith('linux'): - config.translation.suggest(shared=True) + config.translation.suggest(shared=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -83,17 +83,16 @@ class __extend__(ast.BoolOp): - def _accept_jump_if_any_is(self, gen, condition, target): - self.values[0].accept_jump_if(gen, condition, target) - for i in range(1, len(self.values)): + def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0): + for i in range(len(self.values) - skip_last): self.values[i].accept_jump_if(gen, condition, target) def accept_jump_if(self, gen, condition, target): if condition and self.op == ast.And or \ (not condition and self.op == ast.Or): end = gen.new_block() - self._accept_jump_if_any_is(gen, not condition, end) - gen.emit_jump(ops.JUMP_FORWARD, target) + self._accept_jump_if_any_is(gen, not condition, end, skip_last=1) + self.values[-1].accept_jump_if(gen, condition, target) gen.use_next_block(end) else: self._accept_jump_if_any_is(gen, condition, target) diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -86,9 +86,11 @@ def print_(*args, **kwargs): """The new-style print function from py3k.""" - fp = kwargs.pop("file", sys.stdout) + fp = kwargs.pop("file", None) if fp is None: - return + fp = sys.stdout + if fp is None: + return def write(data): if not isinstance(data, basestring): data = str(data) 
diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -651,9 +651,12 @@ out = sys.stdout = StringIO.StringIO() try: pr("Hello,", "person!") + pr("2nd line", file=None) + sys.stdout = None + pr("nowhere") finally: sys.stdout = save - assert out.getvalue() == "Hello, person!\n" + assert out.getvalue() == "Hello, person!\n2nd line\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" @@ -668,7 +671,6 @@ result = out.getvalue() assert isinstance(result, unicode) assert result == u"Hello, person!\n" - pr("Hello", file=None) # This works. out = StringIO.StringIO() pr(None, file=out) assert out.getvalue() == "None\n" diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -34,6 +34,7 @@ 'newp_handle': 'handle.newp_handle', 'from_handle': 'handle.from_handle', '_get_types': 'func._get_types', + 'from_buffer': 'func.from_buffer', 'string': 'func.string', 'buffer': 'cbuffer.buffer', diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -45,8 +45,9 @@ # cif_descr = self.getfunctype().cif_descr if not cif_descr: - raise OperationError(space.w_NotImplementedError, - space.wrap("callbacks with '...'")) + raise oefmt(space.w_NotImplementedError, + "%s: callback with unsupported argument or " + "return type or with '...'", self.getfunctype().name) res = clibffi.c_ffi_prep_closure(self.get_closure(), cif_descr.cif, invoke_callback, rffi.cast(rffi.VOIDP, self.unique_id)) diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ 
b/pypy/module/_cffi_backend/cdataobj.py @@ -440,6 +440,25 @@ return "handle to %s" % (self.space.str_w(w_repr),) +class W_CDataFromBuffer(W_CData): + _attrs_ = ['buf', 'length', 'w_keepalive'] + _immutable_fields_ = ['buf', 'length', 'w_keepalive'] + + def __init__(self, space, cdata, ctype, buf, w_object): + W_CData.__init__(self, space, cdata, ctype) + self.buf = buf + self.length = buf.getlength() + self.w_keepalive = w_object + + def get_array_length(self): + return self.length + + def _repr_extra(self): + w_repr = self.space.repr(self.w_keepalive) + return "buffer len %d from '%s' object" % ( + self.length, self.space.type(self.w_keepalive).name) + + W_CData.typedef = TypeDef( '_cffi_backend.CData', __module__ = '_cffi_backend', diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -107,6 +107,9 @@ return self.space.w_None return W_CTypePtrOrArray._fget(self, attrchar) + def typeoffsetof_index(self, index): + return self.ctptr.typeoffsetof_index(index) + class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -27,6 +27,8 @@ _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] kind = "function" + cif_descr = lltype.nullptr(CIF_DESCRIPTION) + def __init__(self, space, fargs, fresult, ellipsis): extra = self._compute_extra_text(fargs, fresult, ellipsis) size = rffi.sizeof(rffi.VOIDP) @@ -41,7 +43,17 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. 
- CifDescrBuilder(fargs, fresult).rawallocate(self) + builder = CifDescrBuilder(fargs, fresult) + try: + builder.rawallocate(self) + except OperationError, e: + if not e.match(space, space.w_NotImplementedError): + raise + # else, eat the NotImplementedError. We will get the + # exception if we see an actual call + if self.cif_descr: # should not be True, but you never know + lltype.free(self.cif_descr, flavor='raw') + self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) def new_ctypefunc_completing_argtypes(self, args_w): space = self.space @@ -57,10 +69,12 @@ "argument %d passed in the variadic part needs to " "be a cdata object (got %T)", i + 1, w_obj) fvarargs[i] = ct + # xxx call instantiate() directly. It's a bit of a hack. ctypefunc = instantiate(W_CTypeFunc) ctypefunc.space = space ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem + #ctypefunc.cif_descr = NULL --- already provided as the default CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc @@ -178,8 +192,6 @@ # ____________________________________________________________ -W_CTypeFunc.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # default value - BIG_ENDIAN = sys.byteorder == 'big' USE_C_LIBFFI_MSVC = getattr(clibffi, 'USE_C_LIBFFI_MSVC', False) @@ -295,18 +307,18 @@ nflat = 0 for i, cf in enumerate(ctype.fields_list): if cf.is_bitfield(): - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with bit fields")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with bit fields)", ctype.name) flat = 1 ct = cf.ctype while isinstance(ct, ctypearray.W_CTypeArray): flat *= ct.length ct = ct.ctitem if flat <= 0: - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with a zero-length array")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or 
return value" + " (it is a struct with a zero-length array)", ctype.name) nflat += flat if USE_C_LIBFFI_MSVC and is_result_type: diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -142,12 +142,14 @@ raise oefmt(space.w_ValueError, "ctype '%s' is of unknown alignment", self.name) - def typeoffsetof(self, fieldname): + def typeoffsetof_field(self, fieldname, following): space = self.space - if fieldname is None: - msg = "expected a struct or union ctype" - else: - msg = "expected a struct or union ctype, or a pointer to one" + msg = "with a field name argument, expected a struct or union ctype" + raise OperationError(space.w_TypeError, space.wrap(msg)) + + def typeoffsetof_index(self, index): + space = self.space + msg = "with an integer argument, expected an array or pointer ctype" raise OperationError(space.w_TypeError, space.wrap(msg)) def rawaddressof(self, cdata, offset): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -308,24 +308,36 @@ def getcfield(self, attr): return self.ctitem.getcfield(attr) - def typeoffsetof(self, fieldname): - if fieldname is None: - return W_CTypePtrBase.typeoffsetof(self, fieldname) - else: - return self.ctitem.typeoffsetof(fieldname) + def typeoffsetof_field(self, fieldname, following): + if following == 0: + return self.ctitem.typeoffsetof_field(fieldname, -1) + return W_CTypePtrBase.typeoffsetof_field(self, fieldname, following) + + def typeoffsetof_index(self, index): + space = self.space + ctitem = self.ctitem + if ctitem.size < 0: + raise OperationError(space.w_TypeError, + space.wrap("pointer to opaque")) + try: + offset = ovfcheck(index * ctitem.size) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array offset would overflow a 
ssize_t")) + return ctitem, offset def rawaddressof(self, cdata, offset): from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and - isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): + isinstance(ctype2, W_CTypePtrOrArray)): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: raise OperationError(space.w_TypeError, - space.wrap("expected a 'cdata struct-or-union' object")) + space.wrap("expected a cdata struct/union/array/pointer" + " object")) def _fget(self, attrchar): if attrchar == 'i': # item diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -65,9 +65,7 @@ keepalive_until_here(ob) return ob - def typeoffsetof(self, fieldname): - if fieldname is None: - return (self, 0) + def typeoffsetof_field(self, fieldname, following): self.check_complete() space = self.space try: diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -48,13 +48,28 @@ align = w_ctype.alignof() return space.wrap(align) - at unwrap_spec(w_ctype=ctypeobj.W_CType, fieldname="str_or_None") -def typeoffsetof(space, w_ctype, fieldname): - ctype, offset = w_ctype.typeoffsetof(fieldname) + at unwrap_spec(w_ctype=ctypeobj.W_CType, following=int) +def typeoffsetof(space, w_ctype, w_field_or_index, following=0): + try: + fieldname = space.str_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + try: + index = space.int_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, + space.wrap("field name or array index expected")) + 
ctype, offset = w_ctype.typeoffsetof_index(index) + else: + ctype, offset = w_ctype.typeoffsetof_field(fieldname, following) + # return space.newtuple([space.wrap(ctype), space.wrap(offset)]) @unwrap_spec(w_ctype=ctypeobj.W_CType, w_cdata=cdataobj.W_CData, offset=int) -def rawaddressof(space, w_ctype, w_cdata, offset=0): +def rawaddressof(space, w_ctype, w_cdata, offset): return w_ctype.rawaddressof(w_cdata, offset) # ____________________________________________________________ @@ -76,3 +91,32 @@ def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) + +# ____________________________________________________________ + + at unwrap_spec(w_ctype=ctypeobj.W_CType) +def from_buffer(space, w_ctype, w_x): + from pypy.module._cffi_backend import ctypearray, ctypeprim + # + if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or + not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)): + raise oefmt(space.w_TypeError, + "needs 'char[]', got '%s'", w_ctype.name) + # + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? 
+ try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + try: + _cdata = buf.get_raw_address() + except ValueError: + raise oefmt(space.w_TypeError, + "from_buffer() got a '%T' object, which supports the " + "buffer interface but cannot be rendered as a plain " + "raw address on PyPy", w_x) + # + return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -62,10 +62,54 @@ eptype("intptr_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) eptype("uintptr_t", rffi.UINTPTR_T, ctypeprim.W_CTypePrimitiveUnsigned) -eptype("ptrdiff_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) # <-xxx eptype("size_t", rffi.SIZE_T, ctypeprim.W_CTypePrimitiveUnsigned) eptype("ssize_t", rffi.SSIZE_T, ctypeprim.W_CTypePrimitiveSigned) +_WCTSigned = ctypeprim.W_CTypePrimitiveSigned +_WCTUnsign = ctypeprim.W_CTypePrimitiveUnsigned + +eptype("ptrdiff_t", getattr(rffi, 'PTRDIFF_T', rffi.INTPTR_T), _WCTSigned) +eptype("intmax_t", getattr(rffi, 'INTMAX_T', rffi.LONGLONG), _WCTSigned) +eptype("uintmax_t", getattr(rffi, 'UINTMAX_T', rffi.LONGLONG), _WCTUnsign) + +if hasattr(rffi, 'INT_LEAST8_T'): + eptype("int_least8_t", rffi.INT_LEAST8_T, _WCTSigned) + eptype("int_least16_t", rffi.INT_LEAST16_T, _WCTSigned) + eptype("int_least32_t", rffi.INT_LEAST32_T, _WCTSigned) + eptype("int_least64_t", rffi.INT_LEAST64_T, _WCTSigned) + eptype("uint_least8_t", rffi.UINT_LEAST8_T, _WCTUnsign) + eptype("uint_least16_t",rffi.UINT_LEAST16_T, _WCTUnsign) + eptype("uint_least32_t",rffi.UINT_LEAST32_T, _WCTUnsign) + eptype("uint_least64_t",rffi.UINT_LEAST64_T, _WCTUnsign) +else: + eptypesize("int_least8_t", 1, _WCTSigned) + eptypesize("uint_least8_t", 1, _WCTUnsign) + eptypesize("int_least16_t", 2, _WCTSigned) + 
eptypesize("uint_least16_t", 2, _WCTUnsign) + eptypesize("int_least32_t", 4, _WCTSigned) + eptypesize("uint_least32_t", 4, _WCTUnsign) + eptypesize("int_least64_t", 8, _WCTSigned) + eptypesize("uint_least64_t", 8, _WCTUnsign) + +if hasattr(rffi, 'INT_FAST8_T'): + eptype("int_fast8_t", rffi.INT_FAST8_T, _WCTSigned) + eptype("int_fast16_t", rffi.INT_FAST16_T, _WCTSigned) + eptype("int_fast32_t", rffi.INT_FAST32_T, _WCTSigned) + eptype("int_fast64_t", rffi.INT_FAST64_T, _WCTSigned) + eptype("uint_fast8_t", rffi.UINT_FAST8_T, _WCTUnsign) + eptype("uint_fast16_t",rffi.UINT_FAST16_T, _WCTUnsign) + eptype("uint_fast32_t",rffi.UINT_FAST32_T, _WCTUnsign) + eptype("uint_fast64_t",rffi.UINT_FAST64_T, _WCTUnsign) +else: + eptypesize("int_fast8_t", 1, _WCTSigned) + eptypesize("uint_fast8_t", 1, _WCTUnsign) + eptypesize("int_fast16_t", 2, _WCTSigned) + eptypesize("uint_fast16_t", 2, _WCTUnsign) + eptypesize("int_fast32_t", 4, _WCTSigned) + eptypesize("uint_fast32_t", 4, _WCTUnsign) + eptypesize("int_fast64_t", 8, _WCTSigned) + eptypesize("uint_fast64_t", 8, _WCTUnsign) + @unwrap_spec(name=str) def new_primitive_type(space, name): try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -397,7 +397,7 @@ def test_invalid_indexing(): p = new_primitive_type("int") x = cast(p, 42) - py.test.raises(TypeError, "p[0]") + py.test.raises(TypeError, "x[0]") def test_default_str(): BChar = new_primitive_type("char") @@ -1030,11 +1030,12 @@ BInt = new_primitive_type("int") BArray0 = new_array_type(new_pointer_type(BInt), 0) BStruct = new_struct_type("struct foo") + BStructP = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BArray0)]) - py.test.raises(NotImplementedError, new_function_type, - (BStruct,), BInt, False) - py.test.raises(NotImplementedError, new_function_type, - (BInt,), BStruct, False) 
+ BFunc = new_function_type((BStruct,), BInt, False) + py.test.raises(NotImplementedError, cast(BFunc, 123), cast(BStructP, 123)) + BFunc2 = new_function_type((BInt,), BStruct, False) + py.test.raises(NotImplementedError, cast(BFunc2, 123), 123) def test_call_function_9(): BInt = new_primitive_type("int") @@ -1805,7 +1806,8 @@ new_function_type((), new_pointer_type(BFunc)) BUnion = new_union_type("union foo_u") complete_struct_or_union(BUnion, []) - py.test.raises(NotImplementedError, new_function_type, (), BUnion) + BFunc = new_function_type((), BUnion) + py.test.raises(NotImplementedError, cast(BFunc, 123)) py.test.raises(TypeError, new_function_type, (), BArray) def test_struct_return_in_func(): @@ -2525,13 +2527,32 @@ ('a2', BChar, -1), ('a3', BChar, -1)]) py.test.raises(TypeError, typeoffsetof, BStructPtr, None) - assert typeoffsetof(BStruct, None) == (BStruct, 0) + py.test.raises(TypeError, typeoffsetof, BStruct, None) assert typeoffsetof(BStructPtr, 'a1') == (BChar, 0) assert typeoffsetof(BStruct, 'a1') == (BChar, 0) assert typeoffsetof(BStructPtr, 'a2') == (BChar, 1) assert typeoffsetof(BStruct, 'a3') == (BChar, 2) + assert typeoffsetof(BStructPtr, 'a2', 0) == (BChar, 1) + assert typeoffsetof(BStruct, u+'a3') == (BChar, 2) + py.test.raises(TypeError, typeoffsetof, BStructPtr, 'a2', 1) py.test.raises(KeyError, typeoffsetof, BStructPtr, 'a4') py.test.raises(KeyError, typeoffsetof, BStruct, 'a5') + py.test.raises(TypeError, typeoffsetof, BStruct, 42) + py.test.raises(TypeError, typeoffsetof, BChar, 'a1') + +def test_typeoffsetof_array(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + py.test.raises(TypeError, typeoffsetof, BArray, None) + py.test.raises(TypeError, typeoffsetof, BArray, 'a1') + assert typeoffsetof(BArray, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BIntP, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BArray, -51) == (BInt, -51 * size_of_int()) + MAX = 
sys.maxsize // size_of_int() + assert typeoffsetof(BArray, MAX) == (BInt, MAX * size_of_int()) + assert typeoffsetof(BIntP, MAX) == (BInt, MAX * size_of_int()) + py.test.raises(OverflowError, typeoffsetof, BArray, MAX + 1) def test_typeoffsetof_no_bitfield(): BInt = new_primitive_type("int") @@ -2551,17 +2572,26 @@ assert repr(p) == "" s = p[0] assert repr(s) == "" - a = rawaddressof(BStructPtr, s) + a = rawaddressof(BStructPtr, s, 0) assert repr(a).startswith("" + p = new_pointer_type(new_primitive_type("unsigned short")) + cast(p, c)[1] += 500 + assert list(a) == [10000, 20500, 30000] + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8.6" diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -30,7 +30,7 @@ class AppTestC(object): """Populated below, hack hack hack.""" - spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO')) + spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO', 'array')) def setup_class(cls): testfuncs_w = [] diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -565,7 +565,7 @@ # Flush the write buffer if necessary if self.writable: - self._writer_flush_unlocked(space) + self._flush_and_rewind_unlocked(space) self._reader_reset_buf() # Read whole blocks, and don't buffer them diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -362,3 +362,32 @@ f.read(1) f.seek(-1, 1) f.write(b'') + + def test_issue1902_2(self): + import _io + with _io.open(self.tmpfile, 'w+b', 4096) as f: + f.write(b'\xff' * 13569) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(4123) + f.seek(-4123, 1) + + def 
test_issue1902_3(self): + import _io + buffer_size = 4096 + with _io.open(self.tmpfile, 'w+b', buffer_size) as f: + f.write(b'\xff' * buffer_size * 3) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(buffer_size * 2) + assert f.tell() == 1 + buffer_size * 2 diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,4 +1,5 @@ from rpython.rlib.buffer import Buffer +from rpython.rtyper.lltypesystem import rffi # XXX not the most efficient implementation @@ -20,3 +21,7 @@ def setitem(self, index, char): ll_buffer = self.datainstance.ll_buffer ll_buffer[index] = char + + def get_raw_address(self): + ll_buffer = self.datainstance.ll_buffer + return rffi.cast(rffi.CCHARP, ll_buffer) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1144,6 +1144,15 @@ b[3] = b'x' assert b[3] == b'x' + def test_pypy_raw_address(self): + import _rawffi + S = _rawffi.Structure((40, 1)) + s = S(autofree=True) + addr = buffer(s)._pypy_raw_address() + assert type(addr) is int + assert buffer(s)._pypy_raw_address() == addr + assert buffer(s, 10)._pypy_raw_address() == addr + 10 + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -244,6 +244,9 @@ def getitem(self, index): return self.ptr[index] + def get_raw_address(self): + return rffi.cast(rffi.CCHARP, self.ptr) + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py --- a/pypy/module/gc/__init__.py +++ 
b/pypy/module/gc/__init__.py @@ -30,6 +30,7 @@ 'get_referrers': 'referents.get_referrers', '_dump_rpy_heap': 'referents._dump_rpy_heap', 'get_typeids_z': 'referents.get_typeids_z', + 'get_typeids_list': 'referents.get_typeids_list', 'GcRef': 'referents.W_GcRef', }) MixedModule.__init__(self, space, w_name) diff --git a/pypy/module/gc/app_referents.py b/pypy/module/gc/app_referents.py --- a/pypy/module/gc/app_referents.py +++ b/pypy/module/gc/app_referents.py @@ -16,7 +16,8 @@ [0][0][0][-1] inserted after all GC roots, before all non-roots. If the argument is a filename and the 'zlib' module is available, - we also write a 'typeids.txt' in the same directory, if none exists. + we also write 'typeids.txt' and 'typeids.lst' in the same directory, + if they don't already exist. """ if isinstance(file, str): f = open(file, 'wb') @@ -30,7 +31,13 @@ filename2 = os.path.join(os.path.dirname(file), 'typeids.txt') if not os.path.exists(filename2): data = zlib.decompress(gc.get_typeids_z()) - f = open(filename2, 'wb') + f = open(filename2, 'w') + f.write(data) + f.close() + filename2 = os.path.join(os.path.dirname(file), 'typeids.lst') + if not os.path.exists(filename2): + data = ''.join(['%d\n' % n for n in gc.get_typeids_list()]) + f = open(filename2, 'w') f.write(data) f.close() else: diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py --- a/pypy/module/gc/referents.py +++ b/pypy/module/gc/referents.py @@ -228,3 +228,8 @@ a = rgc.get_typeids_z() s = ''.join([a[i] for i in range(len(a))]) return space.wrap(s) + +def get_typeids_list(space): + l = rgc.get_typeids_list() + list_w = [space.wrap(l[i]) for i in range(len(l))] + return space.newlist(list_w) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -392,7 +392,7 @@ alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == 
space.w_None): - raise oefmt(space.w_ValueError, "inconsistent fields and names") + raise oefmt(space.w_ValueError, "inconsistent fields and names in Numpy dtype unpickling") self.byteorder = endian self.shape = [] diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -478,6 +478,15 @@ for i in range(4): assert c[i] == max(a[i], b[i]) + + def test_abs_overflow(self): + from numpy import array, absolute, isinf + a = array(complex(1.5e308,1.5e308)) + # Prints a RuntimeWarning, but does not raise + b = absolute(a) + assert isinf(b) + + def test_basic(self): import sys from numpy import (dtype, add, array, dtype, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -665,6 +665,7 @@ assert numpy.int64(9223372036854775807) == 9223372036854775807 assert numpy.int64(9223372036854775807) == 9223372036854775807 + assert numpy.int64(-9223372036854775807) == -9223372036854775807 raises(OverflowError, numpy.int64, 9223372036854775808) raises(OverflowError, numpy.int64, 9223372036854775808L) @@ -1233,7 +1234,8 @@ d = np.dtype((' Author: Amaury Forgeot d'Arc Branch: kill_ll_time Changeset: r75311:97fbbce870d7 Date: 2015-01-12 22:11 +0100 http://bitbucket.org/pypy/pypy/changeset/97fbbce870d7/ Log: time.time is called as "self.timer()", wrap it in a static method. 
diff --git a/rpython/jit/metainterp/jitprof.py b/rpython/jit/metainterp/jitprof.py --- a/rpython/jit/metainterp/jitprof.py +++ b/rpython/jit/metainterp/jitprof.py @@ -48,7 +48,7 @@ class Profiler(BaseProfiler): initialized = False - timer = time.time + timer = staticmethod(time.time) starttime = 0 t1 = 0 times = None From noreply at buildbot.pypy.org Mon Jan 12 22:22:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Jan 2015 22:22:54 +0100 (CET) Subject: [pypy-commit] cffi default: Say "From cffi callback" instead of just "From callback" Message-ID: <20150112212254.888731C01E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1635:2d04ac7331c7 Date: 2015-01-12 22:23 +0100 http://bitbucket.org/cffi/cffi/changeset/2d04ac7331c7/ Log: Say "From cffi callback" instead of just "From callback" diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4515,7 +4515,7 @@ #endif f = PySys_GetObject("stderr"); if (f != NULL) { - PyFile_WriteString("From callback ", f); + PyFile_WriteString("From cffi callback ", f); PyFile_WriteObject(obj, f, 0); PyFile_WriteString(":\n", f); if (extra_error_line != NULL) diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1186,7 +1186,7 @@ assert sys.stderr.getvalue() == '' assert f(10000) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Traceback (most recent call last): File "$", line $, in Zcb1 $ @@ -1198,7 +1198,7 @@ bigvalue = 20000 assert f(bigvalue) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Trying to convert the result back to C: OverflowError: integer 60000 does not fit 'short' """) From noreply at buildbot.pypy.org Mon Jan 12 22:24:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Jan 2015 22:24:08 +0100 (CET) Subject: [pypy-commit] pypy default: update to cffi/2d04ac7331c7 Message-ID: 
<20150112212408.39A761C01E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75312:e7d1efa3564f Date: 2015-01-12 22:23 +0100 http://bitbucket.org/pypy/pypy/changeset/e7d1efa3564f/ Log: update to cffi/2d04ac7331c7 diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -99,7 +99,7 @@ def print_error(self, operr, extra_line): space = self.space - operr.write_unraisable(space, "callback ", self.w_callable, + operr.write_unraisable(space, "cffi callback ", self.w_callable, with_traceback=True, extra_line=extra_line) def write_error_return_value(self, ll_res): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1175,7 +1175,7 @@ assert sys.stderr.getvalue() == '' assert f(10000) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Traceback (most recent call last): File "$", line $, in Zcb1 $ @@ -1187,7 +1187,7 @@ bigvalue = 20000 assert f(bigvalue) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Trying to convert the result back to C: OverflowError: integer 60000 does not fit 'short' """) From noreply at buildbot.pypy.org Mon Jan 12 23:53:33 2015 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 12 Jan 2015 23:53:33 +0100 (CET) Subject: [pypy-commit] cffi win32-ownlib: fix tests for linux Message-ID: <20150112225333.E8DE51C0976@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-ownlib Changeset: r1636:247aba97de43 Date: 2015-01-12 22:16 +0200 http://bitbucket.org/cffi/cffi/changeset/247aba97de43/ Log: fix tests for linux diff --git a/testing/test_ownlib.py b/testing/test_ownlib.py --- a/testing/test_ownlib.py +++ b/testing/test_ownlib.py @@ -10,7 +10,7 @@ 
#ifdef _WIN32 #define EXPORT __declspec(dllexport) #else -#define EXPORT export +#define EXPORT #endif EXPORT int test_getting_errno(void) { @@ -46,7 +46,7 @@ if (pt.y > prc->bottom) return 0; return 1; -} +}; EXPORT long left = 10; EXPORT long top = 20; From noreply at buildbot.pypy.org Mon Jan 12 23:53:35 2015 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 12 Jan 2015 23:53:35 +0100 (CET) Subject: [pypy-commit] cffi win32-ownlib: fix test, still fails on arm64 (libffi issue?) Message-ID: <20150112225335.0F6081C0976@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-ownlib Changeset: r1637:cafc2dd59316 Date: 2015-01-13 00:53 +0200 http://bitbucket.org/cffi/cffi/changeset/cafc2dd59316/ Log: fix test, still fails on arm64 (libffi issue?) diff --git a/testing/test_ownlib.py b/testing/test_ownlib.py --- a/testing/test_ownlib.py +++ b/testing/test_ownlib.py @@ -268,7 +268,7 @@ for i in range(4): ret = ownlib.ReturnRect(i, rect[0], rect, pt[0], rect[0], - rect[0], pt[0], rect[0]) + rect, pt[0], rect[0]) assert ret.left == ownlib.left assert ret.right == ownlib.right assert ret.top == ownlib.top From noreply at buildbot.pypy.org Tue Jan 13 10:16:53 2015 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 Jan 2015 10:16:53 +0100 (CET) Subject: [pypy-commit] pypy rewrite-unrolling: fix Message-ID: <20150113091653.31D0D1D2380@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75313:ddb633f9d3de Date: 2015-01-13 11:07 +0200 http://bitbucket.org/pypy/pypy/changeset/ddb633f9d3de/ Log: fix diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -428,8 +428,9 @@ start_state = self._do_optimize_loop(preamble, call_pure_results) assert preamble.operations[-1].getopnum() == rop.LABEL + new_inp_args = preamble.operations[-1].getarglist() 
- inliner = Inliner(inputargs, jump_args) + inliner = Inliner(inputargs, new_inp_args) loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -50,6 +50,7 @@ pure_value = OptPureValue(self, op.result) new_optpure.pure(op.getopnum(), op.getarglist(), op.result, pure_value) + # for opargs, value in old_optpure.pure_operations.items(): # if not value.is_virtual(): # pure_value = OptPureValue(self, value.box) From noreply at buildbot.pypy.org Tue Jan 13 10:16:54 2015 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 Jan 2015 10:16:54 +0100 (CET) Subject: [pypy-commit] pypy rewrite-unrolling: progress Message-ID: <20150113091654.858EA1D2380@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewrite-unrolling Changeset: r75314:278be974c29d Date: 2015-01-13 11:16 +0200 http://bitbucket.org/pypy/pypy/changeset/278be974c29d/ Log: progress diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -25,7 +25,8 @@ for arg in op.getarglist(): if arg in self.optimizer.values: value = self.getvalue(arg) - value.force_box(self) + if value.is_virtual(): + value.force_box(self) self.emit_operation(op) def setup(self): diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -861,6 +861,27 @@ label1_args.append(box) label2_args.append(box) + def pure_reverse(self, op): + if self.optpure is None: + return + 
optpure = self.optpure + if op.getopnum() == rop.INT_ADD: + optpure.pure(rop.INT_ADD, [op.getarg(1), op.getarg(0)], op.result) + # Synthesize the reverse op for optimize_default to reuse + optpure.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) + optpure.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) + elif op.getopnum() == rop.INT_SUB: + optpure.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) + optpure.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) + elif op.getopnum() == rop.FLOAT_MUL: + optpure.pure(rop.FLOAT_MUL, [op.getarg(1), op.getarg(0)], op.result) + elif op.getopnum() == rop.FLOAT_NEG: + optpure.pure(rop.FLOAT_NEG, [op.result], op.getarg(0)) + elif op.getopnum() == rop.CAST_INT_TO_PTR: + optpure.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) + elif op.getopnum() == rop.CAST_PTR_TO_INT: + optpure.pure(rop.CAST_INT_TO_PTR, [op.result], op.getarg(0)) + #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -117,9 +117,7 @@ self.make_constant_int(op.result, 0) else: self.emit_operation(op) - # Synthesize the reverse ops for optimize_default to reuse - self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0)) - self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1)) + self.optimizer.pure_reverse(op) def optimize_INT_ADD(self, op): v1 = self.getvalue(op.getarg(0)) @@ -132,10 +130,7 @@ self.make_equal_to(op.result, v1) else: self.emit_operation(op) - self.pure(rop.INT_ADD, [op.getarg(1), op.getarg(0)], op.result) - # Synthesize the reverse op for optimize_default to reuse - self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0)) - self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1)) + 
self.optimizer.pure_reverse(op) def optimize_INT_MUL(self, op): v1 = self.getvalue(op.getarg(0)) @@ -222,7 +217,7 @@ )) return self.emit_operation(op) - self.pure(rop.FLOAT_MUL, [arg2, arg1], op.result) + self.optimizer.pure_reverse(op) def optimize_FLOAT_TRUEDIV(self, op): arg1 = op.getarg(0) @@ -244,9 +239,8 @@ self.emit_operation(op) def optimize_FLOAT_NEG(self, op): - v1 = op.getarg(0) self.emit_operation(op) - self.pure(rop.FLOAT_NEG, [op.result], v1) + self.optimizer.pure_reverse(op) def optimize_guard(self, op, constbox, emit_operation=True): value = self.getvalue(op.getarg(0)) @@ -583,11 +577,11 @@ self.emit_operation(op) def optimize_CAST_PTR_TO_INT(self, op): - self.pure(rop.CAST_INT_TO_PTR, [op.result], op.getarg(0)) + self.optimizer.pure_reverse(op) self.emit_operation(op) def optimize_CAST_INT_TO_PTR(self, op): - self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0)) + self.optimizer.pure_reverse(op) self.emit_operation(op) def optimize_SAME_AS(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -20,12 +20,18 @@ self.unroller = unroller self.keybox = keybox + def is_virtual(self): + return False + def force_box(self, optforce): if self.box is None: self.box = self.keybox optforce.optimizer.reuse_pure_result(self.box) return self.box + def get_key_box(self): + return self.keybox + class Unroller(object): optimizer = None @@ -50,7 +56,7 @@ pure_value = OptPureValue(self, op.result) new_optpure.pure(op.getopnum(), op.getarglist(), op.result, pure_value) - + self.optimizer.pure_reverse(op) # for opargs, value in old_optpure.pure_operations.items(): # if not value.is_virtual(): # pure_value = OptPureValue(self, value.box) From noreply at buildbot.pypy.org Tue Jan 13 11:33:52 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 Jan 2015 11:33:52 +0100 (CET) Subject: [pypy-commit] 
stmgc c8-private-pages: add some debug prints Message-ID: <20150113103352.CDB961D2380@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1519:9ee0afe59b44 Date: 2015-01-13 09:52 +0100 http://bitbucket.org/pypy/stmgc/changeset/9ee0afe59b44/ Log: add some debug prints diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -197,7 +197,7 @@ int segnum = get_segment_of_linear_address(addr); if (segnum != STM_SEGMENT->segment_num) { fprintf(stderr, "Segmentation fault: accessing %p (seg %d) from" - " seg %d\n", addr, STM_SEGMENT->segment_num, segnum); + " seg %d\n", addr, segnum, STM_SEGMENT->segment_num); abort(); } dprintf(("-> segment: %d\n", segnum)); diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -22,6 +22,7 @@ } uintptr_t p = (pages_addr - stm_object_pages) / 4096UL; + dprintf(("setup_N_pages(%p, %lu): pagenum %lu\n", pages_addr, num, p)); while (num-->0) { page_mark_accessible(STM_SEGMENT->segment_num, p + num); } diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -87,6 +87,7 @@ static char *_allocate_small_slowpath(uint64_t size) { + dprintf(("_allocate_small_slowpath(%lu)\n", size)); long n = size / 8; struct small_free_loc_s *smallpage; struct small_free_loc_s *TLPREFIX *fl = From noreply at buildbot.pypy.org Tue Jan 13 11:33:53 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 Jan 2015 11:33:53 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: switch back to using an extra segment (seg0) for malloc-things and later for optimizing memory usage Message-ID: <20150113103353.E47371D2380@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1520:f78d4b6f968e Date: 2015-01-13 10:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/f78d4b6f968e/ Log: switch back to using an extra segment (seg0) for malloc-things and later for optimizing memory 
usage diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -132,7 +132,7 @@ /* find who has the most recent revision of our page */ int copy_from_segnum = -1; uint64_t most_recent_rev = 0; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i < NB_SEGMENTS; i++) { if (i == my_segnum) continue; @@ -350,7 +350,7 @@ /* XXX: this optimization fails in test_basic.py, bug3 */ /* OPT_ASSERT(segment_really_copied_from < (1 << NB_SEGMENTS)); */ /* int segnum; */ - /* for (segnum = 0; segnum < NB_SEGMENTS; segnum++) { */ + /* for (segnum = 1; segnum < NB_SEGMENTS; segnum++) { */ /* if (segment_really_copied_from & (1UL << segnum)) { */ /* /\* here we can actually have our own modified version, so */ /* make sure to only copy things that are not modified in our */ @@ -1030,6 +1030,7 @@ ssize_t frag_size = STM_PSEGMENT->sq_fragsizes[j]; char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, frag); + /* XXX: including the sharing segment? */ for (i = 0; i < NB_SEGMENTS; i++) { if (i == myself) continue; diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -17,7 +17,7 @@ #define NB_PAGES (2500*256) // 2500MB -#define NB_SEGMENTS STM_NB_SEGMENTS +#define NB_SEGMENTS (STM_NB_SEGMENTS+1) /* +1 for sharing seg 0 */ #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define NB_NURSERY_PAGES (STM_GC_NURSERY/4) @@ -215,7 +215,7 @@ { #ifndef NDEBUG long l; - for (l = 0; l < NB_SEGMENTS; l++) { + for (l = 1; l < NB_SEGMENTS; l++) { if (!get_priv_segment(l)->privatization_lock) return false; } @@ -228,7 +228,7 @@ static inline void acquire_all_privatization_locks() { long l; - for (l = 0; l < NB_SEGMENTS; l++) { + for (l = 1; l < NB_SEGMENTS; l++) { acquire_privatization_lock(l); } } @@ -236,7 +236,7 @@ static inline void release_all_privatization_locks() { long l; - for (l = NB_SEGMENTS-1; l >= 0; l--) { + for (l = NB_SEGMENTS-1; l >= 1; l--) { release_privatization_lock(l); } } @@ -268,7 +268,7 @@ /* 
acquire locks in global order */ int i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i < NB_SEGMENTS; i++) { if ((seg_set & (1 << i)) == 0) continue; @@ -282,7 +282,7 @@ OPT_ASSERT(seg_set < (1 << NB_SEGMENTS)); int i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i < NB_SEGMENTS; i++) { if ((seg_set & (1 << i)) == 0) continue; diff --git a/c8/stm/fprintcolor.c b/c8/stm/fprintcolor.c --- a/c8/stm/fprintcolor.c +++ b/c8/stm/fprintcolor.c @@ -8,7 +8,8 @@ char buffer[2048]; va_list ap; int result; - int size = (int)sprintf(buffer, "\033[%dm[%d,%lx] ", dprintfcolor(), + int size = (int)sprintf(buffer, "\033[%dm[%d,%d,%lx] ", + dprintfcolor(), STM_SEGMENT->segment_num, (int)getpid(), (long)pthread_self()); assert(size >= 0); diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -16,10 +16,7 @@ static void setup_N_pages(char *pages_addr, uint64_t num) { /* initialize to |N|P|N|N| */ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - acquire_privatization_lock(i); - } + acquire_all_privatization_locks(); uintptr_t p = (pages_addr - stm_object_pages) / 4096UL; dprintf(("setup_N_pages(%p, %lu): pagenum %lu\n", pages_addr, num, p)); @@ -27,9 +24,7 @@ page_mark_accessible(STM_SEGMENT->segment_num, p + num); } - for (i = NB_SEGMENTS-1; i >= 0; i--) { - release_privatization_lock(i); - } + release_all_privatization_locks(); } @@ -68,7 +63,8 @@ { /* only for tests xxx but stm_setup_prebuilt() uses this now too */ stm_char *p = allocate_outside_nursery_large(size_rounded_up); - memset(stm_object_pages + (uintptr_t)p, 0, size_rounded_up); + /* hardcode segment 1 */ + memset(get_virtual_address(STM_SEGMENT->segment_num, (object_t *)p), 0, size_rounded_up); object_t *o = (object_t *)p; o->stm_flags = GCFLAG_WRITE_BARRIER; diff --git a/c8/stm/misc.c b/c8/stm/misc.c --- a/c8/stm/misc.c +++ b/c8/stm/misc.c @@ -15,7 +15,7 @@ char *_stm_get_segment_base(long index) { - return get_segment_base(index); + return get_segment_base(index+1); 
} struct stm_priv_segment_info_s *_stm_segment(void) diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -18,8 +18,10 @@ assert(_STM_FAST_ALLOC <= NURSERY_SIZE); _stm_nursery_start = NURSERY_START; - long i; - for (i = 0; i < NB_SEGMENTS; i++) { + long i = 0; + get_segment(i)->nursery_current = (stm_char *)-1; + get_segment(i)->nursery_end = -1; + for (i = 1; i < NB_SEGMENTS; i++) { get_segment(i)->nursery_current = (stm_char *)NURSERY_START; get_segment(i)->nursery_end = NURSERY_END; } @@ -91,14 +93,13 @@ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size = stmcb_size_rounded_up((struct object_s *)realobj); - if (true /*size > GC_LAST_SMALL_SIZE*/) { + if (true || size > GC_LAST_SMALL_SIZE) { /* case 1: object is not small enough. Ask gcpage.c for an allocation via largemalloc. */ nobj = (object_t *)allocate_outside_nursery_large(size); } else { /* case "small enough" */ - abort(); nobj = (object_t *)allocate_outside_nursery_small(size); } diff --git a/c8/stm/pages.h b/c8/stm/pages.h --- a/c8/stm/pages.h +++ b/c8/stm/pages.h @@ -50,6 +50,11 @@ return get_segment_base(segnum) + pagenum * 4096; } +static inline char *get_virtual_address(long segnum, object_t *obj) +{ + return get_segment_base(segnum) + (uintptr_t)obj; +} + static inline bool get_page_status_in(long segnum, uintptr_t pagenum) { OPT_ASSERT(segnum < 8 * sizeof(struct page_shared_s)); diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -74,6 +74,7 @@ setup_signal_handler(); long i; + /* including seg0 */ for (i = 0; i < NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); @@ -195,22 +196,21 @@ if (stm_all_thread_locals == NULL) { stm_all_thread_locals = tl->next = tl->prev = tl; num = 0; - } - else { + } else { tl->next = stm_all_thread_locals; tl->prev = stm_all_thread_locals->prev; stm_all_thread_locals->prev->next = tl; stm_all_thread_locals->prev = tl; - num = (tl->prev->associated_segment_num + 1) 
% NB_SEGMENTS; + num = (tl->prev->associated_segment_num) % (NB_SEGMENTS-1); } /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own numbers automatically. */ - tl->associated_segment_num = num; + tl->associated_segment_num = num + 1; *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); - set_gs_register(get_segment_base(num)); + set_gs_register(get_segment_base(num + 1)); s_mutex_unlock(); DEBUG_EXPECT_SEGFAULT(true); diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -284,7 +284,7 @@ small_page_lists[szword] = NULL; /* process the pages that the various segments are busy filling */ - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); struct small_free_loc_s **fl = &pseg->small_malloc_data.loc_free[szword]; diff --git a/c8/stm/sync.c b/c8/stm/sync.c --- a/c8/stm/sync.c +++ b/c8/stm/sync.c @@ -13,7 +13,7 @@ pthread_mutex_t global_mutex; pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ - uint8_t in_use1[NB_SEGMENTS]; /* 1 if running a pthread */ + uint8_t in_use1[NB_SEGMENTS]; /* 1 if running a pthread, idx=0 unused */ }; char reserved[192]; } sync_ctl __attribute__((aligned(64))); @@ -110,28 +110,29 @@ assert(_has_mutex()); assert(_is_tl_registered(tl)); - int num = tl->associated_segment_num; - if (sync_ctl.in_use1[num] == 0) { + int num = tl->associated_segment_num - 1; // 0..NB_SEG-1 + OPT_ASSERT(num >= 0); + if (sync_ctl.in_use1[num+1] == 0) { /* fast-path: we can get the same segment number than the one we had before. The value stored in GS is still valid. 
*/ #ifdef STM_TESTS /* that can be optimized away, except during tests, because they use only one thread */ - set_gs_register(get_segment_base(num)); + set_gs_register(get_segment_base(num+1)); #endif - dprintf(("acquired same segment: %d\n", num)); + dprintf(("acquired same segment: %d\n", num+1)); goto got_num; } /* Look for the next free segment. If there is none, wait for the condition variable. */ int retries; - for (retries = 0; retries < NB_SEGMENTS; retries++) { - num = num % NB_SEGMENTS; - if (sync_ctl.in_use1[num] == 0) { + for (retries = 0; retries < NB_SEGMENTS-1; retries++) { + num = (num+1) % (NB_SEGMENTS-1); + if (sync_ctl.in_use1[num+1] == 0) { /* we're getting 'num', a different number. */ - dprintf(("acquired different segment: %d->%d\n", tl->associated_segment_num, num)); - tl->associated_segment_num = num; - set_gs_register(get_segment_base(num)); + dprintf(("acquired different segment: %d->%d\n", tl->associated_segment_num, num+1)); + tl->associated_segment_num = num+1; + set_gs_register(get_segment_base(num+1)); goto got_num; } } @@ -142,8 +143,9 @@ /* Return false to the caller, which will call us again */ return false; got_num: - sync_ctl.in_use1[num] = 1; - assert(STM_SEGMENT->segment_num == num); + OPT_ASSERT(num >= 0 && num < NB_SEGMENTS-1); + sync_ctl.in_use1[num+1] = 1; + assert(STM_SEGMENT->segment_num == num+1); assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; return true; @@ -171,7 +173,7 @@ bool _stm_in_transaction(stm_thread_local_t *tl) { int num = tl->associated_segment_num; - assert(0 <= num && num < NB_SEGMENTS); + assert(1 <= num && num < NB_SEGMENTS); return get_segment(num)->running_thread == tl; } @@ -184,7 +186,7 @@ void _stm_test_switch_segment(int segnum) { - set_gs_register(get_segment_base(segnum)); + set_gs_register(get_segment_base(segnum+1)); } #if STM_TESTS @@ -219,7 +221,7 @@ assert(_has_mutex()); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i < NB_SEGMENTS; i++) { if 
(get_segment(i)->nursery_end == NURSERY_END) get_segment(i)->nursery_end = NSE_SIGPAUSE; } @@ -235,7 +237,7 @@ long result = 0; int my_num = STM_SEGMENT->segment_num; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i < NB_SEGMENTS; i++) { if (i != my_num && get_priv_segment(i)->safe_point == SP_RUNNING) { assert(get_segment(i)->nursery_end <= _STM_NSE_SIGNAL_MAX); result++; @@ -252,7 +254,7 @@ assert((_safe_points_requested = false, 1)); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i < NB_SEGMENTS; i++) { assert(get_segment(i)->nursery_end != NURSERY_END); if (get_segment(i)->nursery_end == NSE_SIGPAUSE) get_segment(i)->nursery_end = NURSERY_END; diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -1,4 +1,3 @@ -import os import cffi, weakref from common import parent_dir, source_files From noreply at buildbot.pypy.org Tue Jan 13 11:33:54 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 Jan 2015 11:33:54 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix smallmalloc and prebuilt objs Message-ID: <20150113103354.E9BA11D2380@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1521:074ee9be5109 Date: 2015-01-13 11:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/074ee9be5109/ Log: fix smallmalloc and prebuilt objs diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -63,10 +63,9 @@ { /* only for tests xxx but stm_setup_prebuilt() uses this now too */ stm_char *p = allocate_outside_nursery_large(size_rounded_up); - /* hardcode segment 1 */ - memset(get_virtual_address(STM_SEGMENT->segment_num, (object_t *)p), 0, size_rounded_up); + object_t *o = (object_t *)p; - object_t *o = (object_t *)p; + memset(get_virtual_address(STM_SEGMENT->segment_num, o), 0, size_rounded_up); o->stm_flags = GCFLAG_WRITE_BARRIER; dprintf(("allocate_old(%lu): %p, seg=%d, page=%lu\n", diff --git a/c8/stm/hash_id.c 
b/c8/stm/hash_id.c --- a/c8/stm/hash_id.c +++ b/c8/stm/hash_id.c @@ -58,7 +58,7 @@ void stm_set_prebuilt_identityhash(object_t *obj, long hash) { struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(stm_object_pages, obj); + get_virtual_address(STM_SEGMENT->segment_num, obj); assert(realobj->stm_flags == GCFLAG_WRITE_BARRIER); realobj->stm_flags |= GCFLAG_HAS_SHADOW; diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -93,7 +93,7 @@ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size = stmcb_size_rounded_up((struct object_s *)realobj); - if (true || size > GC_LAST_SMALL_SIZE) { + if (size > GC_LAST_SMALL_SIZE) { /* case 1: object is not small enough. Ask gcpage.c for an allocation via largemalloc. */ nobj = (object_t *)allocate_outside_nursery_large(size); diff --git a/c8/stm/pages.h b/c8/stm/pages.h --- a/c8/stm/pages.h +++ b/c8/stm/pages.h @@ -52,7 +52,7 @@ static inline char *get_virtual_address(long segnum, object_t *obj) { - return get_segment_base(segnum) + (uintptr_t)obj; + return REAL_ADDRESS(get_segment_base(segnum), obj); } static inline bool get_page_status_in(long segnum, uintptr_t pagenum) diff --git a/c8/stm/prebuilt.c b/c8/stm/prebuilt.c --- a/c8/stm/prebuilt.c +++ b/c8/stm/prebuilt.c @@ -32,7 +32,7 @@ object_t *nobj = _stm_allocate_old(size + sizeof(long)); /* Copy the object */ - char *realnobj = REAL_ADDRESS(stm_object_pages, nobj); + char *realnobj = get_virtual_address(STM_SEGMENT->segment_num, nobj); memcpy(realnobj, (char *)objaddr, size); /* Fix the flags in the copied object, asserting that it was zero so far */ diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -67,11 +67,14 @@ /* if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - base)) */ /* goto out_of_memory; */ - setup_N_pages(uninitialized_page_stop, GCPAGE_NUM_PAGES); char *p = uninitialized_page_stop; long i; for (i = 0; i < GCPAGE_NUM_PAGES; i++) 
{ + /* accessible in seg0: */ + page_mark_accessible(0, (p - stm_object_pages) / 4096UL); + + /* add to free_uniform_pages list */ ((struct small_free_loc_s *)p)->nextpage = free_uniform_pages; free_uniform_pages = (struct small_free_loc_s *)p; p += 4096; @@ -120,6 +123,10 @@ smallpage->nextpage))) goto retry; + /* make page accessible in our segment too: */ + page_mark_accessible(STM_SEGMENT->segment_num, + ((char*)smallpage - stm_object_pages) / 4096UL); + /* Succeeded: we have a page in 'smallpage', which is not initialized so far, apart from the 'nextpage' field read above. Initialize it. @@ -174,9 +181,9 @@ object_t *_stm_allocate_old_small(ssize_t size_rounded_up) { stm_char *p = allocate_outside_nursery_small(size_rounded_up); - memset(stm_object_pages + (uintptr_t)p, 0, size_rounded_up); + object_t *o = (object_t *)p; - object_t *o = (object_t *)p; + memset(get_virtual_address(STM_SEGMENT->segment_num, o), 0, size_rounded_up); o->stm_flags = GCFLAG_WRITE_BARRIER; dprintf(("allocate_old_small(%lu): %p, seg=%d, page=%lu\n", @@ -244,6 +251,17 @@ } else if (!_smallmalloc_sweep_keep(p)) { /* the location should be freed now */ + //dprintf(("free small obj %p\n", (object_t*)(p - stm_object_pages))); +#ifdef STM_TESTS + /* fill location with 0xdd in all segs except seg0 */ + int j; + object_t *obj = (object_t*)(p - stm_object_pages); + uintptr_t page = (baseptr - stm_object_pages) / 4096UL; + for (j = 1; j < NB_SEGMENTS; j++) + if (get_page_status_in(j, page) == PAGE_ACCESSIBLE) + memset(get_virtual_address(j, obj), 0xdd, szword*8); +#endif + if (flprev == NULL) { flprev = (struct small_free_loc_s *)p; flprev->next = fl; @@ -262,6 +280,14 @@ } } if (!any_object_remaining) { + /* give page back to free_uniform_pages and thus make it + inaccessible from all other segments again (except seg0) */ + uintptr_t page = (baseptr - stm_object_pages) / 4096UL; + for (i = 1; i < NB_SEGMENTS; i++) { + if (get_page_status_in(i, page) == PAGE_ACCESSIBLE) + 
page_mark_inaccessible(i, page); + } + ((struct small_free_loc_s *)baseptr)->nextpage = free_uniform_pages; free_uniform_pages = (struct small_free_loc_s *)baseptr; } diff --git a/c8/test/test_smallmalloc.py b/c8/test/test_smallmalloc.py --- a/c8/test/test_smallmalloc.py +++ b/c8/test/test_smallmalloc.py @@ -42,27 +42,37 @@ def test_sweep_freeing_simple(self): p1 = stm_allocate_old_small(16) + self.has_been_asked_for = [] lib._stm_smallmalloc_sweep() + assert p1 in self.has_been_asked_for def test_sweep_freeing_random_subset(self): for i in range(50): + # allocate a page's worth of objs page0 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)] - assert len(set(map(pageof, page0))) == 1 - tid = lib._get_type_id(page0[0]) + assert len(set(map(pageof, page0))) == 1, "all in the same page" + tid = lib._get_type_id(page0[0]) # 58 + + # repeatedly free a subset until no objs are left in that page while len(page0) > 0: + # keep half of them around self.keep_me = set(random.sample(page0, len(page0) // 2)) self.has_been_asked_for = [] lib._stm_smallmalloc_sweep() - assert sorted(page0) == self.has_been_asked_for - page0r = [] + assert sorted(page0) == self.has_been_asked_for, "all objs were observed" + + # get list of objs that were not freed + page0remaining = [] for p in page0: if p in self.keep_me: assert lib._get_type_id(p) == tid - page0r.append(p) - else: - assert lib._get_type_id(p) != tid - page0 = page0r + page0remaining.append(p) + elif len(self.keep_me) > 0: # otherwise page not accessible from seg1 + assert lib._get_type_id(p) != tid, "should have garbage there now (0xdd)" + page0 = page0remaining + if len(page0) > 10: + # allocate one obj for noise if we do another iteration anyway p = stm_allocate_old_small(16) assert pageof(p) == pageof(page0[0]) page0.append(p) From noreply at buildbot.pypy.org Tue Jan 13 17:04:25 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 Jan 2015 17:04:25 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: 
fix validating transaction never reaching a safepoint Message-ID: <20150113160425.70A3B1D2910@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1522:fd795a050676 Date: 2015-01-13 13:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/fd795a050676/ Log: fix validating transaction never reaching a safepoint diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -274,7 +274,7 @@ is itself more recent than last_cl. This is fixed by re-validating. */ first_cl = STM_PSEGMENT->last_commit_log_entry; - if (first_cl->next == NULL) + if (first_cl->next == NULL || first_cl->next == INEV_RUNNING) break; /* Find the set of segments we need to copy from and lock them: */ From noreply at buildbot.pypy.org Tue Jan 13 17:04:26 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 Jan 2015 17:04:26 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: Trying to fix what was basically just exceeding max_map_count. Now checking return values and showing an error message Message-ID: <20150113160426.99B221D2910@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1523:f15bc3c633a0 Date: 2015-01-13 15:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/f15bc3c633a0/ Log: Trying to fix what was basically just exceeding max_map_count. 
Now checking return values and showing an error message diff --git a/c8/demo/demo_random2.c b/c8/demo/demo_random2.c --- a/c8/demo/demo_random2.c +++ b/c8/demo/demo_random2.c @@ -10,10 +10,10 @@ #include "stmgc.h" #define NUMTHREADS 3 -#define STEPS_PER_THREAD 500 -#define THREAD_STARTS 1000 // how many restarts of threads +#define STEPS_PER_THREAD 50000 +#define THREAD_STARTS 100 // how many restarts of threads #define PREBUILT_ROOTS 3 -#define FORKS 3 +#define FORKS 0 #define ACTIVE_ROOTS_SET_SIZE 100 // max num of roots created/alive in one transaction #define MAX_ROOTS_ON_SS 1000 // max on shadow stack @@ -232,11 +232,13 @@ break; case 3: // allocate fresh 'p' pushed = push_roots(); - size_t sizes[4] = {sizeof(struct node_s), - sizeof(struct node_s) + (get_rand(100000) & ~15), - sizeof(struct node_s) + 4096, - sizeof(struct node_s) + 4096*70}; - size_t size = sizes[get_rand(4)]; + size_t sizes[] = { + sizeof(struct node_s), sizeof(struct node_s)+16, + sizeof(struct node_s), sizeof(struct node_s)+16, + sizeof(struct node_s)+32, sizeof(struct node_s)+48, + sizeof(struct node_s)+32, sizeof(struct node_s)+48, + sizeof(struct node_s) + (get_rand(100000) & ~15)}; + size_t size = sizes[get_rand(sizeof(sizes) / sizeof(size_t))]; p = stm_allocate(size); ((nodeptr_t)p)->sig = SIGNATURE; ((nodeptr_t)p)->my_size = size; @@ -352,7 +354,7 @@ /* "interpreter main loop": this is one "application-frame" */ while (td.steps_left-->0 && get_rand(10) != 0) { if (td.steps_left % 8 == 0) - fprintf(stdout, "#"); + fprintf(stderr, "#"); assert(p == NULL || ((nodeptr_t)p)->sig == SIGNATURE); diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -299,6 +299,8 @@ } last_cl = cl; /* HERE */ + + acquire_privatization_lock(STM_SEGMENT->segment_num); acquire_modification_lock_set(segments_to_lock); @@ -330,6 +332,7 @@ struct stm_undo_s *end = cl->written + cl->written_count; segment_really_copied_from |= (1UL << cl->segment_num); + 
import_objects(cl->segment_num, -1, undo, end); /* here we can actually have our own modified version, so @@ -364,6 +367,7 @@ /* done with modifications */ release_modification_lock_set(segments_to_lock); + release_privatization_lock(STM_SEGMENT->segment_num); } if (needs_abort) { diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -13,7 +13,7 @@ { } -static void setup_N_pages(char *pages_addr, uint64_t num) +static void setup_N_pages(char *pages_addr, long num) { /* initialize to |N|P|N|N| */ acquire_all_privatization_locks(); @@ -21,6 +21,7 @@ uintptr_t p = (pages_addr - stm_object_pages) / 4096UL; dprintf(("setup_N_pages(%p, %lu): pagenum %lu\n", pages_addr, num, p)); while (num-->0) { + /* XXX: page_range_mark_accessible() */ page_mark_accessible(STM_SEGMENT->segment_num, p + num); } @@ -52,8 +53,8 @@ } dprintf(("allocate_outside_nursery_large(%lu): %p, page=%lu\n", - size, addr, - (uintptr_t)addr / 4096UL + END_NURSERY_PAGE)); + size, (char*)(addr - stm_object_pages), + (uintptr_t)(addr - stm_object_pages) / 4096UL)); spinlock_release(lock_growth_large); return (stm_char*)(addr - stm_object_pages); diff --git a/c8/stm/gcpage.h b/c8/stm/gcpage.h --- a/c8/stm/gcpage.h +++ b/c8/stm/gcpage.h @@ -7,5 +7,5 @@ static void setup_gcpage(void); static void teardown_gcpage(void); -static void setup_N_pages(char *pages_addr, uint64_t num); +static void setup_N_pages(char *pages_addr, long num); static stm_char *allocate_outside_nursery_large(uint64_t size); diff --git a/c8/stm/pages.c b/c8/stm/pages.c --- a/c8/stm/pages.c +++ b/c8/stm/pages.c @@ -22,7 +22,11 @@ assert(get_page_status_in(segnum, pagenum) == PAGE_NO_ACCESS); dprintf(("page_mark_accessible(%lu) in seg:%ld\n", pagenum, segnum)); - mprotect(get_virtual_page(segnum, pagenum), 4096, PROT_READ | PROT_WRITE); + dprintf(("RW(seg%ld, page%lu)\n", segnum, pagenum)); + if (mprotect(get_virtual_page(segnum, pagenum), 4096, PROT_READ | PROT_WRITE)) { + perror("mprotect"); + 
stm_fatalerror("mprotect failed! Consider running 'sysctl vm.max_map_count=16777216'"); + } /* set this flag *after* we un-protected it, because XXX later */ set_page_status_in(segnum, pagenum, PAGE_ACCESSIBLE); @@ -36,7 +40,11 @@ set_page_status_in(segnum, pagenum, PAGE_NO_ACCESS); + dprintf(("NONE(seg%ld, page%lu)\n", segnum, pagenum)); char *addr = get_virtual_page(segnum, pagenum); - madvise(get_virtual_page(segnum, pagenum), 4096, MADV_DONTNEED); - mprotect(addr, 4096, PROT_NONE); + madvise(addr, 4096, MADV_DONTNEED); + if (mprotect(addr, 4096, PROT_NONE)) { + perror("mprotect"); + stm_fatalerror("mprotect failed! Consider running 'sysctl vm.max_map_count=16777216'"); + } } diff --git a/c8/stm/pages.h b/c8/stm/pages.h --- a/c8/stm/pages.h +++ b/c8/stm/pages.h @@ -57,6 +57,9 @@ static inline bool get_page_status_in(long segnum, uintptr_t pagenum) { + /* reading page status requires "read"-lock: */ + assert(STM_PSEGMENT->privatization_lock); + OPT_ASSERT(segnum < 8 * sizeof(struct page_shared_s)); volatile struct page_shared_s *ps = (volatile struct page_shared_s *) &pages_status[pagenum - PAGE_FLAG_START]; @@ -67,6 +70,9 @@ static inline void set_page_status_in(long segnum, uintptr_t pagenum, bool status) { + /* writing page status requires "write"-lock: */ + assert(all_privatization_locks_acquired()); + OPT_ASSERT(segnum < 8 * sizeof(struct page_shared_s)); volatile struct page_shared_s *ps = (volatile struct page_shared_s *) &pages_status[pagenum - PAGE_FLAG_START]; diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -67,6 +67,10 @@ /* if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - base)) */ /* goto out_of_memory; */ + /* lock acquiring not necessary because the affected pages don't + need privatization protection. 
(but there is an assert right + now to enforce that XXXXXX) */ + acquire_all_privatization_locks(); char *p = uninitialized_page_stop; long i; @@ -79,6 +83,7 @@ free_uniform_pages = (struct small_free_loc_s *)p; p += 4096; } + release_all_privatization_locks(); } spinlock_release(gmfp_lock); @@ -123,9 +128,16 @@ smallpage->nextpage))) goto retry; + + + /* lock acquiring not necessary because the affected pages don't + need privatization protection. (but there is an assert right + now to enforce that XXXXXX) */ + acquire_all_privatization_locks(); /* make page accessible in our segment too: */ page_mark_accessible(STM_SEGMENT->segment_num, ((char*)smallpage - stm_object_pages) / 4096UL); + release_all_privatization_locks(); /* Succeeded: we have a page in 'smallpage', which is not initialized so far, apart from the 'nextpage' field read From noreply at buildbot.pypy.org Tue Jan 13 17:06:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Jan 2015 17:06:40 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the comments: nothing too special any more Message-ID: <20150113160640.714E51D2910@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75315:6cebdb2f2659 Date: 2015-01-13 17:06 +0100 http://bitbucket.org/pypy/pypy/changeset/6cebdb2f2659/ Log: Fix the comments: nothing too special any more diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -298,22 +298,14 @@ if slowpathaddr == 0 or not self.cpu.propagate_exception_descr: return # no stack check (for tests, or non-translated) # - # make a "function" that is called immediately at the start of - # an assembler function. In particular, the stack looks like: - # - # | ... 
| <-- aligned to a multiple of 16 - # | retaddr of caller | - # | my own retaddr | <-- esp - # +---------------------+ - # + # make a regular function that is called from a point near the start + # of an assembler function (after it adjusts the stack and saves + # registers). mc = codebuf.MachineCodeBlockWrapper() # if IS_X86_64: - # on the x86_64, we have to save all the registers that may - # have been used to pass arguments. Note that we pass only - # one argument, that is the frame mc.MOV_rr(edi.value, esp.value) - mc.SUB_ri(esp.value, WORD) + mc.SUB_ri(esp.value, WORD) # alignment # if IS_X86_32: mc.SUB_ri(esp.value, 2*WORD) # alignment From noreply at buildbot.pypy.org Tue Jan 13 17:43:35 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Jan 2015 17:43:35 +0100 (CET) Subject: [pypy-commit] pypy vmprof: (fijal, arigo) Message-ID: <20150113164335.86B4F1C1056@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: vmprof Changeset: r75316:73f3e2793377 Date: 2015-01-13 17:43 +0100 http://bitbucket.org/pypy/pypy/changeset/73f3e2793377/ Log: (fijal, arigo) Work towards maintaining the current frame's stack depth everywhere systematically. 
diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -47,3 +47,6 @@ THREADLOCAL_OFS = (FRAME_FIXED_SIZE - 1) * WORD assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 + +# return address, followed by FRAME_FIXED_SIZE words +DEFAULT_FRAME_BYTES = (1 + FRAME_FIXED_SIZE) * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -18,7 +18,8 @@ from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) from rpython.jit.backend.x86.arch import (FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, - PASS_ON_MY_FRAME, THREADLOCAL_OFS) + PASS_ON_MY_FRAME, THREADLOCAL_OFS, + DEFAULT_FRAME_BYTES) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, @@ -266,6 +267,10 @@ # the correct "ret" arg offset = mc.get_relative_pos() - jz_location mc.overwrite32(jz_location-4, offset) + # From now on this function is basically "merged" with + # its caller and so contains DEFAULT_FRAME_BYTES bytes + # plus my own return address, which we'll ignore next + mc.force_frame_size(DEFAULT_FRAME_BYTES + WORD) mc.ADD_ri(esp.value, WORD) mc.JMP(imm(self.propagate_exception_path)) # @@ -277,6 +282,7 @@ return # not supported (for tests, or non-translated) # self.mc = codebuf.MachineCodeBlockWrapper() + self.mc.force_frame_size(DEFAULT_FRAME_BYTES) # # read and reset the current exception @@ -298,22 +304,14 @@ if slowpathaddr == 0 or not self.cpu.propagate_exception_descr: return # no stack check (for tests, or non-translated) # - # make a "function" that is called immediately at the start of - # an assembler function. 
In particular, the stack looks like: - # - # | ... | <-- aligned to a multiple of 16 - # | retaddr of caller | - # | my own retaddr | <-- esp - # +---------------------+ - # + # make a regular function that is called from a point near the start + # of an assembler function (after it adjusts the stack and saves + # registers). mc = codebuf.MachineCodeBlockWrapper() # if IS_X86_64: - # on the x86_64, we have to save all the registers that may - # have been used to pass arguments. Note that we pass only - # one argument, that is the frame mc.MOV_rr(edi.value, esp.value) - mc.SUB_ri(esp.value, WORD) + mc.SUB_ri(esp.value, WORD) # alignment # if IS_X86_32: mc.SUB_ri(esp.value, 2*WORD) # alignment @@ -338,7 +336,10 @@ offset = mc.get_relative_pos() - jnz_location assert 0 < offset <= 127 mc.overwrite(jnz_location-1, chr(offset)) - # adjust the esp to point back to the previous return + # From now on this function is basically "merged" with + # its caller and so contains DEFAULT_FRAME_BYTES bytes + # plus my own return address, which we'll ignore next + mc.force_frame_size(DEFAULT_FRAME_BYTES + WORD) mc.ADD_ri(esp.value, WORD) mc.JMP(imm(self.propagate_exception_path)) # @@ -416,6 +417,8 @@ mc.LEA_rs(esp.value, 2 * WORD) self._pop_all_regs_from_frame(mc, [], withfloats, callee_only=True) mc.RET16_i(WORD) + # Note that wb_slowpath[0..3] end with a RET16_i, which must be + # taken care of in the caller by stack_frame_size_delta(-WORD) else: if IS_X86_32: mc.MOV_rs(edx.value, 4 * WORD) @@ -521,6 +524,7 @@ assert len(set(inputargs)) == len(inputargs) self.setup(original_loop_token) + self.mc.force_frame_size(DEFAULT_FRAME_BYTES) descr_number = compute_unique_id(faildescr) if log: operations = self._inject_debugging_code(faildescr, operations, @@ -693,6 +697,7 @@ # place, but clobber the recovery stub with a jump to the real # target. 
mc = codebuf.MachineCodeBlockWrapper() + mc.force_frame_size(DEFAULT_FRAME_BYTES) if rx86.fits_in_32bits(offset): mc.writeimm32(offset) mc.copy_to_raw_memory(adr_jump_offset) @@ -1763,6 +1768,7 @@ def generate_propagate_error_64(self): assert WORD == 8 + self.mc.force_frame_size(DEFAULT_FRAME_BYTES) startpos = self.mc.get_relative_pos() self.mc.JMP(imm(self.propagate_exception_path)) return startpos @@ -1770,6 +1776,7 @@ def generate_quick_failure(self, guardtok): """ Gather information about failure """ + self.mc.force_frame_size(DEFAULT_FRAME_BYTES) startpos = self.mc.get_relative_pos() fail_descr, target = self.store_info_on_descr(startpos, guardtok) self.mc.PUSH(imm(fail_descr)) @@ -1845,6 +1852,9 @@ def _build_failure_recovery(self, exc, withfloats=False): mc = codebuf.MachineCodeBlockWrapper() + # this is jumped to, from a stack that has DEFAULT_FRAME_BYTES + # followed by 2 extra words just pushed + mc.force_frame_size(DEFAULT_FRAME_BYTES + 2 * WORD) self.mc = mc self._push_all_regs_to_frame(mc, [], withfloats) @@ -1916,6 +1926,7 @@ self.mc.J_il(rx86.Conditions[condition], 0) else: self.mc.JMP_l(0) + self.mc.force_frame_size(DEFAULT_FRAME_BYTES) guard_token.pos_jump_offset = self.mc.get_relative_pos() - 4 self.pending_guard_tokens.append(guard_token) @@ -2006,6 +2017,7 @@ offset = jmp_location - je_location assert 0 < offset <= 127 self.mc.overwrite(je_location - 1, chr(offset)) + self.mc.force_frame_size(DEFAULT_FRAME_BYTES) # return jmp_location @@ -2090,6 +2102,8 @@ if is_frame and align_stack: mc.SUB_ri(esp.value, 16 - WORD) # erase the return address mc.CALL(imm(self.wb_slowpath[helper_num])) + if not is_frame: + mc.stack_frame_size_delta(-WORD) if is_frame and align_stack: mc.ADD_ri(esp.value, 16 - WORD) # erase the return address @@ -2326,6 +2340,7 @@ offset = self.mc.get_relative_pos() - jmp_adr1 assert 0 < offset <= 127 self.mc.overwrite(jmp_adr1-1, chr(offset)) + self.mc.force_frame_size(DEFAULT_FRAME_BYTES) # write down the tid, but not if it's 
the result of the CALL self.mc.MOV(mem(eax, 0), imm(arraydescr.tid)) # while we're at it, this line is not needed if we've done the CALL diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -38,7 +38,7 @@ if not self.fnloc_is_immediate: self.fnloc = None self.arglocs = arglocs + [fnloc] - self.current_esp = 0 # 0 or (usually) negative, counted in bytes + self.start_frame_size = self.mc._frame_size def select_call_release_gil_mode(self): AbstractCallBuilder.select_call_release_gil_mode(self) @@ -50,13 +50,15 @@ def subtract_esp_aligned(self, count): if count > 0: align = align_stack_words(count) - self.current_esp -= align * WORD self.mc.SUB_ri(esp.value, align * WORD) + def get_current_esp(self): + return self.start_frame_size - self.mc._frame_size + def restore_stack_pointer(self, target_esp=0): - if self.current_esp != target_esp: - self.mc.ADD_ri(esp.value, target_esp - self.current_esp) - self.current_esp = target_esp + current_esp = self.get_current_esp() + if current_esp != target_esp: + self.mc.ADD_ri(esp.value, target_esp - current_esp) def load_result(self): """Overridden in CallBuilder32 and CallBuilder64""" @@ -79,9 +81,10 @@ # after the rearrangements done just before, ignoring the return # value eax, if necessary assert not self.is_call_release_gil - self.change_extra_stack_depth = (self.current_esp != 0) + current_esp = self.get_current_esp() + self.change_extra_stack_depth = (current_esp != 0) if self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, -self.current_esp) + self.asm.set_extra_stack_depth(self.mc, -current_esp) noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() gcmap = self.asm._regalloc.get_gcmap([eax], noregs=noregs) self.asm.push_gcmap(self.mc, gcmap, store=True) @@ -122,7 +125,7 @@ # and 5/7 words as described for asmgcroot.ASM_FRAMEDATA, for a # total size of JIT_USE_WORDS. 
This structure is found at # [ESP+css]. - css = -self.current_esp + ( + css = -self.get_current_esp() + ( WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS)) assert css >= 2 * WORD # Save ebp @@ -307,7 +310,10 @@ else: self.mc.CALL(self.fnloc) if self.callconv != FFI_DEFAULT_ABI: - self.current_esp += self._fix_stdcall(self.callconv) + # in the STDCALL ABI, the CALL above has an effect on + # the stack depth. Adjust 'mc._frame_size'. + delta = self._fix_stdcall(self.callconv) + self.mc.stack_frame_size_delta(-delta) def _fix_stdcall(self, callconv): from rpython.rlib.clibffi import FFI_STDCALL diff --git a/rpython/jit/backend/x86/codebuf.py b/rpython/jit/backend/x86/codebuf.py --- a/rpython/jit/backend/x86/codebuf.py +++ b/rpython/jit/backend/x86/codebuf.py @@ -22,6 +22,7 @@ LocationCodeBuilder, codebuilder_cls): def __init__(self): + codebuilder_cls.__init__(self) self.init_block_builder() # a list of relative positions; for each position p, the bytes # at [p-4:p] encode an absolute address that will need to be diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -12,7 +12,7 @@ valid_addressing_size) from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.arch import (WORD, JITFRAME_FIXED_SIZE, IS_X86_32, - IS_X86_64) + IS_X86_64, DEFAULT_FRAME_BYTES) from rpython.jit.backend.x86.jump import remap_frame_layout_mixed from rpython.jit.backend.x86.regloc import (FrameLoc, RegLoc, ConstFloatLoc, FloatImmedLoc, ImmedLoc, imm, imm0, imm1, ecx, eax, edx, ebx, esi, edi, @@ -314,6 +314,7 @@ while i < len(operations): op = operations[i] self.assembler.mc.mark_op(op) + assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES self.rm.position = i self.xrm.position = i if op.has_no_side_effect() and op.result not in self.longevity: diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- 
a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -447,6 +447,9 @@ class AbstractX86CodeBuilder(object): """Abstract base class.""" + def __init__(self): + self.force_frame_size(self.WORD) + def writechar(self, char): raise NotImplementedError @@ -464,6 +467,19 @@ self.writechar(chr((imm >> 16) & 0xFF)) self.writechar(chr((imm >> 24) & 0xFF)) + def force_frame_size(self, frame_size): + self._frame_size = frame_size + + def stack_frame_size_delta(self, delta): + "Called when we generate an instruction that changes the value of ESP" + self._frame_size += delta + assert self._frame_size >= self.WORD + + def check_stack_size_at_ret(self): + assert self._frame_size == self.WORD + if not we_are_translated(): + self._frame_size = None + # ------------------------------ MOV ------------------------------ MOV_ri = insn(register(1), '\xB8', immediate(2)) @@ -474,14 +490,24 @@ INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1)) INC_j = insn(rex_w, '\xFF', orbyte(0), abs_(1)) - ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_ = common_modes(0) + AD1_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_ = common_modes(0) OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_ = common_modes(1) AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_ = common_modes(4) - SUB_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8 = common_modes(5) + SU1_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8 = common_modes(5) SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_ = common_modes(3) XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_ = common_modes(6) CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_ = common_modes(7) + def ADD_ri(self, reg, immed): + self.AD1_ri(reg, immed) + if reg == R.esp: + self.stack_frame_size_delta(-immed) + + def SUB_ri(self, reg, immed): + self.SU1_ri(reg, immed) + if reg == R.esp: + self.stack_frame_size_delta(+immed) + CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), 
immediate(2)) CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32) @@ -531,29 +557,60 @@ # ------------------------------ Misc stuff ------------------------------ NOP = insn('\x90') - RET = insn('\xC3') - RET16_i = insn('\xC2', immediate(1, 'h')) + RE1 = insn('\xC3') + RE116_i = insn('\xC2', immediate(1, 'h')) - PUSH_r = insn(rex_nw, register(1), '\x50') - PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) - PUSH_i8 = insn('\x6A', immediate(1, 'b')) - PUSH_i32 = insn('\x68', immediate(1, 'i')) - def PUSH_i(mc, immed): + def RET(self): + self.check_stack_size_at_ret() + self.RE1() + + def RET16_i(self, immed): + self.check_stack_size_at_ret() + self.RE116_i(immed) + + PUS1_r = insn(rex_nw, register(1), '\x50') + PUS1_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) + PUS1_i8 = insn('\x6A', immediate(1, 'b')) + PUS1_i32 = insn('\x68', immediate(1, 'i')) + + def PUSH_r(self, reg): + self.PUS1_r(reg) + self.stack_frame_size_delta(+self.WORD) + + def PUSH_b(self, ofs): + self.PUS1_b(ofs) + self.stack_frame_size_delta(+self.WORD) + + def PUSH_i(self, immed): if single_byte(immed): - mc.PUSH_i8(immed) + self.PUS1_i8(immed) else: - mc.PUSH_i32(immed) + self.PUS1_i32(immed) + self.stack_frame_size_delta(+self.WORD) - POP_r = insn(rex_nw, register(1), '\x58') - POP_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) + PO1_r = insn(rex_nw, register(1), '\x58') + PO1_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) + + def POP_r(self, reg): + self.PO1_r(reg) + self.stack_frame_size_delta(-self.WORD) + + def POP_b(self, ofs): + self.PO1_b(ofs) + self.stack_frame_size_delta(-self.WORD) LEA_rb = insn(rex_w, '\x8D', register(1,8), stack_bp(2)) - LEA_rs = insn(rex_w, '\x8D', register(1,8), stack_sp(2)) + LE1_rs = insn(rex_w, '\x8D', register(1,8), stack_sp(2)) LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2)) LEA_rm = insn(rex_w, '\x8D', register(1, 8), 
mem_reg_plus_const(2)) LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_(2)) + def LEA_rs(self, reg, ofs): + self.LE1_rs(reg, ofs) + if reg == R.esp: + self.stack_frame_size_delta(-ofs) + CALL_l = insn('\xE8', relative(1)) CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3))) CALL_b = insn('\xFF', orbyte(2<<3), stack_bp(1)) @@ -564,15 +621,30 @@ # register-register exchange. XCHG_rr = insn(rex_w, '\x87', register(1), register(2,8), '\xC0') - JMP_l = insn('\xE9', relative(1)) - JMP_r = insn(rex_nw, '\xFF', orbyte(4<<3), register(1), '\xC0') + JM1_l = insn('\xE9', relative(1)) + JM1_r = insn(rex_nw, '\xFF', orbyte(4<<3), register(1), '\xC0') # FIXME: J_il8 and JMP_l8 assume the caller will do the appropriate # calculation to find the displacement, but J_il does it for the caller. # We need to be consistent. - JMP_l8 = insn('\xEB', immediate(1, 'b')) + JM1_l8 = insn('\xEB', immediate(1, 'b')) J_il8 = insn(immediate(1, 'o'), '\x70', immediate(2, 'b')) J_il = insn('\x0F', immediate(1,'o'), '\x80', relative(2)) + def JMP_l(self, rel): + self.JM1_l(rel) + if not we_are_translated(): + self._frame_size = None + + def JMP_r(self, reg): + self.JM1_r(reg) + if not we_are_translated(): + self._frame_size = None + + def JMP_l8(self, rel): + self.JM1_l8(rel) + if not we_are_translated(): + self._frame_size = None + SET_ir = insn(rex_fw, '\x0F', immediate(1,'o'),'\x90', byte_register(2), '\xC0') # The 64-bit version of this, CQO, is defined in X86_64_CodeBuilder diff --git a/rpython/jit/backend/x86/test/test_callbuilder.py b/rpython/jit/backend/x86/test/test_callbuilder.py --- a/rpython/jit/backend/x86/test/test_callbuilder.py +++ b/rpython/jit/backend/x86/test/test_callbuilder.py @@ -3,7 +3,8 @@ class FakeAssembler: - mc = None + class mc: + _frame_size = 42 class _regalloc: class rm: free_regs = [ebx] diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ 
b/rpython/jit/backend/x86/test/test_rx86.py @@ -5,6 +5,7 @@ class CodeBuilderMixin(object): def __init__(self): self.buffer = [] + super(CodeBuilderMixin, self).__init__() def writechar(self, c): assert isinstance(c, str) and len(c) == 1 diff --git a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -33,6 +33,12 @@ def done(self): assert len(self.expected) == self.index + def stack_frame_size_delta(self, delta): + pass # ignored + + def check_stack_size_at_ret(self): + pass # ignored + def hexdump(s): return ' '.join(["%02X" % ord(c) for c in s]) From noreply at buildbot.pypy.org Tue Jan 13 18:40:58 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Jan 2015 18:40:58 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Implement __pypy__.reversed_dict() Message-ID: <20150113174058.ACC9C1C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75317:d6780844383e Date: 2015-01-13 18:40 +0100 http://bitbucket.org/pypy/pypy/changeset/d6780844383e/ Log: Implement __pypy__.reversed_dict() diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -78,6 +78,7 @@ 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', + 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -30,3 +30,9 @@ return space.newdict(strdict=True) else: raise 
oefmt(space.w_TypeError, "unknown type of dict %s", type) + +def reversed_dict(space, w_obj): + from pypy.objspace.std.dictmultiobject import W_DictMultiObject + if not isinstance(w_obj, W_DictMultiObject): + raise OperationError(space.w_TypeError, space.w_None) + return w_obj.nondescr_reversed_dict(space) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -258,6 +258,17 @@ """D.itervalues() -> an iterator over the values of D""" return W_DictMultiIterValuesObject(space, self.itervalues()) + def nondescr_reversed_dict(self, space): + """Not exposed directly to app-level, but via __pypy__.reversed_dict(). + """ + if self.strategy.getiterreversed is not None: + it = self.strategy.iterreversed(self) + return W_DictMultiIterKeysObject(space, it) + else: + # fall-back + w_keys = self.w_keys() + return space.call_method(w_keys, '__reversed__') + def descr_viewitems(self, space): """D.viewitems() -> a set-like object providing a view on D's items""" return W_DictViewItemsObject(space, self) @@ -501,6 +512,8 @@ def getiteritems(self, w_dict): raise NotImplementedError + getiterreversed = None # means no implementation is available + def rev_update1_dict_dict(self, w_dict, w_updatedict): iteritems = self.iteritems(w_dict) while True: @@ -621,6 +634,9 @@ def getiteritems(self, w_dict): return iter([]) + def getiterreversed(self, w_dict): + return iter([]) + # Iterator Implementation base classes @@ -709,10 +725,6 @@ 'setitem_untyped_%s' % dictimpl.__name__) class IterClassKeys(BaseKeyIterator): - def __init__(self, space, strategy, impl): - self.iterator = strategy.getiterkeys(impl) - BaseIteratorImplementation.__init__(self, space, strategy, impl) - def next_key_entry(self): for key in self.iterator: return wrapkey(self.space, key) @@ -720,10 +732,6 @@ return None class IterClassValues(BaseValueIterator): - def __init__(self, space, strategy, impl): 
- self.iterator = strategy.getitervalues(impl) - BaseIteratorImplementation.__init__(self, space, strategy, impl) - def next_value_entry(self): for value in self.iterator: return wrapvalue(self.space, value) @@ -731,10 +739,6 @@ return None class IterClassItems(BaseItemIterator): - def __init__(self, space, strategy, impl): - self.iterator = strategy.getiteritems(impl) - BaseIteratorImplementation.__init__(self, space, strategy, impl) - if override_next_item is not None: next_item_entry = override_next_item else: @@ -746,13 +750,26 @@ return None, None def iterkeys(self, w_dict): - return IterClassKeys(self.space, self, w_dict) + it = IterClassKeys(self.space, self, w_dict) + it.iterator = self.getiterkeys(w_dict) + return it def itervalues(self, w_dict): - return IterClassValues(self.space, self, w_dict) + it = IterClassValues(self.space, self, w_dict) + it.iterator = self.getitervalues(w_dict) + return it def iteritems(self, w_dict): - return IterClassItems(self.space, self, w_dict) + it = IterClassItems(self.space, self, w_dict) + it.iterator = self.getiteritems(w_dict) + return it + + if dictimpl.getiterreversed is not None: + def iterreversed(self, w_dict): + it = IterClassKeys(self.space, self, w_dict) + it.iterator = self.getiterreversed(w_dict) + return it + dictimpl.iterreversed = iterreversed @jit.look_inside_iff(lambda self, w_dict, w_updatedict: w_dict_unrolling_heuristic(w_dict)) @@ -763,6 +780,7 @@ # this is very similar to the general version, but the difference # is that it is specialized to call a specific next_item() iteritems = IterClassItems(self.space, self, w_dict) + iteritems.iterator = self.getiteritems(w_dict) w_key, w_value = iteritems.next_item() if w_key is None: return @@ -930,6 +948,9 @@ def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() + def getiterreversed(self, w_dict): + return objectmodel.reversed_dict(self.unerase(w_dict.dstorage)) + def prepare_update(self, w_dict, num_extra): 
objectmodel.prepare_dict_update(self.unerase(w_dict.dstorage), num_extra) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -254,6 +254,12 @@ values.append(k) assert values == d.values() + def test_reversed_dict(self): + import __pypy__ + for d in [{}, {1: 2, 3: 4, 5: 6}, {"a": 5, "b": 2, "c": 6}]: + assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1] + raises(TypeError, __pypy__.reversed_dict, 42) + def test_keys(self): d = {1: 2, 3: 4} kys = d.keys() diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -700,6 +700,15 @@ del a.x raises(AttributeError, "a.x") + def test_reversed_dict(self): + import __pypy__ + class X(object): + pass + x = X(); x.a = 10; x.b = 20; x.c = 30 + d = x.__dict__ + assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1] + + class AppTestWithMapDictAndCounters(object): spaceconfig = {"objspace.std.withmapdict": True, "objspace.std.withmethodcachecounter": True} From noreply at buildbot.pypy.org Tue Jan 13 18:57:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Jan 2015 18:57:36 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Kill some methods and comments on OrderedDict, Message-ID: <20150113175736.C136E1D2388@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75318:67fb2c164c5e Date: 2015-01-13 18:57 +0100 http://bitbucket.org/pypy/pypy/changeset/67fb2c164c5e/ Log: Kill some methods and comments on OrderedDict, simplifying the special parts of the API of OrderedDict down further. 
diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -17,6 +17,10 @@ except ImportError: assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} +try: + from __pypy__ import reversed_dict +except ImportError: + reversed_dict = lambda d: reversed(d.keys()) try: from thread import get_ident as _get_ident @@ -32,44 +36,17 @@ '''Dictionary that remembers insertion order. In PyPy all dicts are ordered anyway. This is mostly useful as a - placeholder to mean "this dict must be ordered even on CPython.''' + placeholder to mean "this dict must be ordered even on CPython". - def __iter__(self): - # This method allows some concurrent changes to the dictionary - # while iterating. The annoying part is that the exact allowed - # changes are messy to define and different than CPython's own - # messy definition (which the docs have nothing to say about). - # For now, we'll suppose it is good enough. Precisely: we - # iterate over the list of keys grabbed at the start; we return - # all keys that are still in the dictionary at the time we - # reach them. This is a simple rule, but if a key is deleted - # and re-added, this method will return it in its old position, - # which is arguably wrong. Also, any newly-added key is never - # returned, unlike CPython (which usually returns them, but not - # always). - for k in dict.keys(self): - if k in self: - yield k + Known difference: iterating over an OrderedDict which is being + concurrently modified raises RuntimeError in PyPy. In CPython + instead we get some behavior that appears reasonable in some + cases but is nonsensical in other cases. This is officially + forbidden by the CPython docs, so we forbid it explicitly for now. 
+ ''' def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - for k in reversed(dict.keys(self)): - if k in self: - yield k - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) pairs in od' - for k in self: - yield (k, self[k]) + return reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. @@ -111,17 +88,6 @@ 'od.copy() -> a shallow copy of od' return self.__class__(self) - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. - If not specified, the value defaults to None. - - ''' - self = cls() - for key in iterable: - self[key] = value - return self - def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. 
From noreply at buildbot.pypy.org Tue Jan 13 19:03:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Jan 2015 19:03:22 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Add another test Message-ID: <20150113180322.E82691D23E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75319:09d278245a93 Date: 2015-01-13 19:03 +0100 http://bitbucket.org/pypy/pypy/changeset/09d278245a93/ Log: Add another test diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -260,6 +260,15 @@ assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1] raises(TypeError, __pypy__.reversed_dict, 42) + def test_reversed_dict_runtimeerror(self): + import __pypy__ + d = {1: 2, 3: 4, 5: 6} + it = __pypy__.reversed_dict(d) + key = it.next() + assert key in [1, 3, 5] + del d[key] + raises(RuntimeError, it.next) + def test_keys(self): d = {1: 2, 3: 4} kys = d.keys() From noreply at buildbot.pypy.org Tue Jan 13 19:10:27 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Jan 2015 19:10:27 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: translation fix: revert a few changes from d6780844383e Message-ID: <20150113181027.C4D4A1C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75320:8152cc2d178c Date: 2015-01-13 19:07 +0100 http://bitbucket.org/pypy/pypy/changeset/8152cc2d178c/ Log: translation fix: revert a few changes from d6780844383e diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -725,6 +725,10 @@ 'setitem_untyped_%s' % dictimpl.__name__) class IterClassKeys(BaseKeyIterator): + def __init__(self, space, strategy, impl): + self.iterator = strategy.getiterkeys(impl) + 
BaseIteratorImplementation.__init__(self, space, strategy, impl) + def next_key_entry(self): for key in self.iterator: return wrapkey(self.space, key) @@ -732,6 +736,10 @@ return None class IterClassValues(BaseValueIterator): + def __init__(self, space, strategy, impl): + self.iterator = strategy.getitervalues(impl) + BaseIteratorImplementation.__init__(self, space, strategy, impl) + def next_value_entry(self): for value in self.iterator: return wrapvalue(self.space, value) @@ -739,6 +747,10 @@ return None class IterClassItems(BaseItemIterator): + def __init__(self, space, strategy, impl): + self.iterator = strategy.getiteritems(impl) + BaseIteratorImplementation.__init__(self, space, strategy, impl) + if override_next_item is not None: next_item_entry = override_next_item else: @@ -749,26 +761,29 @@ else: return None, None + class IterClassReversed(BaseKeyIterator): + def __init__(self, space, strategy, impl): + self.iterator = strategy.getiterreversed(impl) + BaseIteratorImplementation.__init__(self, space, strategy, impl) + + def next_key_entry(self): + for key in self.iterator: + return wrapkey(self.space, key) + else: + return None + def iterkeys(self, w_dict): - it = IterClassKeys(self.space, self, w_dict) - it.iterator = self.getiterkeys(w_dict) - return it + return IterClassKeys(self.space, self, w_dict) def itervalues(self, w_dict): - it = IterClassValues(self.space, self, w_dict) - it.iterator = self.getitervalues(w_dict) - return it + return IterClassValues(self.space, self, w_dict) def iteritems(self, w_dict): - it = IterClassItems(self.space, self, w_dict) - it.iterator = self.getiteritems(w_dict) - return it + return IterClassItems(self.space, self, w_dict) if dictimpl.getiterreversed is not None: def iterreversed(self, w_dict): - it = IterClassKeys(self.space, self, w_dict) - it.iterator = self.getiterreversed(w_dict) - return it + return IterClassReversed(self.space, self, w_dict) dictimpl.iterreversed = iterreversed @jit.look_inside_iff(lambda 
self, w_dict, w_updatedict: @@ -780,7 +795,6 @@ # this is very similar to the general version, but the difference # is that it is specialized to call a specific next_item() iteritems = IterClassItems(self.space, self, w_dict) - iteritems.iterator = self.getiteritems(w_dict) w_key, w_value = iteritems.next_item() if w_key is None: return From noreply at buildbot.pypy.org Tue Jan 13 19:35:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Jan 2015 19:35:26 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: more translation fixes Message-ID: <20150113183526.D1FF91D3640@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75321:87900ff58c9a Date: 2015-01-13 19:21 +0100 http://bitbucket.org/pypy/pypy/changeset/87900ff58c9a/ Log: more translation fixes diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -261,7 +261,7 @@ def nondescr_reversed_dict(self, space): """Not exposed directly to app-level, but via __pypy__.reversed_dict(). 
""" - if self.strategy.getiterreversed is not None: + if self.strategy.has_iterreversed: it = self.strategy.iterreversed(self) return W_DictMultiIterKeysObject(space, it) else: @@ -512,7 +512,8 @@ def getiteritems(self, w_dict): raise NotImplementedError - getiterreversed = None # means no implementation is available + has_iterreversed = False + # no 'getiterreversed': no default implementation available def rev_update1_dict_dict(self, w_dict, w_updatedict): iteritems = self.iteritems(w_dict) @@ -781,10 +782,11 @@ def iteritems(self, w_dict): return IterClassItems(self.space, self, w_dict) - if dictimpl.getiterreversed is not None: + if hasattr(dictimpl, 'getiterreversed'): def iterreversed(self, w_dict): return IterClassReversed(self.space, self, w_dict) dictimpl.iterreversed = iterreversed + dictimpl.has_iterreversed = True @jit.look_inside_iff(lambda self, w_dict, w_updatedict: w_dict_unrolling_heuristic(w_dict)) From noreply at buildbot.pypy.org Tue Jan 13 19:35:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Jan 2015 19:35:28 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Write docstring for __pypy__.reversed_dict(). Message-ID: <20150113183528.14CE91D3640@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75322:0f19f34109da Date: 2015-01-13 19:34 +0100 http://bitbucket.org/pypy/pypy/changeset/0f19f34109da/ Log: Write docstring for __pypy__.reversed_dict(). diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -32,6 +32,14 @@ raise oefmt(space.w_TypeError, "unknown type of dict %s", type) def reversed_dict(space, w_obj): + """Enumerate the keys in a dictionary object in reversed order. + + This is a __pypy__ function instead of being simply done by calling + reversed(), for CPython compatibility: dictionaries are only ordered + on PyPy. 
You should use the collections.OrderedDict class for cases + where ordering is important. That class implements __reversed__ by + calling __pypy__.reversed_dict(). + """ from pypy.objspace.std.dictmultiobject import W_DictMultiObject if not isinstance(w_obj, W_DictMultiObject): raise OperationError(space.w_TypeError, space.w_None) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -757,8 +757,11 @@ def reversed_dict(d): """Equivalent to reversed(ordered_dict), but works also for regular dicts.""" - if not we_are_translated() and type(d) is dict: - d = list(d) + # note that there is also __pypy__.reversed_dict(), which we could + # try to use here if we're not translated and running on top of pypy, + # but that seems a bit pointless + if not we_are_translated(): + d = d.keys() return reversed(d) From noreply at buildbot.pypy.org Wed Jan 14 10:03:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 10:03:01 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150114090301.8EDCC1D2812@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r560:a6dfae537f39 Date: 2015-01-14 10:03 +0100 http://bitbucket.org/pypy/pypy.org/changeset/a6dfae537f39/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $58557 of $105000 (55.8%) + $58605 of $105000 (55.8%)
diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -15,7 +15,7 @@ - $51176 of $60000 (85.3%) + $51195 of $60000 (85.3%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $21661 of $80000 (27.1%) + $21681 of $80000 (27.1%)
From noreply at buildbot.pypy.org Wed Jan 14 14:08:40 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Jan 2015 14:08:40 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix for tests not aborting in stm_validate if there is an inevitable transaction Message-ID: <20150114130840.9EF291D287D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1524:a5a56fe9b07e Date: 2015-01-14 13:39 +0100 http://bitbucket.org/pypy/stmgc/changeset/a5a56fe9b07e/ Log: fix for tests not aborting in stm_validate if there is an inevitable transaction diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -274,9 +274,12 @@ is itself more recent than last_cl. This is fixed by re-validating. */ first_cl = STM_PSEGMENT->last_commit_log_entry; - if (first_cl->next == NULL || first_cl->next == INEV_RUNNING) + if (first_cl->next == NULL) break; + if (first_cl->next == INEV_RUNNING) + _stm_collectable_safe_point(); /* otherwise, we may deadlock */ + /* Find the set of segments we need to copy from and lock them: */ uint64_t segments_to_lock = 1UL << my_segnum; cl = first_cl; @@ -298,6 +301,7 @@ } } last_cl = cl; + /* HERE */ acquire_privatization_lock(STM_SEGMENT->segment_num); From noreply at buildbot.pypy.org Wed Jan 14 14:08:42 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Jan 2015 14:08:42 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: start with largemalloc support (WIP) Message-ID: <20150114130842.21E961D287D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1525:349d0e3910ea Date: 2015-01-14 13:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/349d0e3910ea/ Log: start with largemalloc support (WIP) diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -144,11 +144,18 @@ most_recent_rev = log_entry->rev_num; } } - OPT_ASSERT(copy_from_segnum != -1 && copy_from_segnum != my_segnum); + OPT_ASSERT(copy_from_segnum 
!= my_segnum); - /* make our page private */ + /* make our page write-ready */ page_mark_accessible(my_segnum, pagenum); - assert(get_page_status_in(my_segnum, pagenum) == PAGE_ACCESSIBLE); + + if (copy_from_segnum == -1) { + /* this page is only accessible in the sharing segment so far (new + allocation). We can thus simply mark it accessible here and + not care about its contents so far. */ + release_all_privatization_locks(); + return; + } /* before copying anything, acquire modification locks from our and the other segment */ diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -2,27 +2,47 @@ # error "must be compiled via stmgc.c" #endif +static struct list_s *testing_prebuilt_objs = NULL; +static struct tree_s *tree_prebuilt_objs = NULL; /* XXX refactor */ + static void setup_gcpage(void) { + char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; + uintptr_t length = (NB_PAGES - END_NURSERY_PAGE) * 4096UL; + _stm_largemalloc_init_arena(base, length); + uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; uninitialized_page_stop = uninitialized_page_start + NB_SHARED_PAGES * 4096UL; } static void teardown_gcpage(void) { + LIST_FREE(testing_prebuilt_objs); + if (tree_prebuilt_objs != NULL) { + tree_free(tree_prebuilt_objs); + tree_prebuilt_objs = NULL; + } } + + static void setup_N_pages(char *pages_addr, long num) { - /* initialize to |N|P|N|N| */ + /* make pages accessible in sharing segment only (pages already + PROT_READ/WRITE (see setup.c), but not marked accessible as page + status). */ + + /* lock acquiring maybe not necessary because the affected pages don't + need privatization protection. 
(but there is an assert right + now to enforce that XXXXXX) */ acquire_all_privatization_locks(); uintptr_t p = (pages_addr - stm_object_pages) / 4096UL; dprintf(("setup_N_pages(%p, %lu): pagenum %lu\n", pages_addr, num, p)); while (num-->0) { /* XXX: page_range_mark_accessible() */ - page_mark_accessible(STM_SEGMENT->segment_num, p + num); + page_mark_accessible(0, p + num); } release_all_privatization_locks(); @@ -33,14 +53,23 @@ static stm_char *allocate_outside_nursery_large(uint64_t size) { - /* XXX: real allocation */ + /* Allocate the object with largemalloc.c from the lower addresses. */ + char *addr = _stm_large_malloc(size); + if (addr == NULL) + stm_fatalerror("not enough memory!"); + + if (LIKELY(addr + size <= uninitialized_page_start)) + return (stm_char*)(addr - stm_object_pages); + + + /* uncommon case: need to initialize some more pages */ spinlock_acquire(lock_growth_large); - char *addr = uninitialized_page_start; char *start = uninitialized_page_start; - if (addr + size > start) { /* XXX: always for now */ + if (addr + size > start) { uintptr_t npages; - npages = (addr + size - start) / 4096UL + 1; + npages = (addr + size - start) / 4096UL; + npages += GCPAGE_NUM_PAGES; if (uninitialized_page_stop - start < npages * 4096UL) { stm_fatalerror("out of memory!"); /* XXX */ } diff --git a/c8/stm/largemalloc.c b/c8/stm/largemalloc.c new file mode 100644 --- /dev/null +++ b/c8/stm/largemalloc.c @@ -0,0 +1,623 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + +/* This contains a lot of inspiration from malloc() in the GNU C Library. + More precisely, this is (a subset of) the part that handles large + blocks, which in our case means at least 288 bytes. It is actually + a general allocator, although it doesn't contain any of the small- + or medium-block support that are also present in the GNU C Library. +*/ + +#define largebin_index(sz) \ + (((sz) < (48 << 6)) ? ((sz) >> 6): /* 0 - 47 */ \ + ((sz) < (24 << 9)) ? 
42 + ((sz) >> 9): /* 48 - 65 */ \ + ((sz) < (12 << 12)) ? 63 + ((sz) >> 12): /* 66 - 74 */ \ + ((sz) < (6 << 15)) ? 74 + ((sz) >> 15): /* 75 - 79 */ \ + ((sz) < (3 << 18)) ? 80 + ((sz) >> 18): /* 80 - 82 */ \ + 83) +#define N_BINS 84 +#define LAST_BIN_INDEX(sz) ((sz) >= (3 << 18)) + +typedef struct dlist_s { + struct dlist_s *next; /* a circular doubly-linked list */ + struct dlist_s *prev; +} dlist_t; + +typedef struct ulist_s { + struct ulist_s *up; /* a non-circular doubly-linked list */ + struct ulist_s *down; +} ulist_t; + +typedef struct malloc_chunk { + size_t prev_size; /* - if the previous chunk is free: size of its data + - otherwise, if this chunk is free: 1 + - otherwise, 0. */ + size_t size; /* size of the data in this chunk */ + + dlist_t d; /* if free: a doubly-linked list 'largebins' */ + /* if not free: the user data starts here */ + ulist_t u; /* if free, if unsorted: up==UU_UNSORTED + if free, if sorted: a doubly-linked list */ + + /* The chunk has a total size of 'size'. It is immediately followed + in memory by another chunk. This list ends with the last "chunk" + being actually only two words long, with END_MARKER as 'size'. + Both this last chunk and the theoretical chunk before the first + one are considered "not free". */ +} mchunk_t; + +#define UU_UNSORTED ((ulist_t *) 1) +#define THIS_CHUNK_FREE 1 +#define BOTH_CHUNKS_USED 0 +#define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) +#define END_MARKER 0xDEADBEEF +#define MIN_ALLOC_SIZE (sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) + +#define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) +#define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) +#define updown2chunk(p) chunk_at_offset(p, \ + -(CHUNK_HEADER_SIZE + sizeof(dlist_t))) + +static mchunk_t *next_chunk(mchunk_t *p) +{ + return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size); +} + + +/* The free chunks are stored in "bins". Each bin is a doubly-linked + list of chunks. 
There are 84 bins, with largebin_index() giving the + correspondence between sizes and bin indices. + + Each free chunk is preceeded in memory by a non-free chunk (or no + chunk at all). Each free chunk is followed in memory by a non-free + chunk (or no chunk at all). Chunks are consolidated with their + neighbors to ensure this. + + In each bin's doubly-linked list, chunks are sorted by their size in + decreasing order (if you follow 'largebins[n].next', + 'largebins[n].next->next', etc.). At the end of this list are some + unsorted chunks. All unsorted chunks are after all sorted chunks. + Unsorted chunks are distinguished by having 'u.up == UU_UNSORTED'. + + Note that if the user always calls large_malloc() with a large + enough argument, then the few bins corresponding to smaller values + will never be sorted at all. They are still populated with the + fragments of space between bigger allocations. + + Following the 'd' linked list, we get only one chunk of every size. + The additional chunks of a given size are linked "vertically" in + the secondary 'u' doubly-linked list. + + + +-----+ + | 296 | + +-----+ + ^ | + | v + +-----+ +-----+ + | 296 | | 288 | + +-----+ +-----+ + ^ | ^ | UU_UNSORTED + | v | v | + largebins +-----+ +-----+ +-----+ +-----+ largebins + [4].next <-> | 304 | <-> | 296 | <-> | 288 | <-> | 296 | <-> [4].prev + +-----+ +-----+ +-----+ +-----+ + +*/ + + +static struct { + int lock; + mchunk_t *first_chunk, *last_chunk; + dlist_t largebins[N_BINS]; +} lm __attribute__((aligned(64))); + + +static void lm_lock(void) +{ + spinlock_acquire(lm.lock); +} + +static void lm_unlock(void) +{ + spinlock_release(lm.lock); +} + + +static void insert_unsorted(mchunk_t *new) +{ + size_t index = LAST_BIN_INDEX(new->size) ? 
N_BINS - 1 + : largebin_index(new->size); + new->d.next = &lm.largebins[index]; + new->d.prev = lm.largebins[index].prev; + new->d.prev->next = &new->d; + new->u.up = UU_UNSORTED; + new->u.down = NULL; + lm.largebins[index].prev = &new->d; +} + +static int compare_chunks(const void *vchunk1, const void *vchunk2) +{ + /* sort by size */ + mchunk_t *chunk1 = *(mchunk_t *const *)vchunk1; + mchunk_t *chunk2 = *(mchunk_t *const *)vchunk2; + if (chunk1->size < chunk2->size) + return -1; + if (chunk1->size == chunk2->size) + return 0; + else + return +1; +} + +#define MAX_STACK_COUNT 64 + +static void really_sort_bin(size_t index) +{ + dlist_t *unsorted = lm.largebins[index].prev; + dlist_t *end = &lm.largebins[index]; + dlist_t *scan = unsorted->prev; + size_t count = 1; + while (scan != end && data2chunk(scan)->u.up == UU_UNSORTED) { + scan = scan->prev; + ++count; + } + end->prev = scan; + scan->next = end; + + mchunk_t *chunk1; + mchunk_t *chunk_array[MAX_STACK_COUNT]; + mchunk_t **chunks = chunk_array; + + if (count == 1) { + chunk1 = data2chunk(unsorted); /* common case */ + count = 0; + } + else { + if (count > MAX_STACK_COUNT) { + chunks = malloc(count * sizeof(mchunk_t *)); + if (chunks == NULL) { + stm_fatalerror("out of memory"); // XXX + } + } + size_t i; + for (i = 0; i < count; i++) { + chunks[i] = data2chunk(unsorted); + unsorted = unsorted->prev; + } + assert(unsorted == scan); + qsort(chunks, count, sizeof(mchunk_t *), compare_chunks); + + chunk1 = chunks[--count]; + } + size_t search_size = chunk1->size; + dlist_t *head = lm.largebins[index].next; + + while (1) { + if (head == end || data2chunk(head)->size < search_size) { + /* insert 'chunk1' here, before the current head */ + head->prev->next = &chunk1->d; + chunk1->d.prev = head->prev; + head->prev = &chunk1->d; + chunk1->d.next = head; + chunk1->u.up = NULL; + chunk1->u.down = NULL; + head = &chunk1->d; + } + else if (data2chunk(head)->size == search_size) { + /* insert 'chunk1' vertically in the 'u' 
list */ + ulist_t *uhead = &data2chunk(head)->u; + chunk1->u.up = uhead->up; + chunk1->u.down = uhead; + if (uhead->up != NULL) + uhead->up->down = &chunk1->u; + uhead->up = &chunk1->u; +#ifndef NDEBUG + chunk1->d.next = (dlist_t *)0x42; /* not used */ + chunk1->d.prev = (dlist_t *)0x42; +#endif + } + else { + head = head->next; + continue; + } + if (count == 0) + break; /* all done */ + chunk1 = chunks[--count]; + search_size = chunk1->size; + } + + if (chunks != chunk_array) + free(chunks); +} + +static void sort_bin(size_t index) +{ + dlist_t *last = lm.largebins[index].prev; + if (last != &lm.largebins[index] && data2chunk(last)->u.up == UU_UNSORTED) + really_sort_bin(index); +} + +static void unlink_chunk(mchunk_t *mscan) +{ + if (mscan->u.down != NULL) { + /* unlink mscan from the vertical list 'u' */ + ulist_t *up = mscan->u.up; + ulist_t *down = mscan->u.down; + down->up = up; + if (up != NULL) up->down = down; + } + else { + dlist_t *prev = mscan->d.prev; + dlist_t *next = mscan->d.next; + if (mscan->u.up == NULL || mscan->u.up == UU_UNSORTED) { + /* unlink mscan from the doubly-linked list 'd' */ + next->prev = prev; + prev->next = next; + } + else { + /* relink in the 'd' list the item above me */ + mchunk_t *above = updown2chunk(mscan->u.up); + next->prev = &above->d; + prev->next = &above->d; + above->d.next = next; + above->d.prev = prev; + above->u.down = NULL; + } + } +} + +char *_stm_large_malloc(size_t request_size) +{ + /* 'request_size' should already be a multiple of the word size here */ + assert((request_size & (sizeof(char *)-1)) == 0); + + /* it can be very small, but we need to ensure a minimal size + (currently 32 bytes) */ + if (request_size < MIN_ALLOC_SIZE) + request_size = MIN_ALLOC_SIZE; + + lm_lock(); + + size_t index = largebin_index(request_size); + sort_bin(index); + + /* scan through the chunks of current bin in reverse order + to find the smallest that fits. 
*/ + dlist_t *scan = lm.largebins[index].prev; + dlist_t *end = &lm.largebins[index]; + mchunk_t *mscan; + while (scan != end) { + mscan = data2chunk(scan); + assert(mscan->prev_size == THIS_CHUNK_FREE); + assert(next_chunk(mscan)->prev_size == mscan->size); + assert(IMPLY(mscan->d.prev != end, + data2chunk(mscan->d.prev)->size > mscan->size)); + + if (mscan->size >= request_size) + goto found; + scan = mscan->d.prev; + } + + /* search now through all higher bins. We only need to take the + smallest item of the first non-empty bin, as it will be large + enough. */ + while (++index < N_BINS) { + if (lm.largebins[index].prev != &lm.largebins[index]) { + /* non-empty bin. */ + sort_bin(index); + scan = lm.largebins[index].prev; + mscan = data2chunk(scan); + goto found; + } + } + + /* not enough memory. */ + lm_unlock(); + return NULL; + + found: + assert(mscan->size >= request_size); + assert(mscan->u.up != UU_UNSORTED); + + if (mscan->u.up != NULL) { + /* fast path: grab the item that is just above, to avoid needing + to rearrange the 'd' list */ + mchunk_t *above = updown2chunk(mscan->u.up); + ulist_t *two_above = above->u.up; + mscan->u.up = two_above; + if (two_above != NULL) two_above->down = &mscan->u; + mscan = above; + } + else { + unlink_chunk(mscan); + } + + size_t remaining_size = mscan->size - request_size; + if (remaining_size < sizeof(struct malloc_chunk)) { + next_chunk(mscan)->prev_size = BOTH_CHUNKS_USED; + request_size = mscan->size; + } + else { + /* only part of the chunk is being used; reduce the size + of 'mscan' down to 'request_size', and create a new + chunk of the 'remaining_size' afterwards */ + mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + + request_size); + new->prev_size = THIS_CHUNK_FREE; + size_t remaining_data_size = remaining_size - CHUNK_HEADER_SIZE; + new->size = remaining_data_size; + next_chunk(new)->prev_size = remaining_data_size; + insert_unsorted(new); + } + mscan->size = request_size; + mscan->prev_size = 
BOTH_CHUNKS_USED; +#ifndef NDEBUG + memset((char *)&mscan->d, 0xda, request_size); +#endif + + lm_unlock(); + + return (char *)&mscan->d; +} + +static void _large_free(mchunk_t *chunk) +{ + assert((chunk->size & (sizeof(char *) - 1)) == 0); + assert(chunk->prev_size != THIS_CHUNK_FREE); + + /* 'size' is at least MIN_ALLOC_SIZE */ + +#ifndef NDEBUG + { + char *data = (char *)&chunk->d; + assert(chunk->size >= sizeof(dlist_t)); + assert(chunk->size <= (((char *)lm.last_chunk) - data)); + memset(data, 0xDE, chunk->size); + } +#endif + + /* try to merge with the following chunk in memory */ + size_t msize = chunk->size + CHUNK_HEADER_SIZE; + mchunk_t *mscan = chunk_at_offset(chunk, msize); + + if (mscan->prev_size == BOTH_CHUNKS_USED) { + assert((mscan->size & (sizeof(char *) - 1)) == 0); + mscan->prev_size = chunk->size; + } + else { + size_t fsize = mscan->size; + mchunk_t *fscan = chunk_at_offset(mscan, fsize + CHUNK_HEADER_SIZE); + + /* unlink the following chunk */ + unlink_chunk(mscan); +#ifndef NDEBUG + mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ + mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ +#endif + + /* merge the two chunks */ + assert(fsize == fscan->prev_size); + fsize += msize; + fscan->prev_size = fsize; + chunk->size = fsize; + } + + /* try to merge with the previous chunk in memory */ + if (chunk->prev_size == BOTH_CHUNKS_USED) { + chunk->prev_size = THIS_CHUNK_FREE; + } + else { + assert((chunk->prev_size & (sizeof(char *) - 1)) == 0); + + /* get at the previous chunk */ + msize = chunk->prev_size + CHUNK_HEADER_SIZE; + mscan = chunk_at_offset(chunk, -msize); + assert(mscan->prev_size == THIS_CHUNK_FREE); + assert(mscan->size == chunk->prev_size); + + /* unlink the previous chunk */ + unlink_chunk(mscan); + + /* merge the two chunks */ + mscan->size = msize + chunk->size; + next_chunk(mscan)->prev_size = mscan->size; + + assert(chunk->prev_size = (size_t)-1); + assert(chunk->size = (size_t)-1); + chunk = mscan; + } + + 
insert_unsorted(chunk); +} + +void _stm_large_free(char *data) +{ + lm_lock(); + _large_free(data2chunk(data)); + lm_unlock(); +} + + +void _stm_large_dump(void) +{ + lm_lock(); + char *data = ((char *)lm.first_chunk) + 16; + size_t prev_size_if_free = 0; + fprintf(stderr, "\n"); + while (1) { + assert((((uintptr_t)data) & 7) == 0); /* alignment */ + fprintf(stderr, "[ %p: %zu", data - 16, *(size_t*)(data - 16)); + if (prev_size_if_free == 0) { + assert(*(size_t*)(data - 16) == THIS_CHUNK_FREE || + *(size_t*)(data - 16) == BOTH_CHUNKS_USED); + if (*(size_t*)(data - 16) == THIS_CHUNK_FREE) + prev_size_if_free = (*(size_t*)(data - 8)); + } + else { + assert(*(size_t*)(data - 16) == prev_size_if_free); + prev_size_if_free = 0; + } + if (*(size_t*)(data - 8) == END_MARKER) + break; + if (prev_size_if_free) { + fprintf(stderr, " \t(up %p / down %p)", + *(void **)(data + 16), *(void **)(data + 24)); + } + fprintf(stderr, "\n %p: %zu ]", data - 8, *(size_t*)(data - 8)); + if (prev_size_if_free) { + fprintf(stderr, "\t(prev %p <-> next %p)\n", + *(void **)(data + 8), *(void **)data); + } + else { + fprintf(stderr, "\n"); + } + assert(*(ssize_t*)(data - 8) >= 16); + data += *(size_t*)(data - 8); + data += 16; + } + fprintf(stderr, "\n %p: end. 
]\n\n", data - 8); + assert(data - 16 == (char *)lm.last_chunk); + lm_unlock(); +} + +char *_stm_largemalloc_data_start(void) +{ + return (char *)lm.first_chunk; +} + +#ifdef STM_LARGEMALLOC_TEST +bool (*_stm_largemalloc_keep)(char *data); /* a hook for tests */ +#endif + +void _stm_largemalloc_init_arena(char *data_start, size_t data_size) +{ + int i; + for (i = 0; i < N_BINS; i++) { + lm.largebins[i].prev = &lm.largebins[i]; + lm.largebins[i].next = &lm.largebins[i]; + } + + assert(data_size >= 2 * sizeof(struct malloc_chunk)); + assert((data_size & 31) == 0); + lm.first_chunk = (mchunk_t *)data_start; + lm.first_chunk->prev_size = THIS_CHUNK_FREE; + lm.first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; + lm.last_chunk = chunk_at_offset(lm.first_chunk, + data_size - CHUNK_HEADER_SIZE); + lm.last_chunk->prev_size = lm.first_chunk->size; + lm.last_chunk->size = END_MARKER; + assert(lm.last_chunk == next_chunk(lm.first_chunk)); + lm.lock = 0; + + insert_unsorted(lm.first_chunk); + +#ifdef STM_LARGEMALLOC_TEST + _stm_largemalloc_keep = NULL; +#endif +} + +int _stm_largemalloc_resize_arena(size_t new_size) +{ + int result = 0; + lm_lock(); + + if (new_size < 2 * sizeof(struct malloc_chunk)) + goto fail; + OPT_ASSERT((new_size & 31) == 0); + + new_size -= CHUNK_HEADER_SIZE; + mchunk_t *new_last_chunk = chunk_at_offset(lm.first_chunk, new_size); + mchunk_t *old_last_chunk = lm.last_chunk; + size_t old_size = ((char *)old_last_chunk) - (char *)lm.first_chunk; + + if (new_size < old_size) { + /* check if there is enough free space at the end to allow + such a reduction */ + size_t lsize = lm.last_chunk->prev_size; + assert(lsize != THIS_CHUNK_FREE); + if (lsize == BOTH_CHUNKS_USED) + goto fail; + lsize += CHUNK_HEADER_SIZE; + mchunk_t *prev_chunk = chunk_at_offset(lm.last_chunk, -lsize); + if (((char *)new_last_chunk) < ((char *)prev_chunk) + + sizeof(struct malloc_chunk)) + goto fail; + + /* unlink the prev_chunk from the doubly-linked list */ + 
unlink_chunk(prev_chunk); + + /* reduce the prev_chunk */ + assert(prev_chunk->size == lm.last_chunk->prev_size); + prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk + - CHUNK_HEADER_SIZE; + + /* make a fresh-new last chunk */ + new_last_chunk->prev_size = prev_chunk->size; + new_last_chunk->size = END_MARKER; + lm.last_chunk = new_last_chunk; + assert(lm.last_chunk == next_chunk(prev_chunk)); + + insert_unsorted(prev_chunk); + } + else if (new_size > old_size) { + /* make the new last chunk first, with only the extra size */ + mchunk_t *old_last_chunk = lm.last_chunk; + old_last_chunk->size = (new_size - old_size) - CHUNK_HEADER_SIZE; + new_last_chunk->prev_size = BOTH_CHUNKS_USED; + new_last_chunk->size = END_MARKER; + lm.last_chunk = new_last_chunk; + assert(lm.last_chunk == next_chunk(old_last_chunk)); + + /* then free the last_chunk (turn it from "used" to "free) */ + _large_free(old_last_chunk); + } + + result = 1; + fail: + lm_unlock(); + return result; +} + + +static inline bool _largemalloc_sweep_keep(mchunk_t *chunk) +{ +#ifdef STM_LARGEMALLOC_TEST + if (_stm_largemalloc_keep != NULL) + return _stm_largemalloc_keep((char *)&chunk->d); +#endif + return true; + //XXX: return largemalloc_keep_object_at((char *)&chunk->d); +} + +void _stm_largemalloc_sweep(void) +{ + lm_lock(); + + /* This may be slightly optimized by inlining _large_free() and + making cases, e.g. we might know already if the previous block + was free or not. It's probably not really worth it. 
*/ + mchunk_t *mnext, *chunk = lm.first_chunk; + + if (chunk->prev_size == THIS_CHUNK_FREE) + chunk = next_chunk(chunk); /* go to the first non-free chunk */ + + while (chunk != lm.last_chunk) { + /* here, the chunk we're pointing to is not free */ + assert(chunk->prev_size != THIS_CHUNK_FREE); + + /* first figure out the next non-free chunk */ + mnext = next_chunk(chunk); + if (mnext->prev_size == THIS_CHUNK_FREE) + mnext = next_chunk(mnext); + + /* use the callback to know if 'chunk' contains an object that + survives or dies */ + if (!_largemalloc_sweep_keep(chunk)) { + _large_free(chunk); /* dies */ + } + chunk = mnext; + } + + lm_unlock(); +} diff --git a/c8/stm/largemalloc.h b/c8/stm/largemalloc.h new file mode 100644 --- /dev/null +++ b/c8/stm/largemalloc.h @@ -0,0 +1,18 @@ + +/* all addresses passed to this interface should be "char *" pointers + in the segment 0. */ +void _stm_largemalloc_init_arena(char *data_start, size_t data_size); +int _stm_largemalloc_resize_arena(size_t new_size); +char *_stm_largemalloc_data_start(void); + +/* large_malloc() and large_free() are not thread-safe. This is + due to the fact that they should be mostly called during minor or + major collections, which have their own synchronization mecanisms. 
*/ +char *_stm_large_malloc(size_t request_size); +void _stm_large_free(char *data); +void _stm_largemalloc_sweep(void); + +void _stm_large_dump(void); + + +#define LARGE_MALLOC_OVERHEAD (2 * sizeof(size_t)) /* estimate */ diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -31,6 +31,11 @@ (NB_READMARKER_PAGES + NB_NURSERY_PAGES) * 4096, PROT_READ | PROT_WRITE); } + + /* make the sharing segment writable for the memory allocator: */ + mprotect(stm_object_pages + END_NURSERY_PAGE * 4096UL, + (NB_PAGES - END_NURSERY_PAGE) * 4096UL, + PROT_READ | PROT_WRITE); } diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -67,23 +67,17 @@ /* if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - base)) */ /* goto out_of_memory; */ - /* lock acquiring not necessary because the affected pages don't - need privatization protection. (but there is an assert right - now to enforce that XXXXXX) */ - acquire_all_privatization_locks(); + /* make writable in sharing seg */ + setup_N_pages(uninitialized_page_stop, GCPAGE_NUM_PAGES); char *p = uninitialized_page_stop; long i; for (i = 0; i < GCPAGE_NUM_PAGES; i++) { - /* accessible in seg0: */ - page_mark_accessible(0, (p - stm_object_pages) / 4096UL); - /* add to free_uniform_pages list */ ((struct small_free_loc_s *)p)->nextpage = free_uniform_pages; free_uniform_pages = (struct small_free_loc_s *)p; p += 4096; } - release_all_privatization_locks(); } spinlock_release(gmfp_lock); @@ -128,17 +122,6 @@ smallpage->nextpage))) goto retry; - - - /* lock acquiring not necessary because the affected pages don't - need privatization protection. 
(but there is an assert right - now to enforce that XXXXXX) */ - acquire_all_privatization_locks(); - /* make page accessible in our segment too: */ - page_mark_accessible(STM_SEGMENT->segment_num, - ((char*)smallpage - stm_object_pages) / 4096UL); - release_all_privatization_locks(); - /* Succeeded: we have a page in 'smallpage', which is not initialized so far, apart from the 'nextpage' field read above. Initialize it. @@ -315,6 +298,7 @@ void _stm_smallmalloc_sweep(void) { + acquire_all_privatization_locks(); /* should be done outside, but tests... */ long i, szword; for (szword = 2; szword < GC_N_SMALL_REQUESTS; szword++) { struct small_free_loc_s *page = small_page_lists[szword]; @@ -362,4 +346,5 @@ sweep_small_page(pageptr, NULL, sz); } } + release_all_privatization_locks(); } diff --git a/c8/stmgc.c b/c8/stmgc.c --- a/c8/stmgc.c +++ b/c8/stmgc.c @@ -6,6 +6,7 @@ #include "stm/core.h" #include "stm/pagecopy.h" #include "stm/pages.h" +#include "stm/largemalloc.h" #include "stm/gcpage.h" #include "stm/sync.h" #include "stm/setup.h" @@ -20,6 +21,7 @@ #include "stm/pagecopy.c" #include "stm/pages.c" #include "stm/prebuilt.c" +#include "stm/largemalloc.c" #include "stm/gcpage.c" #include "stm/nursery.c" #include "stm/sync.c" diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -79,6 +79,16 @@ void _stm_test_switch_segment(int segnum); void _push_obj_to_other_segments(object_t *obj); +void _stm_largemalloc_init_arena(char *data_start, size_t data_size); +int _stm_largemalloc_resize_arena(size_t new_size); +char *_stm_largemalloc_data_start(void); +char *_stm_large_malloc(size_t request_size); +void _stm_large_free(char *data); +void _stm_large_dump(void); +bool (*_stm_largemalloc_keep)(char *data); +void _stm_largemalloc_sweep(void); + + char *stm_object_pages; char *stm_file_pages; object_t *_stm_allocate_old_small(ssize_t size_rounded_up); diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ 
-81,6 +81,15 @@ void stm_collect(long level); void _stm_set_nursery_free_count(uint64_t free_count); +void _stm_largemalloc_init_arena(char *data_start, size_t data_size); +int _stm_largemalloc_resize_arena(size_t new_size); +char *_stm_largemalloc_data_start(void); +char *_stm_large_malloc(size_t request_size); +void _stm_large_free(char *data); +void _stm_large_dump(void); +bool (*_stm_largemalloc_keep)(char *data); +void _stm_largemalloc_sweep(void); + long stm_identityhash(object_t *obj); long stm_id(object_t *obj); diff --git a/c8/test/test_largemalloc.py b/c8/test/test_largemalloc.py new file mode 100644 --- /dev/null +++ b/c8/test/test_largemalloc.py @@ -0,0 +1,181 @@ +from support import * +import sys, random + +ra = lambda x: x # backward compat. + +class TestLargeMalloc(BaseTest): + def setup_method(self, meth): + # initialize some big heap in stm_setup() + BaseTest.setup_method(self, meth) + + # now re-initialize the heap to 1MB with 0xcd in it + self.size = 1024 * 1024 # 1MB + self.rawmem = lib._stm_largemalloc_data_start() + + lib.memset(self.rawmem, 0xcd, self.size) + lib._stm_largemalloc_init_arena(self.rawmem, self.size) + + def test_simple(self): + # + lib._stm_large_dump() + d1 = lib._stm_large_malloc(7000) + lib._stm_large_dump() + d2 = lib._stm_large_malloc(8000) + print d1 + print d2 + assert ra(d2) - ra(d1) == 7016 + d3 = lib._stm_large_malloc(9000) + assert ra(d3) - ra(d2) == 8016 + # + lib._stm_large_free(d1) + lib._stm_large_free(d2) + # + d4 = lib._stm_large_malloc(600) + assert d4 == d1 + d5 = lib._stm_large_malloc(600) + assert ra(d5) == ra(d4) + 616 + # + lib._stm_large_free(d5) + # + d6 = lib._stm_large_malloc(600) + assert d6 == d5 + # + lib._stm_large_free(d4) + # + d7 = lib._stm_large_malloc(608) + assert ra(d7) == ra(d6) + 616 + d8 = lib._stm_large_malloc(600) + assert d8 == d4 + # + lib._stm_large_dump() + + def test_overflow_1(self): + d = lib._stm_large_malloc(self.size - 32) + assert ra(d) == self.rawmem + 16 + 
lib._stm_large_dump() + + def test_overflow_2(self): + d = lib._stm_large_malloc(self.size - 16) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_overflow_3(self): + d = lib._stm_large_malloc(sys.maxint & ~7) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_resize_arena_reduce_1(self): + r = lib._stm_largemalloc_resize_arena(self.size - 32) + assert r == 1 + d = lib._stm_large_malloc(self.size - 32) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_resize_arena_reduce_2(self): + lib._stm_large_malloc(self.size // 2 - 80) + r = lib._stm_largemalloc_resize_arena(self.size // 2) + assert r == 1 + lib._stm_large_dump() + + def test_resize_arena_reduce_3(self): + d1 = lib._stm_large_malloc(128) + r = lib._stm_largemalloc_resize_arena(self.size // 2) + assert r == 1 + d2 = lib._stm_large_malloc(128) + assert ra(d1) == self.rawmem + 16 + assert ra(d2) == ra(d1) + 128 + 16 + lib._stm_large_dump() + + def test_resize_arena_cannot_reduce_1(self): + lib._stm_large_malloc(self.size // 2) + r = lib._stm_largemalloc_resize_arena(self.size // 2) + assert r == 0 + lib._stm_large_dump() + + def test_resize_arena_cannot_reduce_2(self): + lib._stm_large_malloc(self.size // 2 - 56) + r = lib._stm_largemalloc_resize_arena(self.size // 2) + assert r == 0 + lib._stm_large_dump() + + def test_random(self): + r = random.Random(1007) + p = [] + for i in range(100000): + if len(p) != 0 and (len(p) > 100 or r.randrange(0, 5) < 2): + index = r.randrange(0, len(p)) + d, length, content1, content2 = p.pop(index) + print ' free %5d (%s)' % (length, d) + assert ra(d)[0] == content1 + assert ra(d)[length - 1] == content2 + lib._stm_large_free(d) + else: + sz = r.randrange(8, 160) * 8 + d = lib._stm_large_malloc(sz) + print 'alloc %5d (%s)' % (sz, d) + assert d != ffi.NULL + lib.memset(ra(d), 0xdd, sz) + content1 = chr(r.randrange(0, 256)) + content2 = chr(r.randrange(0, 256)) + ra(d)[0] = content1 + ra(d)[sz - 1] = content2 + p.append((d, sz, content1, content2)) + 
lib._stm_large_dump() + + def test_random_largemalloc_sweep(self, constrained_size_range=False): + @ffi.callback("bool(char *)") + def keep(data): + try: + if data in from_before: + return False + index = all.index(data) + seen_for.add(index) + return index in keep_me + except Exception, e: + errors.append(e) + raise + lib._stm_largemalloc_keep = keep + errors = [] + from_before = set() + + r = random.Random(1000) + for j in range(500): + if constrained_size_range: + max = 120 + else: + max = 500 + sizes = [random.choice(range(104, max, 8)) for i in range(20)] + all = [lib._stm_large_malloc(size) for size in sizes] + print all + + for i in range(len(all)): + all[i][50] = chr(65 + i) + all_orig = all[:] + + keep_me = set() + for i in range(len(all)): + if r.random() < 0.5: + print 'free:', all[i] + lib._stm_large_free(all[i]) + all[i] = None + elif r.random() < 0.5: + keep_me.add(i) + + seen_for = set() + lib._stm_largemalloc_sweep() + if errors: + raise errors[0] + assert seen_for == set([i for i in range(len(all)) + if all[i] is not None]) + lib._stm_large_dump() + + from_before = [all[i] for i in keep_me] + + for i in range(len(all)): + if i in keep_me: + assert all[i][50] == chr(65 + i) + else: + assert all_orig[i][50] == '\xDE' + + def test_random_largemalloc_sweep_constrained_size_range(self): + self.test_random_largemalloc_sweep(constrained_size_range=True) From noreply at buildbot.pypy.org Wed Jan 14 14:08:43 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Jan 2015 14:08:43 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: make 'backup' in commit log entry a slice too instead of the whole object Message-ID: <20150114130843.4F34C1D287D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1526:4e6bc07dfd5f Date: 2015-01-14 14:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/4e6bc07dfd5f/ Log: make 'backup' in commit log entry a slice too instead of the whole object diff --git a/c8/stm/core.c 
b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -60,7 +60,7 @@ if (src_segment_base != NULL) src = REAL_ADDRESS(src_segment_base, oslice); else - src = undo->backup + SLICE_OFFSET(undo->slice); + src = undo->backup; dst = REAL_ADDRESS(STM_SEGMENT->segment_base, oslice); memcpy(dst, src, SLICE_SIZE(undo->slice)); @@ -504,14 +504,8 @@ return; } - /* create backup copy (this may cause several page faults - XXX: do backup later and maybe allow for having NO_ACCESS - pages around anyway (kind of card marking)): */ - struct object_s *bk_obj = malloc(obj_size); - memcpy(bk_obj, realobj, obj_size); - assert(!(bk_obj->stm_flags & GCFLAG_WB_EXECUTED)); - - dprintf(("write_slowpath(%p): sz=%lu, bk=%p\n", obj, obj_size, bk_obj)); + assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED)); + dprintf(("write_slowpath(%p): sz=%lu\n", obj, obj_size)); retry: /* privatize pages: */ @@ -521,11 +515,7 @@ uintptr_t page; for (page = first_page; page <= end_page; page++) { if (get_page_status_in(my_segnum, page) == PAGE_NO_ACCESS) { - /* should not happen right now, since we do not make other - segment's pages NO_ACCESS anymore (later maybe in GC safe points) */ - abort(); - /* happens if there is a concurrent WB between us making the backup - and acquiring the locks */ + /* XXX: slow? 
*/ release_all_privatization_locks(); volatile char *dummy = REAL_ADDRESS(STM_SEGMENT->segment_base, page * 4096UL); @@ -561,11 +551,17 @@ slice_sz = 4096UL - in_page_offset; } + size_t slice_off = obj_size - remaining_obj_sz; + + /* make backup slice: */ + char *bk_slice = malloc(slice_sz); + memcpy(bk_slice, realobj + slice_off, slice_sz); + STM_PSEGMENT->modified_old_objects = list_append3( STM_PSEGMENT->modified_old_objects, (uintptr_t)obj, /* obj */ - (uintptr_t)bk_obj, /* bk_addr */ - NEW_SLICE(obj_size - remaining_obj_sz, slice_sz)); + (uintptr_t)bk_slice, /* bk_addr */ + NEW_SLICE(slice_off, slice_sz)); remaining_obj_sz -= slice_sz; in_page_offset = (in_page_offset + slice_sz) % 4096UL; /* mostly 0 */ @@ -774,18 +770,13 @@ char *dst = REAL_ADDRESS(pseg->pub.segment_base, obj); memcpy(dst + SLICE_OFFSET(undo->slice), - undo->backup + SLICE_OFFSET(undo->slice), + undo->backup, SLICE_SIZE(undo->slice)); - size_t obj_size = stmcb_size_rounded_up((struct object_s*)undo->backup); - dprintf(("reset_modified_from_backup_copies(%d): obj=%p off=%lu bk=%p obj_sz=%lu\n", - segment_num, obj, SLICE_OFFSET(undo->slice), undo->backup, obj_size)); + dprintf(("reset_modified_from_backup_copies(%d): obj=%p off=%lu bk=%p\n", + segment_num, obj, SLICE_OFFSET(undo->slice), undo->backup)); - if (obj_size - SLICE_OFFSET(undo->slice) <= 4096UL) { - /* only free bk copy once (last slice): */ - free(undo->backup); - dprintf(("-> free(%p)\n", undo->backup)); - } + free(undo->backup); } /* check that all objects have the GCFLAG_WRITE_BARRIER afterwards */ From noreply at buildbot.pypy.org Wed Jan 14 14:08:44 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Jan 2015 14:08:44 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix this debug check Message-ID: <20150114130844.8F7B61D287D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1527:9d6e43eac521 Date: 2015-01-14 14:08 +0100 
http://bitbucket.org/pypy/stmgc/changeset/9d6e43eac521/ Log: fix this debug check diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -64,8 +64,8 @@ dst = REAL_ADDRESS(STM_SEGMENT->segment_base, oslice); memcpy(dst, src, SLICE_SIZE(undo->slice)); - if (src_segment_base == NULL) { - /* backups never should have WB_EXECUTED */ + if (src_segment_base == NULL && SLICE_OFFSET(undo->slice) == 0) { + /* check that restored obj doesn't have WB_EXECUTED */ assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED)); } } From noreply at buildbot.pypy.org Wed Jan 14 15:20:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 15:20:39 +0100 (CET) Subject: [pypy-commit] pypy default: Consolidate the various 'cmov*' and 'j*' operations Message-ID: <20150114142039.B6BE51D2655@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75323:f6b89b5d806d Date: 2015-01-14 15:19 +0100 http://bitbucket.org/pypy/pypy/changeset/f6b89b5d806d/ Log: Consolidate the various 'cmov*' and 'j*' operations diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -588,13 +588,6 @@ else: return [] - # The various cmov* operations - for name in ''' - e ne g ge l le a ae b be nb p np s ns o no - '''.split(): - locals()['visit_cmov' + name] = binary_insn - locals()['visit_cmov' + name + 'l'] = binary_insn - def _visit_and(self, line): match = self.r_binaryinsn.match(line) target = match.group("target") @@ -828,23 +821,18 @@ return prefix + [InsnCondJump(label)] + postfix visit_jmpl = visit_jmp - visit_jg = conditional_jump - visit_jge = conditional_jump - visit_jl = conditional_jump - visit_jle = conditional_jump - visit_ja = conditional_jump - visit_jae = conditional_jump - visit_jb = conditional_jump - visit_jbe = conditional_jump - visit_jp = conditional_jump - visit_jnb = conditional_jump - visit_jnp = 
conditional_jump - visit_js = conditional_jump - visit_jns = conditional_jump - visit_jo = conditional_jump - visit_jno = conditional_jump - visit_jc = conditional_jump - visit_jnc = conditional_jump + + # The various conditional jumps and cmov* operations + for name in ''' + e g ge l le a ae b be p s o c + '''.split(): + # NB. visit_je() and visit_jne() are overridden below + locals()['visit_j' + name] = conditional_jump + locals()['visit_jn' + name] = conditional_jump + locals()['visit_cmov' + name] = binary_insn + locals()['visit_cmov' + name + 'l'] = binary_insn + locals()['visit_cmovn' + name] = binary_insn + locals()['visit_cmovn' + name + 'l'] = binary_insn def visit_je(self, line): return self.conditional_jump(line, je=True) From noreply at buildbot.pypy.org Wed Jan 14 17:15:16 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 17:15:16 +0100 (CET) Subject: [pypy-commit] pypy errno-again: A branch for issue #1961 Message-ID: <20150114161516.046451C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75324:9367da683ee9 Date: 2015-01-14 12:30 +0100 http://bitbucket.org/pypy/pypy/changeset/9367da683ee9/ Log: A branch for issue #1961 From noreply at buildbot.pypy.org Wed Jan 14 17:15:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 17:15:18 +0100 (CET) Subject: [pypy-commit] pypy errno-again: in-progress: move the errno reading code inside the llexternal functions, Message-ID: <20150114161518.0ACA81C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75325:60d2fecaae5c Date: 2015-01-14 17:14 +0100 http://bitbucket.org/pypy/pypy/changeset/60d2fecaae5c/ Log: in-progress: move the errno reading code inside the llexternal functions, before we reacquire the GIL. diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -7,12 +7,11 @@ # all but one will be blocked. 
The other threads get a chance to run # from time to time, using the periodic action GILReleaseAction. -from rpython.rlib import rthread, rgil, rwin32 +from rpython.rlib import rthread, rgil from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import PeriodicAsyncAction from pypy.module.thread.threadlocals import OSThreadLocals from rpython.rlib.objectmodel import invoke_around_extcall -from rpython.rlib.rposix import get_errno, set_errno class GILThreadLocals(OSThreadLocals): """A version of OSThreadLocals that enforces a GIL.""" @@ -75,16 +74,9 @@ before_external_call._dont_reach_me_in_del_ = True def after_external_call(): - e = get_errno() - e2 = 0 - if rwin32.WIN32: - e2 = rwin32.GetLastError() rgil.gil_acquire() rthread.gc_thread_run() after_thread_switch() - if rwin32.WIN32: - rwin32.SetLastError(e2) - set_errno(e) after_external_call._gctransformer_hint_cannot_collect_ = True after_external_call._dont_reach_me_in_del_ = True diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -17,7 +17,8 @@ "NOT_RPYTHON" self._valuedict = {} # {thread_ident: ExecutionContext()} self._cleanup_() - self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext) + self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, + loop_invariant=True) def _cleanup_(self): self._valuedict.clear() diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1999,9 +1999,15 @@ or isinstance(RESTYPE, lltype.Ptr)) c_offset, = op.args op1 = self.prepare_builtin_call(op, 'threadlocalref_get', [c_offset]) + if c_offset.value.startswith('RPY_TLOFSLOOPINVARIANT_'): + effect = EffectInfo.EF_LOOPINVARIANT + elif c_offset.value.startswith('RPY_TLOFS_'): + effect = EffectInfo.EF_CANNOT_RAISE + else: + 
assert 0 return self.handle_residual_call(op1, oopspecindex=EffectInfo.OS_THREADLOCALREF_GET, - extraeffect=EffectInfo.EF_LOOPINVARIANT) + extraeffect=effect) # ____________________________________________________________ diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -89,26 +89,23 @@ separate_module_sources=separate_module_sources, ) +# Direct getters/setters, don't use directly! _get_errno, _set_errno = CExternVariable(INT, 'errno', errno_eci, CConstantErrno, sandboxsafe=True, _nowrapper=True, c_type='int') -# the default wrapper for set_errno is not suitable for use in critical places -# like around GIL handling logic, so we provide our own wrappers. -def get_errno(): - if jit.we_are_jitted(): - from rpython.rlib import rthread - perrno = rthread.tlfield_p_errno.getraw() - return intmask(perrno[0]) - return intmask(_get_errno()) +def get_saved_errno(): + """Return the saved value of the errno. This value is saved after a call + to an llexternal function with 'save_err & RFFI_ERRNO_AFTER != 0'.""" + from rpython.rlib import rthread + return intmask(rthread.tlfield_rpy_errno.getraw()) -def set_errno(errno): - if jit.we_are_jitted(): - from rpython.rlib import rthread - perrno = rthread.tlfield_p_errno.getraw() - perrno[0] = rffi.cast(INT, errno) - return - _set_errno(rffi.cast(INT, errno)) +def set_saved_errno(errno): + """Set the saved value of the errno. 
This value will be used by a + following llexternal function with 'save_err & RFFI_ERRNO_BEFORE != 0'.""" + from rpython.rlib import rthread + rthread.tlfield_rpy_errno.setraw(rffi.cast(INT, errno)) + if os.name == 'nt': is_valid_fd = jit.dont_look_inside(rffi.llexternal( diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator import cdir -import py +import py, sys from rpython.rlib import jit, rgc from rpython.rlib.debug import ll_assert from rpython.rlib.objectmodel import we_are_translated, specialize @@ -266,19 +266,23 @@ # ____________________________________________________________ # -# Thread-locals. Only for references that change "not too often" -- -# for now, the JIT compiles get() as a loop-invariant, so basically -# don't change them. +# Thread-locals. # KEEP THE REFERENCE ALIVE, THE GC DOES NOT FOLLOW THEM SO FAR! # We use _make_sure_does_not_move() to make sure the pointer will not move. 
class ThreadLocalField(object): - def __init__(self, FIELDTYPE, fieldname): + def __init__(self, FIELDTYPE, fieldname, loop_invariant=False): "NOT_RPYTHON: must be prebuilt" self.FIELDTYPE = FIELDTYPE self.fieldname = fieldname - offset = CDefinedIntSymbolic('RPY_TLOFS_%s' % self.fieldname, + self.loop_invariant = loop_invariant + if loop_invariant: + invariant = 'LOOPINVARIANT' + else: + invariant = '' + offset = CDefinedIntSymbolic('RPY_TLOFS%s_%s' % (invariant, + self.fieldname), default='?') self.offset = offset @@ -309,14 +313,15 @@ class ThreadLocalReference(ThreadLocalField): _COUNT = 1 - def __init__(self, Cls): + def __init__(self, Cls, loop_invariant=False): "NOT_RPYTHON: must be prebuilt" import thread self.Cls = Cls self.local = thread._local() # <- NOT_RPYTHON unique_id = ThreadLocalReference._COUNT ThreadLocalReference._COUNT += 1 - ThreadLocalField.__init__(self, lltype.Signed, 'tlref%d' % unique_id) + ThreadLocalField.__init__(self, lltype.Signed, 'tlref%d' % unique_id, + loop_invariant=loop_invariant) setraw = self.setraw offset = self.offset @@ -350,8 +355,13 @@ self.set = set -tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident") -tlfield_p_errno = ThreadLocalField(rffi.CArrayPtr(rffi.INT), "p_errno") +tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", + loop_invariant=True) +tlfield_p_errno = ThreadLocalField(rffi.CArrayPtr(rffi.INT), "p_errno", + loop_invariant=True) +tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") +if sys.platform == "win32": + tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") def _threadlocalref_seeme(field): "NOT_RPYTHON" diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -123,16 +123,23 @@ _SetLastError = winexternal('SetLastError', [DWORD], lltype.Void, _nowrapper=True, sandboxsafe=True) - @jit.dont_look_inside - def GetLastError(): + def GetLastError_real(): return 
rffi.cast(lltype.Signed, _GetLastError()) - @jit.dont_look_inside - def SetLastError(err): + + def SetLastError_real(err): _SetLastError(rffi.cast(DWORD, err)) - # In tests, the first call to GetLastError is always wrong, because error - # is hidden by operations in ll2ctypes. Call it now. - GetLastError() + def GetLastError_saved(): + from rpython.rlib import rthread + return rffi.cast(lltype.Signed, rthread.tlfield_rpy_lasterror.getraw()) + + def SetLastError_saved(err): + from rpython.rlib import rthread + rthread.tlfield_rpy_lasterror.setraw(rffi.cast(DWORD, err)) + + # In tests, the first call to GetLastError_real() is always wrong, + # because error is hidden by operations in ll2ctypes. Call it now. + GetLastError_real() GetModuleHandle = winexternal('GetModuleHandleA', [rffi.CCHARP], HMODULE) LoadLibrary = winexternal('LoadLibraryA', [rffi.CCHARP], HMODULE) @@ -260,7 +267,7 @@ return 'Windows Error %d' % (code,) def lastWindowsError(context="Windows Error"): - code = GetLastError() + code = GetLastError_saved() return WindowsError(code, context) def FAILED(hr): diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -59,12 +59,22 @@ hop.exception_cannot_occur() return hop.inputconst(lltype.Bool, hop.s_result.const) +RFFI_SAVE_ERRNO = 1 +RFFI_READSAVED_ERRNO = 2 +RFFI_FULL_ERRNO = RFFI_SAVE_ERRNO | RFFI_READSAVED_ERRNO +RFFI_SAVE_LASTERROR = 4 +RFFI_READSAVED_LASTERROR = 8 +RFFI_FULL_LASTERROR = RFFI_SAVE_LASTERROR | RFFI_READSAVED_LASTERROR +RFFI_ERR_NONE = 0 +RFFI_ERR_ALL = RFFI_FULL_ERRNO | RFFI_FULL_LASTERROR + def llexternal(name, args, result, _callable=None, compilation_info=ExternalCompilationInfo(), sandboxsafe=False, releasegil='auto', _nowrapper=False, calling_conv='c', elidable_function=False, macro=None, - random_effects_on_gcobjs='auto'): + random_effects_on_gcobjs='auto', + save_err=RFFI_ERR_NONE): """Build an external function 
that will invoke the C function 'name' with the given 'args' types and 'result' type. @@ -141,9 +151,14 @@ _callable.funcptr = funcptr if _nowrapper: + assert save_err == RFFI_ERR_NONE return funcptr + argnames = ', '.join(['a%d' % i for i in range(len(args))]) + errno_before = (save_err & RFFI_READSAVED_ERRNO) != 0 + errno_after = (save_err & RFFI_SAVE_ERRNO) != 0 + if invoke_around_handlers: # The around-handlers are releasing the GIL in a threaded pypy. # We need tons of care to ensure that no GC operation and no @@ -154,13 +169,25 @@ # neither '*args' nor the GC objects originally passed in as # argument to wrapper(), if any (e.g. RPython strings). - argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" + if %(errno_before)s or %(errno_after)s: + from rpython.rlib import rposix, rthread + def call_external_function(%(argnames)s): before = aroundstate.before if before: before() # NB. it is essential that no exception checking occurs here! + # + # restore errno from its saved value + if %(errno_before)s: + rposix._set_errno(rthread.tlfield_rpy_errno.getraw()) + # res = funcptr(%(argnames)s) + # + # save errno away + if %(errno_after)s: + rthread.tlfield_rpy_errno.setraw(rposix._get_errno()) + # after = aroundstate.after if after: after() return res @@ -188,15 +215,27 @@ else: # if we don't have to invoke the aroundstate, we can just call # the low-level function pointer carelessly - if macro is None: + if macro is None and save_err == RFFI_ERR_NONE: call_external_function = funcptr else: # ...well, unless it's a macro, in which case we still have # to hide it from the JIT... 
- argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" + if %(errno_before)s or %(errno_after)s: + from rpython.rlib import rposix, rthread + def call_external_function(%(argnames)s): - return funcptr(%(argnames)s) + # restore errno from its saved value + if %(errno_before)s: + rposix._set_errno(rthread.tlfield_rpy_errno.getraw()) + # + res = funcptr(%(argnames)s) + # + # save errno away + if %(errno_after)s: + rthread.tlfield_rpy_errno.setraw(rposix._get_errno()) + # + return res """ % locals()) miniglobals = {'funcptr': funcptr, '__name__': __name__, diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -187,11 +187,12 @@ # a simple, yet useful factory def extdef_for_os_function_returning_int(self, name, **kwds): - c_func = self.llexternal(name, [], rffi.INT, **kwds) + c_func = self.llexternal(name, [], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO, **kwds) def c_func_llimpl(): res = rffi.cast(rffi.SIGNED, c_func()) if res == -1: - raise OSError(rposix.get_errno(), "%s failed" % name) + raise OSError(rposix.get_saved_errno(), "%s failed" % name) return res c_func_llimpl.func_name = name + '_llimpl' @@ -199,11 +200,12 @@ export_name='ll_os.ll_os_' + name) def extdef_for_os_function_accepting_int(self, name, **kwds): - c_func = self.llexternal(name, [rffi.INT], rffi.INT, **kwds) + c_func = self.llexternal(name, [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO, **kwds) def c_func_llimpl(arg): res = rffi.cast(rffi.SIGNED, c_func(arg)) if res == -1: - raise OSError(rposix.get_errno(), "%s failed" % name) + raise OSError(rposix.get_saved_errno(), "%s failed" % name) c_func_llimpl.func_name = name + '_llimpl' @@ -211,11 +213,12 @@ export_name='ll_os.ll_os_' + name) def extdef_for_os_function_accepting_2int(self, name, **kwds): - c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT, **kwds) + c_func = self.llexternal(name, 
[rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO, **kwds) def c_func_llimpl(arg, arg2): res = rffi.cast(rffi.SIGNED, c_func(arg, arg2)) if res == -1: - raise OSError(rposix.get_errno(), "%s failed" % name) + raise OSError(rposix.get_saved_errno(), "%s failed" % name) c_func_llimpl.func_name = name + '_llimpl' @@ -223,11 +226,12 @@ export_name='ll_os.ll_os_' + name) def extdef_for_os_function_accepting_0int(self, name, **kwds): - c_func = self.llexternal(name, [], rffi.INT, **kwds) + c_func = self.llexternal(name, [], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO, **kwds) def c_func_llimpl(): res = rffi.cast(rffi.SIGNED, c_func()) if res == -1: - raise OSError(rposix.get_errno(), "%s failed" % name) + raise OSError(rposix.get_saved_errno(), "%s failed" % name) c_func_llimpl.func_name = name + '_llimpl' @@ -235,11 +239,12 @@ export_name='ll_os.ll_os_' + name) def extdef_for_os_function_int_to_int(self, name, **kwds): - c_func = self.llexternal(name, [rffi.INT], rffi.INT, **kwds) + c_func = self.llexternal(name, [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO, **kwds) def c_func_llimpl(arg): res = rffi.cast(rffi.SIGNED, c_func(arg)) if res == -1: - raise OSError(rposix.get_errno(), "%s failed" % name) + raise OSError(rposix.get_saved_errno(), "%s failed" % name) return res c_func_llimpl.func_name = name + '_llimpl' @@ -251,13 +256,14 @@ def register_os_execv(self): os_execv = self.llexternal( 'execv', - [rffi.CCHARP, rffi.CCHARPP], rffi.INT) + [rffi.CCHARP, rffi.CCHARPP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def execv_llimpl(path, args): l_args = rffi.ll_liststr2charpp(args) os_execv(path, l_args) rffi.free_charpp(l_args) - raise OSError(rposix.get_errno(), "execv failed") + raise OSError(rposix.get_saved_errno(), "execv failed") return extdef([str0, [str0]], s_ImpossibleValue, llimpl=execv_llimpl, export_name="ll_os.ll_os_execv") @@ -267,7 +273,8 @@ def register_os_execve(self): os_execve = self.llexternal( 'execve', - [rffi.CCHARP, 
rffi.CCHARPP, rffi.CCHARPP], rffi.INT) + [rffi.CCHARP, rffi.CCHARPP, rffi.CCHARPP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def execve_llimpl(path, args, env): # XXX Check path, args, env for \0 and raise TypeErrors as @@ -285,7 +292,7 @@ rffi.free_charpp(l_env) rffi.free_charpp(l_args) - raise OSError(rposix.get_errno(), "execve failed") + raise OSError(rposix.get_saved_errno(), "execve failed") return extdef( [str0, [str0], {str0: str0}], @@ -298,7 +305,8 @@ def register_os_spawnv(self): os_spawnv = self.llexternal('spawnv', [rffi.INT, rffi.CCHARP, rffi.CCHARPP], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def spawnv_llimpl(mode, path, args): mode = rffi.cast(rffi.INT, mode) @@ -306,7 +314,7 @@ childpid = os_spawnv(mode, path, l_args) rffi.free_charpp(l_args) if childpid == -1: - raise OSError(rposix.get_errno(), "os_spawnv failed") + raise OSError(rposix.get_saved_errno(), "os_spawnv failed") return rffi.cast(lltype.Signed, childpid) return extdef([int, str0, [str0]], int, llimpl=spawnv_llimpl, @@ -317,7 +325,8 @@ os_spawnve = self.llexternal('spawnve', [rffi.INT, rffi.CCHARP, rffi.CCHARPP, rffi.CCHARPP], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def spawnve_llimpl(mode, path, args, env): envstrs = [] @@ -331,7 +340,7 @@ rffi.free_charpp(l_env) rffi.free_charpp(l_args) if childpid == -1: - raise OSError(rposix.get_errno(), "os_spawnve failed") + raise OSError(rposix.get_saved_errno(), "os_spawnve failed") return rffi.cast(lltype.Signed, childpid) return extdef([int, str0, [str0], {str0: str0}], int, @@ -341,13 +350,14 @@ @registering(os.dup) def register_os_dup(self): os_dup = self.llexternal(UNDERSCORE_ON_WIN32 + 'dup', - [rffi.INT], rffi.INT) + [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def dup_llimpl(fd): rposix.validate_fd(fd) newfd = rffi.cast(lltype.Signed, os_dup(rffi.cast(rffi.INT, fd))) if newfd == -1: - raise OSError(rposix.get_errno(), "dup failed") + raise OSError(rposix.get_saved_errno(), "dup failed") 
return newfd return extdef([int], int, llimpl=dup_llimpl, export_name="ll_os.ll_os_dup") @@ -355,26 +365,29 @@ @registering(os.dup2) def register_os_dup2(self): os_dup2 = self.llexternal(UNDERSCORE_ON_WIN32 + 'dup2', - [rffi.INT, rffi.INT], rffi.INT) + [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def dup2_llimpl(fd, newfd): rposix.validate_fd(fd) error = rffi.cast(lltype.Signed, os_dup2(rffi.cast(rffi.INT, fd), rffi.cast(rffi.INT, newfd))) if error == -1: - raise OSError(rposix.get_errno(), "dup2 failed") + raise OSError(rposix.get_saved_errno(), "dup2 failed") return extdef([int, int], s_None, llimpl=dup2_llimpl, export_name="ll_os.ll_os_dup2") @registering_if(os, "getlogin", condition=not _WIN32) def register_os_getlogin(self): - os_getlogin = self.llexternal('getlogin', [], rffi.CCHARP, releasegil=False) + os_getlogin = self.llexternal('getlogin', [], rffi.CCHARP, + releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO) def getlogin_llimpl(): result = os_getlogin() if not result: - raise OSError(rposix.get_errno(), "getlogin failed") + raise OSError(rposix.get_saved_errno(), "getlogin failed") return rffi.charp2str(result) @@ -384,7 +397,8 @@ @registering_str_unicode(os.utime) def register_os_utime(self, traits): UTIMBUFP = lltype.Ptr(self.UTIMBUF) - os_utime = self.llexternal('utime', [rffi.CCHARP, UTIMBUFP], rffi.INT) + os_utime = self.llexternal('utime', [rffi.CCHARP, UTIMBUFP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) if not _WIN32: includes = ['sys/time.h'] @@ -409,7 +423,8 @@ TIMEVAL = config['TIMEVAL'] TIMEVAL2P = rffi.CArrayPtr(TIMEVAL) os_utimes = self.llexternal('utimes', [rffi.CCHARP, TIMEVAL2P], - rffi.INT, compilation_info=eci) + rffi.INT, compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) def os_utime_platform(path, actime, modtime): import math @@ -448,7 +463,7 @@ error = os_utime_platform(path, actime, modtime) error = rffi.cast(lltype.Signed, error) if error == -1: - raise OSError(rposix.get_errno(), "os_utime failed") + 
raise OSError(rposix.get_saved_errno(), "os_utime failed") else: from rpython.rtyper.module.ll_win32file import make_utime_impl os_utime_llimpl = make_utime_impl(traits) @@ -518,7 +533,8 @@ return TMSP = lltype.Ptr(self.TMS) - os_times = self.llexternal('times', [TMSP], self.CLOCK_T) + os_times = self.llexternal('times', [TMSP], self.CLOCK_T, + save_err=rffi.RFFI_SAVE_ERRNO) # Here is a random extra platform parameter which is important. # Strictly speaking, this should probably be retrieved at runtime, not @@ -531,7 +547,7 @@ result = os_times(l_tmsbuf) result = rffi.cast(lltype.Signed, result) if result == -1: - raise OSError(rposix.get_errno(), "times failed") + raise OSError(rposix.get_saved_errno(), "times failed") return ( rffi.cast(lltype.Signed, l_tmsbuf.c_tms_utime) / CLOCK_TICKS_PER_SECOND, @@ -550,11 +566,12 @@ @registering_if(os, 'setsid') def register_os_setsid(self): - os_setsid = self.llexternal('setsid', [], rffi.PID_T) + os_setsid = self.llexternal('setsid', [], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) def setsid_llimpl(): result = rffi.cast(lltype.Signed, os_setsid()) if result == -1: - raise OSError(rposix.get_errno(), "os_setsid failed") + raise OSError(rposix.get_saved_errno(), "os_setsid failed") return result return extdef([], int, export_name="ll_os.ll_os_setsid", @@ -562,11 +579,12 @@ @registering_if(os, 'chroot') def register_os_chroot(self): - os_chroot = self.llexternal('chroot', [rffi.CCHARP], rffi.INT) + os_chroot = self.llexternal('chroot', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def chroot_llimpl(arg): result = os_chroot(arg) if result == -1: - raise OSError(rposix.get_errno(), "os_chroot failed") + raise OSError(rposix.get_saved_errno(), "os_chroot failed") return extdef([str0], None, export_name="ll_os.ll_os_chroot", llimpl=chroot_llimpl) @@ -588,13 +606,14 @@ UTSNAMEP = lltype.Ptr(config['UTSNAME']) os_uname = self.llexternal('uname', [UTSNAMEP], rffi.INT, - compilation_info=CConfig._compilation_info_) + 
compilation_info=CConfig._compilation_info_, + save_err=rffi.RFFI_SAVE_ERRNO) def uname_llimpl(): l_utsbuf = lltype.malloc(UTSNAMEP.TO, flavor='raw') result = os_uname(l_utsbuf) if result == -1: - raise OSError(rposix.get_errno(), "os_uname failed") + raise OSError(rposix.get_saved_errno(), "os_uname failed") retval = ( rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_sysname)), rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_nodename)), @@ -610,13 +629,14 @@ @registering_if(os, 'sysconf') def register_os_sysconf(self): - c_sysconf = self.llexternal('sysconf', [rffi.INT], rffi.LONG) + c_sysconf = self.llexternal('sysconf', [rffi.INT], rffi.LONG, + save_err=rffi.RFFI_FULL_ERRNO) def sysconf_llimpl(i): - rposix.set_errno(0) + rposix.set_saved_errno(0) res = c_sysconf(i) if res == -1: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if errno != 0: raise OSError(errno, "sysconf failed") return res @@ -625,13 +645,14 @@ @registering_if(os, 'fpathconf') def register_os_fpathconf(self): c_fpathconf = self.llexternal('fpathconf', - [rffi.INT, rffi.INT], rffi.LONG) + [rffi.INT, rffi.INT], rffi.LONG, + save_err=rffi.RFFI_FULL_ERRNO) def fpathconf_llimpl(fd, i): - rposix.set_errno(0) + rposix.set_saved_errno(0) res = c_fpathconf(fd, i) if res == -1: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if errno != 0: raise OSError(errno, "fpathconf failed") return res @@ -641,13 +662,14 @@ @registering_if(os, 'pathconf') def register_os_pathconf(self): c_pathconf = self.llexternal('pathconf', - [rffi.CCHARP, rffi.INT], rffi.LONG) + [rffi.CCHARP, rffi.INT], rffi.LONG, + save_err=rffi.RFFI_FULL_ERRNO) def pathconf_llimpl(path, i): - rposix.set_errno(0) + rposix.set_saved_errno(0) res = c_pathconf(path, i) if res == -1: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if errno != 0: raise OSError(errno, "pathconf failed") return res @@ -657,10 +679,11 @@ @registering_if(os, 'confstr') def register_os_confstr(self): c_confstr = 
self.llexternal('confstr', [rffi.INT, rffi.CCHARP, - rffi.SIZE_T], rffi.SIZE_T) + rffi.SIZE_T], rffi.SIZE_T, + save_err=rffi.RFFI_FULL_ERRNO) def confstr_llimpl(i): - rposix.set_errno(0) + rposix.set_saved_errno(0) n = c_confstr(i, lltype.nullptr(rffi.CCHARP.TO), 0) n = rffi.cast(lltype.Signed, n) if n > 0: @@ -671,7 +694,7 @@ finally: lltype.free(buf, flavor='raw') else: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if errno != 0: raise OSError(errno, "confstr failed") return None @@ -717,7 +740,8 @@ @registering_if(os, 'getgroups') def register_os_getgroups(self): GP = rffi.CArrayPtr(rffi.PID_T) - c_getgroups = self.llexternal('getgroups', [rffi.INT, GP], rffi.INT) + c_getgroups = self.llexternal('getgroups', [rffi.INT, GP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def getgroups_llimpl(): n = c_getgroups(0, lltype.nullptr(GP.TO)) @@ -731,7 +755,7 @@ lltype.free(groups, flavor='raw') if n >= 0: return result - raise OSError(rposix.get_errno(), "os_getgroups failed") + raise OSError(rposix.get_saved_errno(), "os_getgroups failed") return extdef([], [int], llimpl=getgroups_llimpl, export_name="ll_os.ll_getgroups") @@ -739,7 +763,8 @@ @registering_if(os, 'setgroups') def register_os_setgroups(self): GP = rffi.CArrayPtr(rffi.PID_T) - c_setgroups = self.llexternal('setgroups', [rffi.SIZE_T, GP], rffi.INT) + c_setgroups = self.llexternal('setgroups', [rffi.SIZE_T, GP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def setgroups_llimpl(list): n = len(list) @@ -751,7 +776,7 @@ finally: lltype.free(groups, flavor='raw') if n != 0: - raise OSError(rposix.get_errno(), "os_setgroups failed") + raise OSError(rposix.get_saved_errno(), "os_setgroups failed") return extdef([[int]], None, llimpl=setgroups_llimpl, export_name="ll_os.ll_setgroups") @@ -759,12 +784,13 @@ @registering_if(os, 'initgroups') def register_os_initgroups(self): c_initgroups = self.llexternal('initgroups', - [rffi.CCHARP, rffi.PID_T], rffi.INT) + [rffi.CCHARP, rffi.PID_T], rffi.INT, + 
save_err=rffi.RFFI_SAVE_ERRNO) def initgroups_llimpl(user, group): n = c_initgroups(user, rffi.cast(rffi.PID_T, group)) if n != 0: - raise OSError(rposix.get_errno(), "os_initgroups failed") + raise OSError(rposix.get_saved_errno(), "os_initgroups failed") return extdef([str, int], None, llimpl=initgroups_llimpl, export_name="ll_os.ll_initgroups") @@ -773,11 +799,12 @@ def register_os_getpgrp(self): name = 'getpgrp' if self.GETPGRP_HAVE_ARG: - c_func = self.llexternal(name, [rffi.INT], rffi.INT) + c_func = self.llexternal(name, [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def c_func_llimpl(): res = rffi.cast(rffi.SIGNED, c_func(0)) if res == -1: - raise OSError(rposix.get_errno(), "%s failed" % name) + raise OSError(rposix.get_saved_errno(), "%s failed" % name) return res c_func_llimpl.func_name = name + '_llimpl' @@ -791,11 +818,12 @@ def register_os_setpgrp(self): name = 'setpgrp' if self.SETPGRP_HAVE_ARG: - c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT) + c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def c_func_llimpl(): res = rffi.cast(rffi.SIGNED, c_func(0, 0)) if res == -1: - raise OSError(rposix.get_errno(), "%s failed" % name) + raise OSError(rposix.get_saved_errno(), "%s failed" % name) c_func_llimpl.func_name = name + '_llimpl' @@ -806,13 +834,14 @@ @registering_if(os, 'tcgetpgrp') def register_os_tcgetpgrp(self): - c_tcgetpgrp = self.llexternal('tcgetpgrp', [rffi.INT], rffi.PID_T) + c_tcgetpgrp = self.llexternal('tcgetpgrp', [rffi.INT], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) def c_tcgetpgrp_llimpl(fd): res = c_tcgetpgrp(rffi.cast(rffi.INT, fd)) res = rffi.cast(lltype.Signed, res) if res == -1: - raise OSError(rposix.get_errno(), "tcgetpgrp failed") + raise OSError(rposix.get_saved_errno(), "tcgetpgrp failed") return res return extdef([int], int, llimpl=c_tcgetpgrp_llimpl, @@ -821,14 +850,15 @@ @registering_if(os, 'tcsetpgrp') def register_os_tcsetpgrp(self): c_tcsetpgrp = 
self.llexternal('tcsetpgrp', [rffi.INT, rffi.PID_T], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def c_tcsetpgrp_llimpl(fd, pgrp): res = c_tcsetpgrp(rffi.cast(rffi.INT, fd), rffi.cast(rffi.PID_T, pgrp)) res = rffi.cast(lltype.Signed, res) if res == -1: - raise OSError(rposix.get_errno(), "tcsetpgrp failed") + raise OSError(rposix.get_saved_errno(), "tcsetpgrp failed") return extdef([int, int], None, llimpl=c_tcsetpgrp_llimpl, export_name='ll_os.ll_os_tcsetpgrp') @@ -863,7 +893,8 @@ @registering_if(os, 'getresuid') def register_os_getresuid(self): - c_getresuid = self.llexternal('getresuid', [rffi.INTP] * 3, rffi.INT) + c_getresuid = self.llexternal('getresuid', [rffi.INTP] * 3, rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def c_getresuid_llimpl(): out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') @@ -873,7 +904,7 @@ rffi.ptradd(out, 2)) res = rffi.cast(lltype.Signed, res) if res == -1: - raise OSError(rposix.get_errno(), "getresuid failed") + raise OSError(rposix.get_saved_errno(), "getresuid failed") return (rffi.cast(lltype.Signed, out[0]), rffi.cast(lltype.Signed, out[1]), rffi.cast(lltype.Signed, out[2])) @@ -885,7 +916,8 @@ @registering_if(os, 'getresgid') def register_os_getresgid(self): - c_getresgid = self.llexternal('getresgid', [rffi.INTP] * 3, rffi.INT) + c_getresgid = self.llexternal('getresgid', [rffi.INTP] * 3, rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def c_getresgid_llimpl(): out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') @@ -895,7 +927,7 @@ rffi.ptradd(out, 2)) res = rffi.cast(lltype.Signed, res) if res == -1: - raise OSError(rposix.get_errno(), "getresgid failed") + raise OSError(rposix.get_saved_errno(), "getresgid failed") return (rffi.cast(lltype.Signed, out[0]), rffi.cast(lltype.Signed, out[1]), rffi.cast(lltype.Signed, out[2])) @@ -907,26 +939,28 @@ @registering_if(os, 'setresuid') def register_os_setresuid(self): - c_setresuid = self.llexternal('setresuid', [rffi.INT] * 3, rffi.INT) + c_setresuid = 
self.llexternal('setresuid', [rffi.INT] * 3, rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def c_setresuid_llimpl(ruid, euid, suid): res = c_setresuid(ruid, euid, suid) res = rffi.cast(lltype.Signed, res) if res == -1: - raise OSError(rposix.get_errno(), "setresuid failed") + raise OSError(rposix.get_saved_errno(), "setresuid failed") return extdef([int, int, int], None, llimpl=c_setresuid_llimpl, export_name='ll_os.ll_os_setresuid') @registering_if(os, 'setresgid') def register_os_setresgid(self): - c_setresgid = self.llexternal('setresgid', [rffi.INT] * 3, rffi.INT) + c_setresgid = self.llexternal('setresgid', [rffi.INT] * 3, rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def c_setresgid_llimpl(rgid, egid, sgid): res = c_setresgid(rgid, egid, sgid) res = rffi.cast(lltype.Signed, res) if res == -1: - raise OSError(rposix.get_errno(), "setresgid failed") + raise OSError(rposix.get_saved_errno(), "setresgid failed") return extdef([int, int, int], None, llimpl=c_setresgid_llimpl, export_name='ll_os.ll_os_setresgid') @@ -935,11 +969,12 @@ def register_os_open(self, traits): os_open = self.llexternal(traits.posix_function_name('open'), [traits.CCHARP, rffi.INT, rffi.MODE_T], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def os_open_llimpl(path, flags, mode): result = rffi.cast(lltype.Signed, os_open(path, flags, mode)) if result == -1: - raise OSError(rposix.get_errno(), "os_open failed") + raise OSError(rposix.get_saved_errno(), "os_open failed") return result return extdef([traits.str0, int, int], int, traits.ll_os_name('open'), @@ -991,7 +1026,7 @@ def register_os_read(self): os_read = self.llexternal(UNDERSCORE_ON_WIN32 + 'read', [rffi.INT, rffi.VOIDP, rffi.SIZE_T], - rffi.SIZE_T) + rffi.SIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) def os_read_llimpl(fd, count): if count < 0: @@ -1001,7 +1036,7 @@ void_buf = rffi.cast(rffi.VOIDP, buf.raw) got = rffi.cast(lltype.Signed, os_read(fd, void_buf, count)) if got < 0: - raise OSError(rposix.get_errno(), "os_read failed") + 
raise OSError(rposix.get_saved_errno(), "os_read failed") return buf.str(got) return extdef([int, int], SomeString(can_be_None=True), @@ -1011,7 +1046,8 @@ def register_os_write(self): os_write = self.llexternal(UNDERSCORE_ON_WIN32 + 'write', [rffi.INT, rffi.VOIDP, rffi.SIZE_T], - rffi.SIZE_T) + rffi.SIZE_T, + save_err=rffi.RFFI_SAVE_ERRNO) def os_write_llimpl(fd, data): count = len(data) @@ -1021,7 +1057,7 @@ rffi.cast(rffi.INT, fd), buf, rffi.cast(rffi.SIZE_T, count))) if written < 0: - raise OSError(rposix.get_errno(), "os_write failed") + raise OSError(rposix.get_saved_errno(), "os_write failed") return written return extdef([int, str], SomeInteger(nonneg=True), @@ -1030,13 +1066,14 @@ @registering(os.close) def register_os_close(self): os_close = self.llexternal(UNDERSCORE_ON_WIN32 + 'close', [rffi.INT], - rffi.INT, releasegil=False) + rffi.INT, releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO) def close_llimpl(fd): rposix.validate_fd(fd) error = rffi.cast(lltype.Signed, os_close(rffi.cast(rffi.INT, fd))) if error == -1: - raise OSError(rposix.get_errno(), "close failed") + raise OSError(rposix.get_saved_errno(), "close failed") return extdef([int], s_None, llimpl=close_llimpl, export_name="ll_os.ll_os_close") @@ -1066,7 +1103,8 @@ os_lseek = self.llexternal(funcname, [rffi.INT, rffi.LONGLONG, rffi.INT], - rffi.LONGLONG, macro=True) + rffi.LONGLONG, macro=True, + save_err=rffi.RFFI_SAVE_ERRNO) def lseek_llimpl(fd, pos, how): rposix.validate_fd(fd) @@ -1076,7 +1114,7 @@ rffi.cast(rffi.INT, how)) res = rffi.cast(lltype.SignedLongLong, res) if res < 0: - raise OSError(rposix.get_errno(), "os_lseek failed") + raise OSError(rposix.get_saved_errno(), "os_lseek failed") return res return extdef([int, r_longlong, int], @@ -1087,7 +1125,9 @@ @registering_if(os, 'ftruncate') def register_os_ftruncate(self): os_ftruncate = self.llexternal('ftruncate', - [rffi.INT, rffi.LONGLONG], rffi.INT, macro=True) + [rffi.INT, rffi.LONGLONG], rffi.INT, + macro=True, + 
save_err=rffi.RFFI_SAVE_ERRNO) def ftruncate_llimpl(fd, length): rposix.validate_fd(fd) @@ -1095,7 +1135,7 @@ os_ftruncate(rffi.cast(rffi.INT, fd), rffi.cast(rffi.LONGLONG, length))) if res < 0: - raise OSError(rposix.get_errno(), "os_ftruncate failed") + raise OSError(rposix.get_saved_errno(), "os_ftruncate failed") return extdef([int, r_longlong], s_None, llimpl = ftruncate_llimpl, @@ -1104,41 +1144,45 @@ @registering_if(os, 'fsync') def register_os_fsync(self): if not _WIN32: - os_fsync = self.llexternal('fsync', [rffi.INT], rffi.INT) + os_fsync = self.llexternal('fsync', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) else: - os_fsync = self.llexternal('_commit', [rffi.INT], rffi.INT) + os_fsync = self.llexternal('_commit', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def fsync_llimpl(fd): rposix.validate_fd(fd) res = rffi.cast(rffi.SIGNED, os_fsync(rffi.cast(rffi.INT, fd))) if res < 0: - raise OSError(rposix.get_errno(), "fsync failed") + raise OSError(rposix.get_saved_errno(), "fsync failed") return extdef([int], s_None, llimpl=fsync_llimpl, export_name="ll_os.ll_os_fsync") @registering_if(os, 'fdatasync') def register_os_fdatasync(self): - os_fdatasync = self.llexternal('fdatasync', [rffi.INT], rffi.INT) + os_fdatasync = self.llexternal('fdatasync', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def fdatasync_llimpl(fd): rposix.validate_fd(fd) res = rffi.cast(rffi.SIGNED, os_fdatasync(rffi.cast(rffi.INT, fd))) if res < 0: - raise OSError(rposix.get_errno(), "fdatasync failed") + raise OSError(rposix.get_saved_errno(), "fdatasync failed") return extdef([int], s_None, llimpl=fdatasync_llimpl, export_name="ll_os.ll_os_fdatasync") @registering_if(os, 'fchdir') def register_os_fchdir(self): - os_fchdir = self.llexternal('fchdir', [rffi.INT], rffi.INT) + os_fchdir = self.llexternal('fchdir', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def fchdir_llimpl(fd): rposix.validate_fd(fd) res = rffi.cast(rffi.SIGNED, 
os_fchdir(rffi.cast(rffi.INT, fd))) if res < 0: - raise OSError(rposix.get_errno(), "fchdir failed") + raise OSError(rposix.get_saved_errno(), "fchdir failed") return extdef([int], s_None, llimpl=fchdir_llimpl, export_name="ll_os.ll_os_fchdir") @@ -1180,7 +1224,8 @@ def register_os_getcwd(self): os_getcwd = self.llexternal(UNDERSCORE_ON_WIN32 + 'getcwd', [rffi.CCHARP, rffi.SIZE_T], - rffi.CCHARP) + rffi.CCHARP, + save_err=rffi.RFFI_SAVE_ERRNO) def os_getcwd_llimpl(): bufsize = 256 @@ -1189,7 +1234,7 @@ res = os_getcwd(buf, rffi.cast(rffi.SIZE_T, bufsize)) if res: break # ok - error = rposix.get_errno() + error = rposix.get_saved_errno() lltype.free(buf, flavor='raw') if error != errno.ERANGE: raise OSError(error, "getcwd failed") @@ -1208,7 +1253,8 @@ def register_os_getcwdu(self): os_wgetcwd = self.llexternal(UNDERSCORE_ON_WIN32 + 'wgetcwd', [rffi.CWCHARP, rffi.SIZE_T], - rffi.CWCHARP) + rffi.CWCHARP, + save_err=rffi.RFFI_SAVE_ERRNO) def os_getcwd_llimpl(): bufsize = 256 @@ -1217,7 +1263,7 @@ res = os_wgetcwd(buf, rffi.cast(rffi.SIZE_T, bufsize)) if res: break # ok - error = rposix.get_errno() + error = rposix.get_saved_errno() lltype.free(buf, flavor='raw') if error != errno.ERANGE: raise OSError(error, "getcwd failed") @@ -1253,11 +1299,13 @@ DIRENT = config['DIRENT'] DIRENTP = lltype.Ptr(DIRENT) os_opendir = self.llexternal('opendir', [rffi.CCHARP], DIRP, - compilation_info=compilation_info) + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO) # XXX macro=True is hack to make sure we get the correct kind of # dirent struct (which depends on defines) os_readdir = self.llexternal('readdir', [DIRP], DIRENTP, compilation_info=compilation_info, + save_err=rffi.RFFI_FULL_ERRNO, macro=True) os_closedir = self.llexternal('closedir', [DIRP], rffi.INT, compilation_info=compilation_info) @@ -1265,13 +1313,13 @@ def os_listdir_llimpl(path): dirp = os_opendir(path) if not dirp: - raise OSError(rposix.get_errno(), "os_opendir failed") + raise 
OSError(rposix.get_saved_errno(), "os_opendir failed") result = [] while True: - rposix.set_errno(0) + rposix.set_saved_errno(0) direntp = os_readdir(dirp) if not direntp: - error = rposix.get_errno() + error = rposix.get_saved_errno() break namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) name = rffi.charp2str(namep) @@ -1322,7 +1370,8 @@ else: INT_ARRAY_P = rffi.CArrayPtr(rffi.INT) - os_pipe = self.llexternal('pipe', [INT_ARRAY_P], rffi.INT) + os_pipe = self.llexternal('pipe', [INT_ARRAY_P], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def os_pipe_llimpl(): filedes = lltype.malloc(INT_ARRAY_P.TO, 2, flavor='raw') @@ -1331,7 +1380,7 @@ write_fd = filedes[1] lltype.free(filedes, flavor='raw') if error != 0: - raise OSError(rposix.get_errno(), "os_pipe failed") + raise OSError(rposix.get_saved_errno(), "os_pipe failed") return (rffi.cast(lltype.Signed, read_fd), rffi.cast(lltype.Signed, write_fd)) @@ -1342,12 +1391,13 @@ @registering_if(os, 'chown') def register_os_chown(self): os_chown = self.llexternal('chown', [rffi.CCHARP, rffi.INT, rffi.INT], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def os_chown_llimpl(path, uid, gid): res = os_chown(path, uid, gid) if res == -1: - raise OSError(rposix.get_errno(), "os_chown failed") + raise OSError(rposix.get_saved_errno(), "os_chown failed") return extdef([str0, int, int], None, "ll_os.ll_os_chown", llimpl=os_chown_llimpl) @@ -1355,12 +1405,13 @@ @registering_if(os, 'lchown') def register_os_lchown(self): os_lchown = self.llexternal('lchown',[rffi.CCHARP, rffi.INT, rffi.INT], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def os_lchown_llimpl(path, uid, gid): res = os_lchown(path, uid, gid) if res == -1: - raise OSError(rposix.get_errno(), "os_lchown failed") + raise OSError(rposix.get_saved_errno(), "os_lchown failed") return extdef([str0, int, int], None, "ll_os.ll_os_lchown", llimpl=os_lchown_llimpl) @@ -1368,12 +1419,13 @@ @registering_if(os, 'fchown') def register_os_fchown(self): os_fchown = 
self.llexternal('fchown',[rffi.INT, rffi.INT, rffi.INT], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def os_fchown_llimpl(fd, uid, gid): res = os_fchown(fd, uid, gid) if res == -1: - raise OSError(rposix.get_errno(), "os_fchown failed") + raise OSError(rposix.get_saved_errno(), "os_fchown failed") return extdef([int, int, int], None, "ll_os.ll_os_fchown", llimpl=os_fchown_llimpl) @@ -1382,7 +1434,9 @@ def register_os_readlink(self): os_readlink = self.llexternal('readlink', [rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + # XXX SSIZE_T in POSIX.1-2001 def os_readlink_llimpl(path): @@ -1394,7 +1448,7 @@ res = rffi.cast(lltype.Signed, os_readlink(l_path, buf, bufsize)) lltype.free(l_path, flavor='raw') if res < 0: - error = rposix.get_errno() # failed + error = rposix.get_saved_errno() # failed lltype.free(buf, flavor='raw') raise OSError(error, "readlink failed") elif res < bufsize: @@ -1418,7 +1472,8 @@ # emulate waitpid() with the _cwait() of Microsoft's compiler os__cwait = self.llexternal('_cwait', [rffi.INTP, rffi.PID_T, rffi.INT], - rffi.PID_T) + rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) def os_waitpid(pid, status_p, options): result = os__cwait(status_p, pid, options) # shift the status left a byte so this is more @@ -1432,11 +1487,13 @@ if _CYGWIN: os_waitpid = self.llexternal('cygwin_waitpid', [rffi.PID_T, rffi.INTP, rffi.INT], - rffi.PID_T) + rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) else: os_waitpid = self.llexternal('waitpid', [rffi.PID_T, rffi.INTP, rffi.INT], - rffi.PID_T) + rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) def os_waitpid_llimpl(pid, options): status_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') @@ -1448,7 +1505,7 @@ status = status_p[0] lltype.free(status_p, flavor='raw') if result == -1: - raise OSError(rposix.get_errno(), "os_waitpid failed") + raise OSError(rposix.get_saved_errno(), "os_waitpid failed") return (rffi.cast(lltype.Signed, result), 
rffi.cast(lltype.Signed, status)) @@ -1497,12 +1554,13 @@ @registering_str_unicode(os.unlink) def register_os_unlink(self, traits): os_unlink = self.llexternal(traits.posix_function_name('unlink'), - [traits.CCHARP], rffi.INT) + [traits.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def unlink_llimpl(pathname): res = rffi.cast(lltype.Signed, os_unlink(pathname)) if res < 0: - raise OSError(rposix.get_errno(), "os_unlink failed") + raise OSError(rposix.get_saved_errno(), "os_unlink failed") if sys.platform == 'win32': from rpython.rtyper.module.ll_win32file import make_win32_traits @@ -1519,12 +1577,13 @@ @registering_str_unicode(os.chdir) def register_os_chdir(self, traits): os_chdir = self.llexternal(traits.posix_function_name('chdir'), - [traits.CCHARP], rffi.INT) + [traits.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def os_chdir_llimpl(path): res = rffi.cast(lltype.Signed, os_chdir(path)) if res < 0: - raise OSError(rposix.get_errno(), "os_chdir failed") + raise OSError(rposix.get_saved_errno(), "os_chdir failed") # On Windows, use an implementation that will produce Win32 errors if sys.platform == 'win32': @@ -1537,7 +1596,8 @@ @registering_str_unicode(os.mkdir) def register_os_mkdir(self, traits): os_mkdir = self.llexternal(traits.posix_function_name('mkdir'), - [traits.CCHARP, rffi.MODE_T], rffi.INT) + [traits.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) if sys.platform == 'win32': from rpython.rtyper.module.ll_win32file import make_win32_traits @@ -1552,7 +1612,7 @@ res = os_mkdir(pathname, mode) res = rffi.cast(lltype.Signed, res) if res < 0: - raise OSError(rposix.get_errno(), "os_mkdir failed") + raise OSError(rposix.get_saved_errno(), "os_mkdir failed") return extdef([traits.str0, int], s_None, llimpl=os_mkdir_llimpl, export_name=traits.ll_os_name('mkdir')) @@ -1560,12 +1620,13 @@ @registering_str_unicode(os.rmdir) def register_os_rmdir(self, traits): os_rmdir = self.llexternal(traits.posix_function_name('rmdir'), - 
[traits.CCHARP], rffi.INT) + [traits.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def rmdir_llimpl(pathname): res = rffi.cast(lltype.Signed, os_rmdir(pathname)) if res < 0: - raise OSError(rposix.get_errno(), "os_rmdir failed") + raise OSError(rposix.get_saved_errno(), "os_rmdir failed") return extdef([traits.str0], s_None, llimpl=rmdir_llimpl, export_name=traits.ll_os_name('rmdir')) @@ -1573,12 +1634,13 @@ @registering_str_unicode(os.chmod) def register_os_chmod(self, traits): os_chmod = self.llexternal(traits.posix_function_name('chmod'), - [traits.CCHARP, rffi.MODE_T], rffi.INT) + [traits.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def chmod_llimpl(path, mode): res = rffi.cast(lltype.Signed, os_chmod(path, rffi.cast(rffi.MODE_T, mode))) if res < 0: - raise OSError(rposix.get_errno(), "os_chmod failed") + raise OSError(rposix.get_saved_errno(), "os_chmod failed") if sys.platform == 'win32': from rpython.rtyper.module.ll_win32file import make_chmod_impl @@ -1590,13 +1652,14 @@ @registering_if(os, 'fchmod') def register_os_fchmod(self): os_fchmod = self.llexternal('fchmod', [rffi.INT, rffi.MODE_T], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def fchmod_llimpl(fd, mode): mode = rffi.cast(rffi.MODE_T, mode) res = rffi.cast(lltype.Signed, os_fchmod(fd, mode)) if res < 0: - raise OSError(rposix.get_errno(), "os_fchmod failed") + raise OSError(rposix.get_saved_errno(), "os_fchmod failed") return extdef([int, int], s_None, "ll_os.ll_os_fchmod", llimpl=fchmod_llimpl) @@ -1604,12 +1667,13 @@ @registering_str_unicode(os.rename) def register_os_rename(self, traits): os_rename = self.llexternal(traits.posix_function_name('rename'), - [traits.CCHARP, traits.CCHARP], rffi.INT) + [traits.CCHARP, traits.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def rename_llimpl(oldpath, newpath): res = rffi.cast(lltype.Signed, os_rename(oldpath, newpath)) if res < 0: - raise OSError(rposix.get_errno(), "os_rename failed") + raise 
OSError(rposix.get_saved_errno(), "os_rename failed") if sys.platform == 'win32': from rpython.rtyper.module.ll_win32file import make_win32_traits @@ -1626,12 +1690,13 @@ @registering_str_unicode(getattr(os, 'mkfifo', None)) def register_os_mkfifo(self, traits): os_mkfifo = self.llexternal(traits.posix_function_name('mkfifo'), - [traits.CCHARP, rffi.MODE_T], rffi.INT) + [traits.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def mkfifo_llimpl(path, mode): res = rffi.cast(lltype.Signed, os_mkfifo(path, mode)) if res < 0: - raise OSError(rposix.get_errno(), "os_mkfifo failed") + raise OSError(rposix.get_saved_errno(), "os_mkfifo failed") return extdef([traits.str0, int], s_None, llimpl=mkfifo_llimpl, export_name=traits.ll_os_name('mkfifo')) @@ -1640,12 +1705,13 @@ def register_os_mknod(self, traits): os_mknod = self.llexternal(traits.posix_function_name('mknod'), [traits.CCHARP, rffi.MODE_T, rffi.INT], - rffi.INT) # xxx: actually ^^^ dev_t + rffi.INT, # xxx: actually ^^^ dev_t + save_err=rffi.RFFI_SAVE_ERRNO) def mknod_llimpl(path, mode, dev): res = rffi.cast(lltype.Signed, os_mknod(path, mode, dev)) if res < 0: - raise OSError(rposix.get_errno(), "os_mknod failed") + raise OSError(rposix.get_sved_errno(), "os_mknod failed") return extdef([traits.str0, int, int], s_None, llimpl=mknod_llimpl, export_name=traits.ll_os_name('mknod')) @@ -1665,25 +1731,27 @@ @registering_if(os, 'kill', sys.platform != 'win32') def register_os_kill(self): os_kill = self.llexternal('kill', [rffi.PID_T, rffi.INT], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def kill_llimpl(pid, sig): res = rffi.cast(lltype.Signed, os_kill(rffi.cast(rffi.PID_T, pid), rffi.cast(rffi.INT, sig))) if res < 0: - raise OSError(rposix.get_errno(), "os_kill failed") + raise OSError(rposix.get_saved_errno(), "os_kill failed") return extdef([int, int], s_None, llimpl=kill_llimpl, export_name="ll_os.ll_os_kill") @registering_if(os, 'killpg') def register_os_killpg(self): os_killpg = 
self.llexternal('killpg', [rffi.INT, rffi.INT], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def killpg_llimpl(pid, sig): res = rffi.cast(lltype.Signed, os_killpg(rffi.cast(rffi.INT, pid), rffi.cast(rffi.INT, sig))) if res < 0: - raise OSError(rposix.get_errno(), "os_killpg failed") + raise OSError(rposix.get_saved_errno(), "os_killpg failed") return extdef([int, int], s_None, llimpl=killpg_llimpl, export_name="ll_os.ll_os_killpg") @@ -1691,12 +1759,13 @@ @registering_if(os, 'link') def register_os_link(self): os_link = self.llexternal('link', [rffi.CCHARP, rffi.CCHARP], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def link_llimpl(oldpath, newpath): res = rffi.cast(lltype.Signed, os_link(oldpath, newpath)) if res < 0: - raise OSError(rposix.get_errno(), "os_link failed") + raise OSError(rposix.get_saved_errno(), "os_link failed") return extdef([str0, str0], s_None, llimpl=link_llimpl, export_name="ll_os.ll_os_link") @@ -1704,12 +1773,13 @@ @registering_if(os, 'symlink') def register_os_symlink(self): os_symlink = self.llexternal('symlink', [rffi.CCHARP, rffi.CCHARP], - rffi.INT) + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def symlink_llimpl(oldpath, newpath): res = rffi.cast(lltype.Signed, os_symlink(oldpath, newpath)) if res < 0: - raise OSError(rposix.get_errno(), "os_symlink failed") + raise OSError(rposix.get_saved_errno(), "os_symlink failed") return extdef([str0, str0], s_None, llimpl=symlink_llimpl, export_name="ll_os.ll_os_symlink") @@ -1725,9 +1795,10 @@ ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) + errno = rposix._get_errno() rthread.gc_thread_after_fork(childpid, opaqueaddr) if childpid == -1: - raise OSError(rposix.get_errno(), "os_fork failed") + raise OSError(errno, "os_fork failed") if childpid == 0: debug.debug_forked(ofs) return rffi.cast(lltype.Signed, childpid) @@ -1741,7 +1812,8 @@ 'openpty', [rffi.INTP, rffi.INTP, rffi.VOIDP, rffi.VOIDP, 
rffi.VOIDP], rffi.INT, - compilation_info=ExternalCompilationInfo(libraries=['util'])) + compilation_info=ExternalCompilationInfo(libraries=['util']), + save_err=rffi.RFFI_SAVE_ERRNO) def openpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') slave_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') @@ -1751,7 +1823,7 @@ lltype.free(master_p, flavor='raw') lltype.free(slave_p, flavor='raw') if result == -1: - raise OSError(rposix.get_errno(), "os_openpty failed") + raise OSError(rposix.get_saved_errno(), "os_openpty failed") return (rffi.cast(lltype.Signed, master_fd), rffi.cast(lltype.Signed, slave_fd)) @@ -1765,7 +1837,8 @@ 'forkpty', [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], rffi.PID_T, - compilation_info=ExternalCompilationInfo(libraries=['util'])) + compilation_info=ExternalCompilationInfo(libraries=['util']), + save_err=rffi.RFFI_SAVE_ERRNO) def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') master_p[0] = rffi.cast(rffi.INT, -1) @@ -1777,7 +1850,7 @@ master_fd = master_p[0] lltype.free(master_p, flavor='raw') if childpid == -1: - raise OSError(rposix.get_errno(), "os_forkpty failed") + raise OSError(rposix.get_saved_errno(), "os_forkpty failed") if childpid == 0: debug.debug_forked(ofs) return (rffi.cast(lltype.Signed, childpid), @@ -1800,16 +1873,17 @@ @registering_if(os, 'nice') def register_os_nice(self): - os_nice = self.llexternal('nice', [rffi.INT], rffi.INT) + os_nice = self.llexternal('nice', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_FULL_ERRNO) def nice_llimpl(inc): # Assume that the system provides a standard-compliant version # of nice() that returns the new priority. Nowadays, FreeBSD # might be the last major non-compliant system (xxx check me). 
- rposix.set_errno(0) + rposix.set_saved_errno(0) res = rffi.cast(lltype.Signed, os_nice(inc)) if res == -1: - err = rposix.get_errno() + err = rposix.get_saved_errno() if err != 0: raise OSError(err, "os_nice failed") return res @@ -1907,12 +1981,14 @@ @registering_if(os, 'ttyname') def register_os_ttyname(self): - os_ttyname = self.llexternal('ttyname', [lltype.Signed], rffi.CCHARP, releasegil=False) + os_ttyname = self.llexternal('ttyname', [lltype.Signed], rffi.CCHARP, + releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO) def ttyname_llimpl(fd): l_name = os_ttyname(fd) if not l_name: - raise OSError(rposix.get_errno(), "ttyname raised") + raise OSError(rposix.get_saved_errno(), "ttyname raised") return rffi.charp2str(l_name) return extdef([int], str, "ll_os.ttyname", diff --git a/rpython/rtyper/module/ll_os_environ.py b/rpython/rtyper/module/ll_os_environ.py --- a/rpython/rtyper/module/ll_os_environ.py +++ b/rpython/rtyper/module/ll_os_environ.py @@ -118,7 +118,8 @@ os_getenv = rffi.llexternal('getenv', [rffi.CCHARP], rffi.CCHARP, releasegil=False) -os_putenv = rffi.llexternal(prefix + 'putenv', [rffi.CCHARP], rffi.INT) +os_putenv = rffi.llexternal(prefix + 'putenv', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) if _WIN32: _wgetenv = rffi.llexternal('_wgetenv', [rffi.CWCHARP], rffi.CWCHARP, compilation_info=eci, releasegil=False) @@ -138,7 +139,7 @@ byname, eq = envkeepalive.byname, '=' def last_error(msg): from rpython.rlib import rposix - raise OSError(rposix.get_errno(), msg) + raise OSError(rposix.get_saved_errno(), msg) else: traits = UnicodeTraits() get_environ, getenv, putenv = get__wenviron, _wgetenv, _wputenv @@ -197,14 +198,15 @@ r_putenv(name, '') if hasattr(__import__(os.name), 'unsetenv'): - os_unsetenv = rffi.llexternal('unsetenv', [rffi.CCHARP], rffi.INT) + os_unsetenv = rffi.llexternal('unsetenv', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def unsetenv_llimpl(name): with rffi.scoped_str2charp(name) as l_name: error = 
rffi.cast(lltype.Signed, os_unsetenv(l_name)) if error: from rpython.rlib import rposix - raise OSError(rposix.get_errno(), "os_unsetenv failed") + raise OSError(rposix.get_saved_errno(), "os_unsetenv failed") try: l_oldstring = envkeepalive.byname[name] except KeyError: diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -349,7 +349,8 @@ posix_mystat = rffi.llexternal(c_func_name, [ARG1, STAT_STRUCT], rffi.INT, - compilation_info=compilation_info) + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO) @func_renamer('os_%s_llimpl' % (name,)) def posix_stat_llimpl(arg): @@ -361,7 +362,7 @@ if arg_is_path: traits.free_charp(arg) if error != 0: - raise OSError(rposix.get_errno(), "os_?stat failed") + raise OSError(rposix.get_saved_errno(), "os_?stat failed") return build_stat_result(stresult) finally: lltype.free(stresult, flavor='raw') @@ -401,8 +402,8 @@ posix_mystatvfs = rffi.llexternal(name, [ARG1, STATVFS_STRUCT], rffi.INT, - compilation_info=compilation_info - ) + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO) @func_renamer('os_%s_llimpl' % (name,)) def posix_statvfs_llimpl(arg): @@ -414,7 +415,7 @@ if arg_is_path: traits.free_charp(arg) if error != 0: - raise OSError(rposix.get_errno(), "os_?statvfs failed") + raise OSError(rposix.get_saved_errno(), "os_?statvfs failed") return build_statvfs_result(stresult) finally: lltype.free(stresult, flavor='raw') diff --git a/rpython/rtyper/module/ll_win32file.py b/rpython/rtyper/module/ll_win32file.py --- a/rpython/rtyper/module/ll_win32file.py +++ b/rpython/rtyper/module/ll_win32file.py @@ -168,7 +168,8 @@ CreateDirectory = external( 'CreateDirectory' + suffix, [traits.CCHARP, rffi.VOIDP], - rwin32.BOOL) + rwin32.BOOL, + XXX) # save_err=rffi.RFFI_SAVE_LASTERROR SetEnvironmentVariable = external( 'SetEnvironmentVariable' + suffix, diff --git 
a/rpython/translator/c/src/thread_gil.c b/rpython/translator/c/src/thread_gil.c --- a/rpython/translator/c/src/thread_gil.c +++ b/rpython/translator/c/src/thread_gil.c @@ -52,11 +52,6 @@ void RPyGilAcquire(void) { /* Acquires the GIL. - - Note: in the slow path, this function saves and restores 'errno'. - This is needed for now because it may be *followed* by reading - the 'errno'. It's a bit strange, because we could read the errno - before calling RPyGilAcquire(), but it's simpler this way. */ long old_fastgil = lock_test_and_set(&rpy_fastgil, 1); @@ -67,7 +62,6 @@ } else { /* Otherwise, another thread is busy with the GIL. */ - SAVE_ERRNO(); /* Register me as one of the threads that is actively waiting for the GIL. The number of such threads is found in @@ -109,8 +103,6 @@ atomic_decrement(&rpy_waiting_threads); mutex2_loop_stop(&mutex_gil); mutex1_unlock(&mutex_gil_stealer); - - RESTORE_ERRNO(); } assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -8,7 +8,6 @@ #include #include #include -#include /* @@ -244,9 +243,4 @@ #define atomic_increment(ptr) InterlockedIncrement(ptr) #define atomic_decrement(ptr) InterlockedDecrement(ptr) -#define SAVE_ERRNO() int saved_errno = errno; \ - DWORD saved_lasterr = GetLastError() -#define RESTORE_ERRNO() errno = saved_errno; \ - SetLastError(saved_lasterr) - #include "src/thread_gil.c" diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -533,7 +533,4 @@ #define atomic_increment(ptr) __sync_fetch_and_add(ptr, 1) #define atomic_decrement(ptr) __sync_fetch_and_sub(ptr, 1) -#define SAVE_ERRNO() int saved_errno = errno -#define RESTORE_ERRNO() errno = saved_errno - #include "src/thread_gil.c" diff --git 
a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -2,7 +2,6 @@ #include "structdef.h" /* for struct pypy_threadlocal_s */ #include #include -#include #include #ifndef _WIN32 # include @@ -13,9 +12,6 @@ static void _RPy_ThreadLocals_Init(void *p) { memset(p, 0, sizeof(struct pypy_threadlocal_s)); -#ifdef RPY_TLOFS_p_errno - ((struct pypy_threadlocal_s *)p)->p_errno = &errno; -#endif #ifdef RPY_TLOFS_thread_ident ((struct pypy_threadlocal_s *)p)->thread_ident = # ifdef _WIN32 From noreply at buildbot.pypy.org Wed Jan 14 17:32:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 17:32:14 +0100 (CET) Subject: [pypy-commit] pypy errno-again: more Message-ID: <20150114163214.28DF61C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75326:f3a322ed81fc Date: 2015-01-14 17:31 +0100 http://bitbucket.org/pypy/pypy/changeset/f3a322ed81fc/ Log: more diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -58,11 +58,15 @@ math_atan2 = llexternal('atan2', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) math_frexp = llexternal('frexp', [rffi.DOUBLE, rffi.INTP], rffi.DOUBLE) math_modf = llexternal('modf', [rffi.DOUBLE, rffi.DOUBLEP], rffi.DOUBLE) -math_ldexp = llexternal('ldexp', [rffi.DOUBLE, rffi.INT], rffi.DOUBLE) -math_pow = llexternal('pow', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) -math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) +math_ldexp = llexternal('ldexp', [rffi.DOUBLE, rffi.INT], rffi.DOUBLE, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) +math_pow = llexternal('pow', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) +math_fmod = llexternal('fmod', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE, + 
save_err=rffi.RFFI_FULL_ERRNO_ZERO) math_hypot = llexternal(UNDERSCORE_ON_WIN32 + 'hypot', - [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE) + [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) math_floor = llexternal('floor', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) math_sqrt = llexternal('sqrt', [rffi.DOUBLE], rffi.DOUBLE) math_sin = llexternal('sin', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) @@ -80,9 +84,6 @@ ERANGE = errno.ERANGE EDOM = errno.EDOM -def _error_reset(): - rposix.set_errno(0) - def _likely_raise(errno, x): """Call this with errno != 0. It usually raises the proper RPython exception, but may also just ignore it and return in case of underflow. @@ -209,9 +210,8 @@ r = math_copysign(0.0, x) errno = 0 else: - _error_reset() r = math_ldexp(x, exp) - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if isinf(r): errno = ERANGE if errno: @@ -241,9 +241,8 @@ if isinf(y) and isfinite(x): return x - _error_reset() r = math_fmod(x, y) - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if isnan(r): if isnan(x) or isnan(y): errno = 0 @@ -261,9 +260,8 @@ if isinf(y): return math_fabs(y) - _error_reset() r = math_hypot(x, y) - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if not isfinite(r): if isnan(r): if isnan(x) or isnan(y): @@ -319,9 +317,8 @@ else: return 0.0 - _error_reset() r = math_pow(x, y) - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if not isfinite(r): if isnan(r): # a NaN result should arise only from (-ve)**(finite non-integer) @@ -382,15 +379,16 @@ def new_unary_math_function(name, can_overflow, c99): if sys.platform == 'win32' and c99: - c_func = math_llexternal(name, [rffi.DOUBLE], rffi.DOUBLE) + c_func = math_llexternal(name, [rffi.DOUBLE], rffi.DOUBLE, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) else: - c_func = llexternal(name, [rffi.DOUBLE], rffi.DOUBLE) + c_func = llexternal(name, [rffi.DOUBLE], rffi.DOUBLE, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) 
def ll_math(x): - _error_reset() r = c_func(x) # Error checking fun. Copied from CPython 2.6 - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if not isfinite(r): if isnan(r): if isnan(x): diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -59,11 +59,13 @@ hop.exception_cannot_occur() return hop.inputconst(lltype.Bool, hop.s_result.const) -RFFI_SAVE_ERRNO = 1 -RFFI_READSAVED_ERRNO = 2 +RFFI_SAVE_ERRNO = 1 # save the real errno after the call +RFFI_READSAVED_ERRNO = 2 # copy saved errno into real errno before call +RFFI_ZERO_ERRNO_BEFORE = 4 # copy the value 0 into real errno before call RFFI_FULL_ERRNO = RFFI_SAVE_ERRNO | RFFI_READSAVED_ERRNO -RFFI_SAVE_LASTERROR = 4 -RFFI_READSAVED_LASTERROR = 8 +RFFI_FULL_ERRNO_ZERO = RFFI_SAVE_ERRNO | RFFI_ZERO_ERRNO_BEFORE +RFFI_SAVE_LASTERROR = 8 +RFFI_READSAVED_LASTERROR = 16 RFFI_FULL_LASTERROR = RFFI_SAVE_LASTERROR | RFFI_READSAVED_LASTERROR RFFI_ERR_NONE = 0 RFFI_ERR_ALL = RFFI_FULL_ERRNO | RFFI_FULL_LASTERROR @@ -157,7 +159,9 @@ argnames = ', '.join(['a%d' % i for i in range(len(args))]) errno_before = (save_err & RFFI_READSAVED_ERRNO) != 0 + errno_zero_before = (save_err & RFFI_ZERO_ERRNO_BEFORE) != 0 errno_after = (save_err & RFFI_SAVE_ERRNO) != 0 + errno_any = errno_before or errno_zero_before or errno_after if invoke_around_handlers: # The around-handlers are releasing the GIL in a threaded pypy. @@ -170,7 +174,7 @@ # argument to wrapper(), if any (e.g. RPython strings). 
source = py.code.Source(""" - if %(errno_before)s or %(errno_after)s: + if %(errno_any)s: from rpython.rlib import rposix, rthread def call_external_function(%(argnames)s): @@ -181,6 +185,8 @@ # restore errno from its saved value if %(errno_before)s: rposix._set_errno(rthread.tlfield_rpy_errno.getraw()) + elif %(errno_zero_before)s: + rposix._set_errno(int_zero) # res = funcptr(%(argnames)s) # @@ -194,6 +200,7 @@ """ % locals()) miniglobals = {'aroundstate': aroundstate, 'funcptr': funcptr, + 'int_zero': cast(INT, 0), '__name__': __name__, # for module name propagation } exec source.compile() in miniglobals @@ -221,13 +228,15 @@ # ...well, unless it's a macro, in which case we still have # to hide it from the JIT... source = py.code.Source(""" - if %(errno_before)s or %(errno_after)s: + if %(errno_any)s: from rpython.rlib import rposix, rthread def call_external_function(%(argnames)s): # restore errno from its saved value if %(errno_before)s: rposix._set_errno(rthread.tlfield_rpy_errno.getraw()) + elif %(errno_zero_before)s: + rposix._set_errno(int_zero) # res = funcptr(%(argnames)s) # @@ -238,6 +247,7 @@ return res """ % locals()) miniglobals = {'funcptr': funcptr, + 'int_zero': cast(INT, 0), '__name__': __name__, } exec source.compile() in miniglobals diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -630,10 +630,9 @@ @registering_if(os, 'sysconf') def register_os_sysconf(self): c_sysconf = self.llexternal('sysconf', [rffi.INT], rffi.LONG, - save_err=rffi.RFFI_FULL_ERRNO) + save_err=rffi.RFFI_FULL_ERRNO_ZERO) def sysconf_llimpl(i): - rposix.set_saved_errno(0) res = c_sysconf(i) if res == -1: errno = rposix.get_saved_errno() @@ -646,10 +645,9 @@ def register_os_fpathconf(self): c_fpathconf = self.llexternal('fpathconf', [rffi.INT, rffi.INT], rffi.LONG, - save_err=rffi.RFFI_FULL_ERRNO) + save_err=rffi.RFFI_FULL_ERRNO_ZERO) def fpathconf_llimpl(fd, i): - 
rposix.set_saved_errno(0) res = c_fpathconf(fd, i) if res == -1: errno = rposix.get_saved_errno() @@ -663,10 +661,9 @@ def register_os_pathconf(self): c_pathconf = self.llexternal('pathconf', [rffi.CCHARP, rffi.INT], rffi.LONG, - save_err=rffi.RFFI_FULL_ERRNO) + save_err=rffi.RFFI_FULL_ERRNO_ZERO) def pathconf_llimpl(path, i): - rposix.set_saved_errno(0) res = c_pathconf(path, i) if res == -1: errno = rposix.get_saved_errno() @@ -680,10 +677,9 @@ def register_os_confstr(self): c_confstr = self.llexternal('confstr', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T, - save_err=rffi.RFFI_FULL_ERRNO) + save_err=rffi.RFFI_FULL_ERRNO_ZERO) def confstr_llimpl(i): - rposix.set_saved_errno(0) n = c_confstr(i, lltype.nullptr(rffi.CCHARP.TO), 0) n = rffi.cast(lltype.Signed, n) if n > 0: @@ -1305,7 +1301,7 @@ # dirent struct (which depends on defines) os_readdir = self.llexternal('readdir', [DIRP], DIRENTP, compilation_info=compilation_info, - save_err=rffi.RFFI_FULL_ERRNO, + save_err=rffi.RFFI_FULL_ERRNO_ZERO, macro=True) os_closedir = self.llexternal('closedir', [DIRP], rffi.INT, compilation_info=compilation_info) @@ -1316,7 +1312,6 @@ raise OSError(rposix.get_saved_errno(), "os_opendir failed") result = [] while True: - rposix.set_saved_errno(0) direntp = os_readdir(dirp) if not direntp: error = rposix.get_saved_errno() @@ -1711,7 +1706,7 @@ def mknod_llimpl(path, mode, dev): res = rffi.cast(lltype.Signed, os_mknod(path, mode, dev)) if res < 0: - raise OSError(rposix.get_sved_errno(), "os_mknod failed") + raise OSError(rposix.get_saved_errno(), "os_mknod failed") return extdef([traits.str0, int, int], s_None, llimpl=mknod_llimpl, export_name=traits.ll_os_name('mknod')) @@ -1874,13 +1869,12 @@ @registering_if(os, 'nice') def register_os_nice(self): os_nice = self.llexternal('nice', [rffi.INT], rffi.INT, - save_err=rffi.RFFI_FULL_ERRNO) + save_err=rffi.RFFI_FULL_ERRNO_ZERO) def nice_llimpl(inc): # Assume that the system provides a standard-compliant version # of nice() that 
returns the new priority. Nowadays, FreeBSD # might be the last major non-compliant system (xxx check me). - rposix.set_saved_errno(0) res = rffi.cast(lltype.Signed, os_nice(inc)) if res == -1: err = rposix.get_saved_errno() diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -218,7 +218,8 @@ else: c_select = self.llexternal('select', [rffi.INT, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP, - self.TIMEVALP], rffi.INT) + self.TIMEVALP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def time_sleep_llimpl(secs): void = lltype.nullptr(rffi.VOIDP.TO) t = lltype.malloc(self.TIMEVAL, flavor='raw') @@ -228,9 +229,9 @@ rffi.setintfield(t, 'c_tv_usec', int(frac*1000000.0)) if rffi.cast(rffi.LONG, c_select(0, void, void, void, t)) != 0: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() if errno != EINTR: - raise OSError(rposix.get_errno(), "Select failed") + raise OSError(errno, "Select failed") finally: lltype.free(t, flavor='raw') From noreply at buildbot.pypy.org Wed Jan 14 18:18:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 18:18:43 +0100 (CET) Subject: [pypy-commit] pypy errno-again: in-progress Message-ID: <20150114171843.DBEF61C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75327:abcf3346ba45 Date: 2015-01-14 18:18 +0100 http://bitbucket.org/pypy/pypy/changeset/abcf3346ba45/ Log: in-progress diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1999,12 +1999,10 @@ or isinstance(RESTYPE, lltype.Ptr)) c_offset, = op.args op1 = self.prepare_builtin_call(op, 'threadlocalref_get', [c_offset]) - if c_offset.value.startswith('RPY_TLOFSLOOPINVARIANT_'): + if c_offset.value.loop_invariant: effect = EffectInfo.EF_LOOPINVARIANT - elif c_offset.value.startswith('RPY_TLOFS_'): + else: 
effect = EffectInfo.EF_CANNOT_RAISE - else: - assert 0 return self.handle_residual_call(op1, oopspecindex=EffectInfo.OS_THREADLOCALREF_GET, extraeffect=effect) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -160,7 +160,7 @@ elif oopspecindex == EI.OS_RAW_FREE: assert extraeffect == EI.EF_CANNOT_RAISE elif oopspecindex == EI.OS_THREADLOCALREF_GET: - assert extraeffect == EI.EF_LOOPINVARIANT + assert extraeffect == self.expected_effect_of_threadlocalref_get else: assert extraeffect == EI.EF_ELIDABLE_CANNOT_RAISE return 'calldescr-%d' % oopspecindex @@ -1342,14 +1342,19 @@ assert op1.result is None assert op2 is None -def test_threadlocalref_get(): +def _test_threadlocalref_get(loop_inv): from rpython.rlib.rthread import ThreadLocalField - tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_') + tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', + loop_invariant=loop_inv) OS_THREADLOCALREF_GET = effectinfo.EffectInfo.OS_THREADLOCALREF_GET c = const(tlfield.offset) v = varoftype(lltype.Signed) op = SpaceOperation('threadlocalref_get', [c], v) - tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + cc = FakeBuiltinCallControl() + cc.expected_effect_of_threadlocalref_get = ( + effectinfo.EffectInfo.EF_LOOPINVARIANT if loop_inv + else effectinfo.EffectInfo.EF_CANNOT_RAISE) + tr = Transformer(FakeCPU(), cc) op0 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_ir_i' assert op0.args[0].value == 'threadlocalref_get' # pseudo-function as str @@ -1358,6 +1363,12 @@ assert op0.args[3] == 'calldescr-%d' % OS_THREADLOCALREF_GET assert op0.result == v +def test_threadlocalref_get_no_loop_inv(): + _test_threadlocalref_get(loop_inv=False) + +def test_threadlocalref_get_with_loop_inv(): + _test_threadlocalref_get(loop_inv=True) + def test_unknown_operation(): op = SpaceOperation('foobar', 
[], varoftype(lltype.Void)) tr = Transformer() diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -462,6 +462,9 @@ if WIN32: WSAEVENT = cConfig.WSAEVENT WSANETWORKEVENTS = cConfig.WSANETWORKEVENTS + SAVE_ERR = rffi.RFFI_ERR_NONE +else: + SAVE_ERR = rffi.RFFI_SAVE_ERRNO timeval = cConfig.timeval @@ -474,7 +477,7 @@ calling_conv='c', **kwargs) if _POSIX: - dup = external('dup', [socketfd_type], socketfd_type) + dup = external('dup', [socketfd_type], socketfd_type, save_err=SAVE_ERR) gai_strerror = external('gai_strerror', [rffi.INT], CCHARP) #h_errno = c_int.in_dll(socketdll, 'h_errno') @@ -486,11 +489,14 @@ socket = external('socket', [rffi.INT, rffi.INT, rffi.INT], socketfd_type) if WIN32: - socketclose = external('closesocket', [socketfd_type], rffi.INT, releasegil=False) + socketclose = external('closesocket', [socketfd_type], rffi.INT, + releasegil=False, save_err=SAVE_ERR) else: - socketclose = external('close', [socketfd_type], rffi.INT, releasegil=False) + socketclose = external('close', [socketfd_type], rffi.INT, + releasegil=False, save_err=SAVE_ERR) -socketconnect = external('connect', [socketfd_type, sockaddr_ptr, socklen_t], rffi.INT) +socketconnect = external('connect', [socketfd_type, sockaddr_ptr, socklen_t], + rffi.INT, save_err=SAVE_ERR) getaddrinfo = external('getaddrinfo', [CCHARP, CCHARP, addrinfo_ptr, @@ -518,36 +524,48 @@ if _POSIX: inet_pton = external('inet_pton', [rffi.INT, rffi.CCHARP, - rffi.VOIDP], rffi.INT) + rffi.VOIDP], rffi.INT, + save_err=SAVE_ERR) inet_ntop = external('inet_ntop', [rffi.INT, rffi.VOIDP, CCHARP, - socklen_t], CCHARP) + socklen_t], CCHARP, + save_err=SAVE_ERR) inet_addr = external('inet_addr', [rffi.CCHARP], rffi.UINT) socklen_t_ptr = lltype.Ptr(rffi.CFixedArray(socklen_t, 1)) socketaccept = external('accept', [socketfd_type, sockaddr_ptr, - socklen_t_ptr], socketfd_type) + socklen_t_ptr], socketfd_type, + 
save_err=SAVE_ERR) socketbind = external('bind', [socketfd_type, sockaddr_ptr, socklen_t], - rffi.INT) -socketlisten = external('listen', [socketfd_type, rffi.INT], rffi.INT) + rffi.INT, save_err=SAVE_ERR) +socketlisten = external('listen', [socketfd_type, rffi.INT], rffi.INT, + save_err=SAVE_ERR) socketgetpeername = external('getpeername', [socketfd_type, - sockaddr_ptr, socklen_t_ptr], rffi.INT) + sockaddr_ptr, socklen_t_ptr], rffi.INT, + save_err=SAVE_ERR) socketgetsockname = external('getsockname', [socketfd_type, - sockaddr_ptr, socklen_t_ptr], rffi.INT) + sockaddr_ptr, socklen_t_ptr], rffi.INT, + save_err=SAVE_ERR) socketgetsockopt = external('getsockopt', [socketfd_type, rffi.INT, - rffi.INT, rffi.VOIDP, socklen_t_ptr], rffi.INT) + rffi.INT, rffi.VOIDP, socklen_t_ptr], rffi.INT, + save_err=SAVE_ERR) socketsetsockopt = external('setsockopt', [socketfd_type, rffi.INT, - rffi.INT, rffi.VOIDP, socklen_t], rffi.INT) + rffi.INT, rffi.VOIDP, socklen_t], rffi.INT, + save_err=SAVE_ERR) socketrecv = external('recv', [socketfd_type, rffi.VOIDP, rffi.INT, - rffi.INT], ssize_t) + rffi.INT], ssize_t, save_err=SAVE_ERR) recvfrom = external('recvfrom', [socketfd_type, rffi.VOIDP, size_t, - rffi.INT, sockaddr_ptr, socklen_t_ptr], rffi.INT) + rffi.INT, sockaddr_ptr, socklen_t_ptr], rffi.INT, + save_err=SAVE_ERR) send = external('send', [socketfd_type, rffi.CCHARP, size_t, rffi.INT], - ssize_t) + ssize_t, save_err=SAVE_ERR) sendto = external('sendto', [socketfd_type, rffi.VOIDP, size_t, rffi.INT, - sockaddr_ptr, socklen_t], ssize_t) -socketshutdown = external('shutdown', [socketfd_type, rffi.INT], rffi.INT) -gethostname = external('gethostname', [rffi.CCHARP, rffi.INT], rffi.INT) + sockaddr_ptr, socklen_t], ssize_t, + save_err=SAVE_ERR) +socketshutdown = external('shutdown', [socketfd_type, rffi.INT], rffi.INT, + save_err=SAVE_ERR) +gethostname = external('gethostname', [rffi.CCHARP, rffi.INT], rffi.INT, + save_err=SAVE_ERR) gethostbyname = external('gethostbyname', 
[rffi.CCHARP], lltype.Ptr(cConfig.hostent)) gethostbyaddr = external('gethostbyaddr', [rffi.VOIDP, rffi.INT, rffi.INT], lltype.Ptr(cConfig.hostent)) @@ -559,7 +577,8 @@ fcntl = external('fcntl', [socketfd_type, rffi.INT, rffi.INT], rffi.INT) socketpair_t = rffi.CArray(socketfd_type) socketpair = external('socketpair', [rffi.INT, rffi.INT, rffi.INT, - lltype.Ptr(socketpair_t)], rffi.INT) + lltype.Ptr(socketpair_t)], rffi.INT, + save_err=SAVE_ERR) if _HAS_AF_PACKET: ioctl = external('ioctl', [socketfd_type, rffi.INT, lltype.Ptr(ifreq)], rffi.INT) @@ -572,7 +591,8 @@ select = external('select', [rffi.INT, fd_set, fd_set, fd_set, lltype.Ptr(timeval)], - rffi.INT) + rffi.INT, + save_err=SAVE_ERR) FD_CLR = external_c('FD_CLR', [rffi.INT, fd_set], lltype.Void, macro=True) FD_ISSET = external_c('FD_ISSET', [rffi.INT, fd_set], rffi.INT, macro=True) @@ -582,7 +602,7 @@ if _POSIX: pollfdarray = rffi.CArray(pollfd) poll = external('poll', [lltype.Ptr(pollfdarray), nfds_t, rffi.INT], - rffi.INT) + rffi.INT, save_err=SAVE_ERR) # workaround for Mac OS/X on which poll() seems to behave a bit strangely # (see test_recv_send_timeout in pypy.module._socket.test.test_sock_app) # https://issues.apache.org/bugzilla/show_bug.cgi?id=34332 @@ -626,12 +646,12 @@ WSADuplicateSocket = external('WSADuplicateSocketA', [socketfd_type, rwin32.DWORD, lltype.Ptr(WSAPROTOCOL_INFO)], - rffi.INT) + rffi.INT, save_err=SAVE_ERR) WSASocket = external('WSASocketA', [rffi.INT, rffi.INT, rffi.INT, lltype.Ptr(WSAPROTOCOL_INFO), rwin32.DWORD, rwin32.DWORD], - socketfd_type) + socketfd_type, save_err=save_err) if WIN32: WSAData = cConfig.WSAData @@ -657,7 +677,7 @@ MAX_FD_SIZE = None else: - from rpython.rlib.rposix import get_errno as geterrno + from rpython.rlib.rposix import get_saved_errno as geterrno socket_strerror_str = os.strerror def gai_strerror_str(errno): diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -274,33 +274,40 @@ 
class ThreadLocalField(object): def __init__(self, FIELDTYPE, fieldname, loop_invariant=False): "NOT_RPYTHON: must be prebuilt" + from thread import _local self.FIELDTYPE = FIELDTYPE self.fieldname = fieldname - self.loop_invariant = loop_invariant - if loop_invariant: - invariant = 'LOOPINVARIANT' - else: - invariant = '' - offset = CDefinedIntSymbolic('RPY_TLOFS%s_%s' % (invariant, - self.fieldname), + self.local = _local() # <- NOT_RPYTHON + zero = rffi.cast(FIELDTYPE, 0) + offset = CDefinedIntSymbolic('RPY_TLOFS_%s' % self.fieldname, default='?') + offset.loop_invariant = loop_invariant self.offset = offset def getraw(): - _threadlocalref_seeme(self) - return llop.threadlocalref_get(FIELDTYPE, offset) + if we_are_translated(): + _threadlocalref_seeme(self) + return llop.threadlocalref_get(FIELDTYPE, offset) + else: + return getattr(self.local, 'rawvalue', zero) @jit.dont_look_inside def get_or_make_raw(): - _threadlocalref_seeme(self) - addr = llop.threadlocalref_addr(llmemory.Address) - return llop.raw_load(FIELDTYPE, addr, offset) + if we_are_translated(): + _threadlocalref_seeme(self) + addr = llop.threadlocalref_addr(llmemory.Address) + return llop.raw_load(FIELDTYPE, addr, offset) + else: + return getattr(self.local, 'rawvalue', zero) @jit.dont_look_inside def setraw(value): - _threadlocalref_seeme(self) - addr = llop.threadlocalref_addr(llmemory.Address) - llop.raw_store(lltype.Void, addr, offset, value) + if we_are_translated(): + _threadlocalref_seeme(self) + addr = llop.threadlocalref_addr(llmemory.Address) + llop.raw_store(lltype.Void, addr, offset, value) + else: + self.local.rawvalue = value self.getraw = getraw self.get_or_make_raw = get_or_make_raw @@ -315,9 +322,7 @@ def __init__(self, Cls, loop_invariant=False): "NOT_RPYTHON: must be prebuilt" - import thread self.Cls = Cls - self.local = thread._local() # <- NOT_RPYTHON unique_id = ThreadLocalReference._COUNT ThreadLocalReference._COUNT += 1 ThreadLocalField.__init__(self, lltype.Signed, 
'tlref%d' % unique_id, diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -64,8 +64,8 @@ RFFI_ZERO_ERRNO_BEFORE = 4 # copy the value 0 into real errno before call RFFI_FULL_ERRNO = RFFI_SAVE_ERRNO | RFFI_READSAVED_ERRNO RFFI_FULL_ERRNO_ZERO = RFFI_SAVE_ERRNO | RFFI_ZERO_ERRNO_BEFORE -RFFI_SAVE_LASTERROR = 8 -RFFI_READSAVED_LASTERROR = 16 +RFFI_SAVE_LASTERROR = 8 # XXX implement me! +RFFI_READSAVED_LASTERROR = 16 # XXX implement me! RFFI_FULL_LASTERROR = RFFI_SAVE_LASTERROR | RFFI_READSAVED_LASTERROR RFFI_ERR_NONE = 0 RFFI_ERR_ALL = RFFI_FULL_ERRNO | RFFI_FULL_LASTERROR From noreply at buildbot.pypy.org Wed Jan 14 18:26:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 18:26:20 +0100 (CET) Subject: [pypy-commit] pypy errno-again: in-progress Message-ID: <20150114172620.E62891C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75328:24f8ef4e0443 Date: 2015-01-14 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/24f8ef4e0443/ Log: in-progress diff --git a/rpython/memory/gc/inspector.py b/rpython/memory/gc/inspector.py --- a/rpython/memory/gc/inspector.py +++ b/rpython/memory/gc/inspector.py @@ -130,7 +130,8 @@ rffi.cast(llmemory.Address, self.writebuffer), rffi.cast(rffi.SIZE_T, bytes)) if rffi.cast(lltype.Signed, count) != bytes: - raise OSError(rposix.get_errno(), "raw_os_write failed") + raise OSError(rffi.cast(lltype.Signed, rposix._get_errno()), + "raw_os_write failed") self.buf_count = 0 flush._dont_inline_ = True diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -756,21 +756,23 @@ old_err_mode = ctypes.windll.kernel32.GetErrorMode() new_err_mode = old_err_mode | SEM_NOGPFAULTERRORBOX 
ctypes.windll.kernel32.SetErrorMode(new_err_mode) - strlen = rffi.llexternal('strlen', [rffi.CCHARP], rffi.SIZE_T, - compilation_info=eci) + os_write_no_errno = rffi.llexternal(UNDERSCORE_ON_WIN32 + 'write', + [rffi.INT, rffi.CCHARP, rffi.SIZE_T], + rffi.SIZE_T, save_err=rffi.RFFI_ERR_NONE) os_write = rffi.llexternal(UNDERSCORE_ON_WIN32 + 'write', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], - rffi.SIZE_T) + rffi.SIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) buffer = lltype.malloc(rffi.CCHARP.TO, 5, flavor='raw') written = os_write(12312312, buffer, 5) if sys.platform.startswith('win'): ctypes.windll.kernel32.SetErrorMode(old_err_mode) + assert rffi.cast(rffi.LONG, written) < 0 + # the next line is a different external function call + # without RFFI_SAVE_ERRNO, to check that it doesn't reset errno + buffer[0] = '\n' + os_write_no_errno(2, buffer, 1) lltype.free(buffer, flavor='raw') - assert rffi.cast(rffi.LONG, written) < 0 - # the next line is a random external function call, - # to check that it doesn't reset errno - strlen("hi!") - err = rposix.get_errno() + err = rposix.get_saved_errno() import errno assert err == errno.EBADF assert not ALLOCATED # detects memory leaks in the test diff --git a/rpython/rtyper/lltypesystem/test/test_rffi.py b/rpython/rtyper/lltypesystem/test/test_rffi.py --- a/rpython/rtyper/lltypesystem/test/test_rffi.py +++ b/rpython/rtyper/lltypesystem/test/test_rffi.py @@ -3,7 +3,7 @@ import sys from rpython.rtyper.lltypesystem.rffi import * from rpython.rtyper.lltypesystem.rffi import _keeper_for_type # crap -from rpython.rlib.rposix import get_errno, set_errno +from rpython.rlib.rposix import get_saved_errno, set_saved_errno from rpython.translator.c.test.test_genc import compile as compile_c from rpython.rtyper.lltypesystem.lltype import Signed, Ptr, Char, malloc from rpython.rtyper.lltypesystem import lltype @@ -207,15 +207,15 @@ bad_fd = 12312312 def f(): - set_errno(12) - return get_errno() + set_saved_errno(12) + return get_saved_errno() def g(): 
try: os.write(bad_fd, "xxx") except OSError: pass - return get_errno() + return get_saved_errno() fn = self.compile(f, []) assert fn() == 12 From noreply at buildbot.pypy.org Wed Jan 14 18:32:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 18:32:08 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix test Message-ID: <20150114173208.D9E651C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75329:fe33936f4984 Date: 2015-01-14 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/fe33936f4984/ Log: fix test diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1166,10 +1166,10 @@ self.tail = tail def check_errno(value): - rposix.set_errno(value) + rposix.set_saved_errno(value) for i in range(10000000): pass - assert rposix.get_errno() == value + assert rposix.get_saved_errno() == value def bootstrap(): rthread.gc_thread_start() From noreply at buildbot.pypy.org Wed Jan 14 18:48:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 18:48:48 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fixes Message-ID: <20150114174848.4428A1C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75330:8eed807e65cb Date: 2015-01-14 18:43 +0100 http://bitbucket.org/pypy/pypy/changeset/8eed807e65cb/ Log: fixes diff --git a/rpython/memory/gc/inspector.py b/rpython/memory/gc/inspector.py --- a/rpython/memory/gc/inspector.py +++ b/rpython/memory/gc/inspector.py @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup from rpython.rlib.objectmodel import free_non_gc_object from rpython.rtyper.module.ll_os import UNDERSCORE_ON_WIN32 -from rpython.rlib import rposix, rgc +from rpython.rlib import rposix, rgc, jit from rpython.memory.support import AddressDict, get_address_stack @@ -123,6 
+123,7 @@ lltype.free(self.writebuffer, flavor='raw') free_non_gc_object(self) + @jit.dont_look_inside def flush(self): if self.buf_count > 0: bytes = self.buf_count * rffi.sizeof(rffi.LONG) diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -57,17 +57,23 @@ def llexternal(*args, **kwargs): return rffi.llexternal(*args, compilation_info=eci, **kwargs) -c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) -c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], FILEP) +c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP, + save_err=rffi.RFFI_SAVE_ERRNO) +c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], FILEP, + save_err=rffi.RFFI_SAVE_ERRNO) c_fdopen = llexternal(('_' if os.name == 'nt' else '') + 'fdopen', - [rffi.INT, rffi.CCHARP], FILEP) -c_tmpfile = llexternal('tmpfile', [], FILEP) + [rffi.INT, rffi.CCHARP], FILEP, + save_err=rffi.RFFI_SAVE_ERRNO) +c_tmpfile = llexternal('tmpfile', [], FILEP, + save_err=rffi.RFFI_SAVE_ERRNO) c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], rffi.INT) -c_fclose = llexternal('fclose', [FILEP], rffi.INT) -c_pclose = llexternal('pclose', [FILEP], rffi.INT) +c_fclose = llexternal('fclose', [FILEP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_pclose = llexternal('pclose', [FILEP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) # Note: the following two functions are called from __del__ methods, # so must be 'releasegil=False'. 
Otherwise, a program using both @@ -84,12 +90,16 @@ rffi.SIZE_T) c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], - rffi.SIZE_T) -c_fflush = llexternal('fflush', [FILEP], rffi.INT) -c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) + rffi.SIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) +c_fflush = llexternal('fflush', [FILEP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True, + save_err=rffi.RFFI_SAVE_ERRNO) -c_fseek = llexternal('fseek', [FILEP, rffi.LONG, rffi.INT], rffi.INT) -c_ftell = llexternal('ftell', [FILEP], rffi.LONG) +c_fseek = llexternal('fseek', [FILEP, rffi.LONG, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_ftell = llexternal('ftell', [FILEP], rffi.LONG, + save_err=rffi.RFFI_SAVE_ERRNO) c_fileno = llexternal(fileno, [FILEP], rffi.INT) c_feof = llexternal('feof', [FILEP], rffi.INT) @@ -149,7 +159,7 @@ try: ll_file = c_fopen(ll_name, ll_mode) if not ll_file: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise IOError(errno, os.strerror(errno)) finally: lltype.free(ll_mode, flavor='raw') @@ -169,7 +179,7 @@ try: ll_file = c_fdopen(fd, ll_mode) if not ll_file: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_mode, flavor='raw') @@ -182,7 +192,7 @@ def create_temp_rfile(): res = c_tmpfile() if not res: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OSError(errno, os.strerror(errno)) return RFile(res) @@ -194,7 +204,7 @@ try: ll_file = c_popen(ll_command, ll_type) if not ll_file: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_type, flavor='raw') @@ -274,7 +284,7 @@ if do_close: res = do_close(ll_file) if res == -1: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise IOError(errno, os.strerror(errno)) finally: 
if self._setbuf: @@ -474,7 +484,7 @@ length = len(value) bytes = c_fwrite(ll_value, 1, length, self._ll_file) if bytes != length: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() c_clearerr(self._ll_file) raise IOError(errno, os.strerror(errno)) finally: @@ -484,7 +494,7 @@ self._check_closed() res = c_fflush(self._ll_file) if res != 0: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise IOError(errno, os.strerror(errno)) def truncate(self, arg=-1): @@ -494,14 +504,14 @@ self.flush() res = c_ftruncate(self.fileno(), arg) if res == -1: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise IOError(errno, os.strerror(errno)) def seek(self, pos, whence=0): self._check_closed() res = c_fseek(self._ll_file, pos, whence) if res == -1: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise IOError(errno, os.strerror(errno)) self._skipnextlf = False @@ -509,7 +519,7 @@ self._check_closed() res = intmask(c_ftell(self._ll_file)) if res == -1: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise IOError(errno, os.strerror(errno)) if self._skipnextlf: c = c_getc(self._ll_file) diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -108,13 +108,16 @@ else: HAVE_LARGEFILE_SUPPORT = False -def external(name, args, result, **kwargs): +def external(name, args, result, save_err_on_unsafe=0, save_err_on_safe=0, + **kwargs): unsafe = rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, + save_err=save_err_on_unsafe, **kwargs) safe = rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, sandboxsafe=True, releasegil=False, + save_err=save_err_on_safe, **kwargs) return unsafe, safe @@ -142,10 +145,12 @@ if _POSIX: has_mremap = cConfig['has_mremap'] c_mmap, c_mmap_safe = external('mmap', [PTR, size_t, rffi.INT, rffi.INT, - rffi.INT, off_t], PTR, macro=True) + rffi.INT, off_t], PTR, macro=True, 
+ save_err_on_unsafe=rffi.RFFI_SAVE_ERRNO) # 'mmap' on linux32 is a macro that calls 'mmap64' _, c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT) - c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT) + c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT, + save_err_on_unsafe=rffi.RFFI_SAVE_ERRNO) if has_mremap: c_mremap, _ = external('mremap', [PTR, size_t, size_t, rffi.ULONG], PTR) @@ -501,7 +506,7 @@ elif _POSIX: res = c_msync(start, size, MS_SYNC) if res == -1: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OSError(errno, os.strerror(errno)) return 0 @@ -671,7 +676,7 @@ # to be annotated with a non-constant pointer. res = c_mmap(NonConstant(NULL), map_size, prot, flags, fd, offset) if res == rffi.cast(PTR, -1): - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OSError(errno, os.strerror(errno)) m.setdata(res, map_size) diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -20,7 +20,7 @@ from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.tool import rffi_platform as platform -from rpython.rlib import rposix +from rpython.rlib import rposix, jit from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.objectmodel import specialize from rpython.translator import cdir @@ -1785,6 +1785,7 @@ os_fork = self.llexternal('fork', [], rffi.PID_T, _nowrapper = True) + @jit.dont_look_inside def fork_llimpl(): # NB. 
keep forkpty() up-to-date, too ofs = debug.debug_offset() From noreply at buildbot.pypy.org Wed Jan 14 18:48:49 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 18:48:49 +0100 (CET) Subject: [pypy-commit] pypy errno-again: more fixes Message-ID: <20150114174849.AC1241C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75331:1180859920a4 Date: 2015-01-14 18:46 +0100 http://bitbucket.org/pypy/pypy/changeset/1180859920a4/ Log: more fixes diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -114,7 +114,8 @@ )) def validate_fd(fd): if not is_valid_fd(fd): - raise OSError(get_errno(), 'Bad file descriptor') + from errno import EBADF + raise OSError(EBADF, 'Bad file descriptor') else: def is_valid_fd(fd): return 1 diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -114,26 +114,34 @@ ('c_line', CC_T), ('c_cc', lltype.FixedSizeArray(CC_T, NCCS)), *_add) -def c_external(name, args, result): - return rffi.llexternal(name, args, result, compilation_info=eci) +def c_external(name, args, result, **kwds): + return rffi.llexternal(name, args, result, compilation_info=eci, **kwds) -c_tcgetattr = c_external('tcgetattr', [rffi.INT, TERMIOSP], rffi.INT) -c_tcsetattr = c_external('tcsetattr', [rffi.INT, rffi.INT, TERMIOSP], rffi.INT) +c_tcgetattr = c_external('tcgetattr', [rffi.INT, TERMIOSP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_tcsetattr = c_external('tcsetattr', [rffi.INT, rffi.INT, TERMIOSP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) c_cfgetispeed = c_external('cfgetispeed', [TERMIOSP], SPEED_T) c_cfgetospeed = c_external('cfgetospeed', [TERMIOSP], SPEED_T) -c_cfsetispeed = c_external('cfsetispeed', [TERMIOSP, SPEED_T], rffi.INT) -c_cfsetospeed = c_external('cfsetospeed', [TERMIOSP, SPEED_T], rffi.INT) +c_cfsetispeed = c_external('cfsetispeed', [TERMIOSP, SPEED_T], 
rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_cfsetospeed = c_external('cfsetospeed', [TERMIOSP, SPEED_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) -c_tcsendbreak = c_external('tcsendbreak', [rffi.INT, rffi.INT], rffi.INT) -c_tcdrain = c_external('tcdrain', [rffi.INT], rffi.INT) -c_tcflush = c_external('tcflush', [rffi.INT, rffi.INT], rffi.INT) -c_tcflow = c_external('tcflow', [rffi.INT, rffi.INT], rffi.INT) +c_tcsendbreak = c_external('tcsendbreak', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_tcdrain = c_external('tcdrain', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_tcflush = c_external('tcflush', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_tcflow = c_external('tcflow', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def tcgetattr(fd): with lltype.scoped_alloc(TERMIOSP.TO) as c_struct: if c_tcgetattr(fd, c_struct) < 0: - raise OSError(rposix.get_errno(), 'tcgetattr failed') + raise OSError(rposix.get_saved_errno(), 'tcgetattr failed') cc = [chr(c_struct.c_c_cc[i]) for i in range(NCCS)] ispeed = c_cfgetispeed(c_struct) ospeed = c_cfgetospeed(c_struct) @@ -157,24 +165,24 @@ for i in range(NCCS): c_struct.c_c_cc[i] = rffi.r_uchar(ord(cc[i][0])) if c_cfsetispeed(c_struct, ispeed) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') + raise OSError(rposix.get_saved_errno(), 'tcsetattr failed') if c_cfsetospeed(c_struct, ospeed) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') + raise OSError(rposix.get_saved_errno(), 'tcsetattr failed') if c_tcsetattr(fd, when, c_struct) < 0: - raise OSError(rposix.get_errno(), 'tcsetattr failed') + raise OSError(rposix.get_saved_errno(), 'tcsetattr failed') def tcsendbreak(fd, duration): if c_tcsendbreak(fd, duration) < 0: - raise OSError(rposix.get_errno(), 'tcsendbreak failed') + raise OSError(rposix.get_saved_errno(), 'tcsendbreak failed') def tcdrain(fd): if c_tcdrain(fd) < 0: - raise OSError(rposix.get_errno(), 'tcdrain failed') 
+ raise OSError(rposix.get_saved_errno(), 'tcdrain failed') def tcflush(fd, queue_selector): if c_tcflush(fd, queue_selector) < 0: - raise OSError(rposix.get_errno(), 'tcflush failed') + raise OSError(rposix.get_saved_errno(), 'tcflush failed') def tcflow(fd, action): if c_tcflow(fd, action) < 0: - raise OSError(rposix.get_errno(), 'tcflow failed') + raise OSError(rposix.get_saved_errno(), 'tcflow failed') From noreply at buildbot.pypy.org Wed Jan 14 18:48:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 18:48:50 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fixes Message-ID: <20150114174850.CF1751C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75332:8e7fde0c4855 Date: 2015-01-14 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/8e7fde0c4855/ Log: fixes diff --git a/rpython/jit/backend/x86/oprofile.py b/rpython/jit/backend/x86/oprofile.py --- a/rpython/jit/backend/x86/oprofile.py +++ b/rpython/jit/backend/x86/oprofile.py @@ -2,7 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.rposix import get_errno +from rpython.rlib import rposix from rpython.jit.backend.x86 import profagent class OProfileError(Exception): @@ -26,19 +26,22 @@ "op_open_agent", [], AGENT, - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) op_close_agent = rffi.llexternal( "op_close_agent", [AGENT], rffi.INT, - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) # arguments are: # agent, symbol_name, address in memory, address in memory again, size op_write_native_code = rffi.llexternal( "op_write_native_code", [AGENT, rffi.CCHARP, uint64_t, rffi.VOIDP, rffi.UINT], rffi.INT, - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) class OProfileAgent(profagent.ProfileAgent): @@ -48,7 
+51,7 @@ return agent = op_open_agent() if not agent: - raise OProfileError(get_errno(), "startup") + raise OProfileError(rposix.get_saved_errno(), "startup") self.agent = agent def shutdown(self): @@ -56,7 +59,7 @@ return success = op_close_agent(self.agent) if success != 0: - raise OProfileError(get_errno(), "shutdown") + raise OProfileError(rposix.get_saved_errno(), "shutdown") def native_code_written(self, name, address, size): assert size > 0 @@ -65,4 +68,4 @@ uaddress = rffi.cast(rffi.ULONG, address) success = op_write_native_code(self.agent, name, uaddress, rffi.cast(rffi.VOIDP, 0), size) if success != 0: - raise OProfileError(get_errno(), "write") + raise OProfileError(rposix.get_saved_errno(), "write") From noreply at buildbot.pypy.org Wed Jan 14 19:40:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 19:40:47 +0100 (CET) Subject: [pypy-commit] pypy errno-again: in-progress Message-ID: <20150114184047.8533B1C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75333:73a539098acc Date: 2015-01-14 19:40 +0100 http://bitbucket.org/pypy/pypy/changeset/73a539098acc/ Log: in-progress diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -486,10 +486,10 @@ w_exception_class=w_exception_class) wrap_oserror._annspecialcase_ = 'specialize:arg(3)' -def exception_from_errno(space, w_type): - from rpython.rlib.rposix import get_errno +def exception_from_saved_errno(space, w_type): + from rpython.rlib.rposix import get_saved_errno - errno = get_errno() + errno = get_saved_errno() msg = os.strerror(errno) w_error = space.call_function(w_type, space.wrap(errno), space.wrap(msg)) return OperationError(w_type, w_error) diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,7 +1,7 @@ from __future__ import with_statement 
import sys -from pypy.interpreter.error import exception_from_errno +from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform @@ -48,11 +48,13 @@ c_clock_gettime = rffi.llexternal("clock_gettime", [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, - compilation_info=CConfig._compilation_info_, releasegil=False + compilation_info=CConfig._compilation_info_, releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO ) c_clock_getres = rffi.llexternal("clock_getres", [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, - compilation_info=CConfig._compilation_info_, releasegil=False + compilation_info=CConfig._compilation_info_, releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO ) @unwrap_spec(clk_id="c_int") @@ -60,7 +62,7 @@ with lltype.scoped_alloc(TIMESPEC) as tp: ret = c_clock_gettime(clk_id, tp) if ret != 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) return space.wrap(int(tp.c_tv_sec) + 1e-9 * int(tp.c_tv_nsec)) @unwrap_spec(clk_id="c_int") @@ -68,5 +70,5 @@ with lltype.scoped_alloc(TIMESPEC) as tp: ret = c_clock_getres(clk_id, tp) if ret != 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) return space.wrap(int(tp.c_tv_sec) + 1e-9 * int(tp.c_tv_nsec)) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -613,10 +613,10 @@ return space.wrap(W_CDLL(space, name, cdll)) def get_errno(space): - return space.wrap(rposix.get_errno()) + return space.wrap(rposix.get_saved_errno()) def set_errno(space, w_errno): - rposix.set_errno(space.int_w(w_errno)) + rposix.set_saved_errno(space.int_w(w_errno)) if sys.platform == 'win32': def get_last_error(space): diff --git 
a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -59,18 +59,24 @@ return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_) _flock = lltype.Ptr(cConfig.flock) -fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) -fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) -fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT) -ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT) -ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT) +fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) has_flock = cConfig.has_flock if has_flock: - c_flock = external('flock', [rffi.INT, rffi.INT], rffi.INT) + c_flock = external('flock', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def _get_error(space, funcname): - errno = rposix.get_errno() + errno = rposix.get_saved_errno() return wrap_oserror(space, OSError(errno, funcname), exception_name = 'w_IOError') diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py --- a/pypy/module/select/interp_epoll.py +++ b/pypy/module/select/interp_epoll.py @@ -4,12 +4,13 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError, exception_from_errno, oefmt +from pypy.interpreter.error import 
OperationError, oefmt +from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.rlib._rsocket_rffi import socketclose, FD_SETSIZE -from rpython.rlib.rposix import get_errno +from rpython.rlib.rposix import get_saved_errno from rpython.rlib.rarithmetic import intmask from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -53,19 +54,22 @@ EPOLL_CTL_DEL = cconfig["EPOLL_CTL_DEL"] epoll_create = rffi.llexternal( - "epoll_create", [rffi.INT], rffi.INT, compilation_info=eci + "epoll_create", [rffi.INT], rffi.INT, compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) epoll_ctl = rffi.llexternal( "epoll_ctl", [rffi.INT, rffi.INT, rffi.INT, lltype.Ptr(epoll_event)], rffi.INT, - compilation_info=eci + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) epoll_wait = rffi.llexternal( "epoll_wait", [rffi.INT, rffi.CArrayPtr(epoll_event), rffi.INT, rffi.INT], rffi.INT, compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) @@ -82,7 +86,7 @@ "sizehint must be greater than zero, got %d", sizehint) epfd = epoll_create(sizehint) if epfd < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) return space.wrap(W_Epoll(space, epfd)) @@ -114,10 +118,10 @@ rffi.setintfield(ev.c_data, 'c_fd', fd) result = epoll_ctl(self.epfd, ctl, fd, ev) - if ignore_ebadf and get_errno() == errno.EBADF: + if ignore_ebadf and get_saved_errno() == errno.EBADF: result = 0 if result < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) def descr_get_closed(self, space): return space.wrap(self.get_closed()) @@ -160,7 +164,7 @@ with lltype.scoped_alloc(rffi.CArray(epoll_event), maxevents) as evs: nfds = epoll_wait(self.epfd, evs, maxevents, int(timeout)) if nfds < 0: - raise 
exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) elist_w = [None] * nfds for i in xrange(nfds): diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -1,5 +1,6 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, exception_from_errno, oefmt +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import TypeDef, generic_new_descr, GetSetProperty from rpython.rlib._rsocket_rffi import socketclose @@ -86,7 +87,8 @@ "kqueue", [], rffi.INT, - compilation_info=eci + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) syscall_kevent = rffi.llexternal( @@ -99,7 +101,8 @@ lltype.Ptr(timespec) ], rffi.INT, - compilation_info=eci + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) @@ -110,7 +113,7 @@ def descr__new__(space, w_subtype): kqfd = syscall_kqueue() if kqfd < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) return space.wrap(W_Kqueue(space, kqfd)) @unwrap_spec(fd=int) @@ -198,7 +201,7 @@ max_events, ptimeout) if nfds < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) else: elist_w = [None] * nfds for i in xrange(nfds): diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -5,7 +5,7 @@ import os import errno -from pypy.interpreter.error import OperationError, exception_from_errno +from pypy.interpreter.error import OperationError, exception_from_saved_errno from pypy.interpreter.executioncontext import (AsyncAction, 
AbstractActionFlag, PeriodicAsyncAction) from pypy.interpreter.gateway import unwrap_spec @@ -258,7 +258,7 @@ def siginterrupt(space, signum, flag): check_signum_in_range(space, signum) if rffi.cast(lltype.Signed, c_siginterrupt(signum, flag)) < 0: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OperationError(space.w_RuntimeError, space.wrap(errno)) @@ -311,7 +311,7 @@ ret = c_setitimer(which, new, old) if ret != 0: - raise exception_from_errno(space, get_itimer_error(space)) + raise exception_from_saved_errno(space, get_itimer_error(space)) return itimer_retval(space, old[0]) diff --git a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py @@ -95,3 +95,24 @@ def test_close_stack(self): self.run('close_stack') assert 'call_release_gil' in udir.join('TestCompileFramework.log').read() + + ## def define_get_set_errno(self): + + ## c_strchr = rffi.llexternal('strchr', [rffi.CCHARP, lltype.Signed], + ## rffi.CCHARP, ... 
+ + ## def before(n, x): + ## return (n, None, None, None, None, None, + ## None, None, None, None, None, None) + ## # + ## def f(n, x, *args): + ## a = rffi.str2charp(str(n)) + ## c_strchr(a, ord('0')) + ## lltype.free(a, flavor='raw') + ## n -= 1 + ## return (n, x) + args + ## return before, f, None + + ## def test_get_set_errno(self): + ## self.run('get_set_errno') + ## assert 'call_release_gil' in udir.join('TestCompileFramework.log').read() diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -10,6 +10,7 @@ EffectInfo, CallInfoCollection) from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.typesystem import getfunctionptr +from rpython.rlib import rposix from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer from rpython.translator.backendopt.graphanalyze import DependencyTracker @@ -114,6 +115,10 @@ if self.jitdriver_sd_from_portal_runner_ptr(funcptr) is not None: return 'recursive' funcobj = funcptr._obj + assert (funcobj is not rposix._get_errno and + funcobj is not rposix._set_errno), ( + "the JIT must never come close to _get_errno() or _set_errno();" + " it should all be done at a lower level") if getattr(funcobj, 'graph', None) is None: return 'residual' targetgraph = funcobj.graph @@ -206,7 +211,7 @@ # get the 'elidable' and 'loopinvariant' flags from the function object elidable = False loopinvariant = False - call_release_gil_target = llmemory.NULL + call_release_gil_target = EffectInfo._NO_CALL_RELEASE_GIL_TARGET if op.opname == "direct_call": funcobj = op.args[0].value._obj assert getattr(funcobj, 'calling_conv', 'c') == 'c', ( @@ -218,9 +223,9 @@ assert not NON_VOID_ARGS, ("arguments not supported for " "loop-invariant function!") if getattr(func, "_call_aroundstate_target_", None): - call_release_gil_target = func._call_aroundstate_target_ - 
call_release_gil_target = llmemory.cast_ptr_to_adr( - call_release_gil_target) + tgt_func, tgt_saveerr = func._call_aroundstate_target_ + tgt_func = llmemory.cast_ptr_to_adr(tgt_func) + call_release_gil_target = (tgt_func, tgt_saveerr) elif op.opname == 'indirect_call': # check that we're not trying to call indirectly some # function with the special flags diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -98,6 +98,8 @@ OS_NOT_IN_TRACE, ]) + _NO_CALL_RELEASE_GIL_TARGET = (llmemory.NULL, 0) + def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, readonly_descrs_interiorfields, write_descrs_fields, write_descrs_arrays, @@ -105,7 +107,7 @@ extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL, + call_release_gil_target=_NO_CALL_RELEASE_GIL_TARGET, extradescrs=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), @@ -116,7 +118,8 @@ extraeffect, oopspecindex, can_invalidate) - if call_release_gil_target: + tgt_func, tgt_saveerr = call_release_gil_target + if tgt_func: key += (object(),) # don't care about caching in this case if key in cls._cache: return cls._cache[key] @@ -171,7 +174,8 @@ return self.extraeffect >= self.EF_RANDOM_EFFECTS def is_call_release_gil(self): - return bool(self.call_release_gil_target) + tgt_func, tgt_saveerr = self.call_release_gil_target + return bool(tgt_func) def __repr__(self): more = '' @@ -194,7 +198,8 @@ extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL, + call_release_gil_target= + EffectInfo._NO_CALL_RELEASE_GIL_TARGET, extradescr=None): from rpython.translator.backendopt.writeanalyze import top_set if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: diff --git a/rpython/rlib/clibffi.py 
b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -330,7 +330,8 @@ else: c_ffi_call_return_type = lltype.Void c_ffi_call = external('ffi_call', [FFI_CIFP, rffi.VOIDP, rffi.VOIDP, - VOIDPP], c_ffi_call_return_type) + VOIDPP], c_ffi_call_return_type, + save_err=rffi.RFFI_ERR_ALL) CALLBACK_TP = rffi.CCallback([FFI_CIFP, rffi.VOIDP, rffi.VOIDPP, rffi.VOIDP], lltype.Void) c_ffi_prep_closure = external('ffi_prep_closure', [FFI_CLOSUREP, FFI_CIFP, diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -89,10 +89,12 @@ elidable_function=True) c_alarm = external('alarm', [rffi.INT], rffi.INT) c_pause = external('pause', [], rffi.INT, releasegil=True) -c_siginterrupt = external('siginterrupt', [rffi.INT, rffi.INT], rffi.INT) +c_siginterrupt = external('siginterrupt', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) if sys.platform != 'win32': itimervalP = rffi.CArrayPtr(itimerval) c_setitimer = external('setitimer', - [rffi.INT, itimervalP, itimervalP], rffi.INT) + [rffi.INT, itimervalP, itimervalP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) c_getitimer = external('getitimer', [rffi.INT, itimervalP], rffi.INT) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -213,7 +213,7 @@ # CALL_RELEASE_GIL directly to 'funcptr'. This doesn't work if # 'funcptr' might be a C macro, though. 
if macro is None: - call_external_function._call_aroundstate_target_ = funcptr + call_external_function._call_aroundstate_target_ = funcptr, save_err # call_external_function = func_with_new_name(call_external_function, 'ccall_' + name) From noreply at buildbot.pypy.org Wed Jan 14 19:52:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 19:52:07 +0100 (CET) Subject: [pypy-commit] pypy errno-again: in-progress Message-ID: <20150114185207.ED9AF1C1056@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75334:6337a26add9b Date: 2015-01-14 19:51 +0100 http://bitbucket.org/pypy/pypy/changeset/6337a26add9b/ Log: in-progress diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -935,7 +935,7 @@ del self.force_guard_op return res - def execute_call_release_gil(self, descr, func, *args): + def execute_call_release_gil(self, descr, saveerr, func, *args): if hasattr(descr, '_original_func_'): func = descr._original_func_ # see pyjitpl.py # we want to call the function that does the aroundstate diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2512,7 +2512,7 @@ tok = BoxInt() faildescr = BasicFailDescr(1) ops = [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1], i2, + ResOperation(rop.CALL_RELEASE_GIL, [ConstInt(0), funcbox, i1], i2, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i2], None, descr=BasicFinalDescr(0)) @@ -2570,7 +2570,8 @@ tok = BoxInt() faildescr = BasicFailDescr(1) ops = [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i0, i1, i2, i3], None, + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(0), funcbox, i0, i1, i2, i3], None, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, 
descr=faildescr), ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(0)) @@ -2625,7 +2626,8 @@ for i in range(50): i3 = BoxInt() ops += [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(0), funcbox, i1, i2], i3, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ] @@ -2697,7 +2699,7 @@ assert 0, kind # ops = [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox], b3, + ResOperation(rop.CALL_RELEASE_GIL, [ConstInt(0), funcbox], b3, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [b3], None, descr=BasicFinalDescr(0)) @@ -2881,7 +2883,8 @@ loadcodes = ''.join(loadcodes) print loadcodes ops += [ - ResOperation(rop.CALL_RELEASE_GIL, insideboxes, None, + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(0)] + insideboxes, None, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(0)) @@ -2916,6 +2919,20 @@ assert got == expected, '\n'.join( ['bad args, signature %r' % codes[1:]] + different_values) + def test_call_release_gil_save_errno(self): + XXX + + def test_call_release_gil_readsaved_errno(self): + XXX + + def test_call_release_gil_zero_errno_before(self): + XXX + + def test_call_release_gil_save_lasterror(self): + XXX + + def test_call_release_gil_readsaved_lasterror(self): + XXX def test_guard_not_invalidated(self): cpu = self.cpu diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -206,7 +206,8 @@ from rpython.jit.backend.llgraph.runner import LLGraphCPU T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True, + save_err=rffi.RFFI_SAVE_ERRNO) # no jit.dont_look_inside in this test 
def f(): @@ -220,12 +221,16 @@ [llext_graph] = [x for x in res if x.func is external] [block, _] = list(llext_graph.iterblocks()) [op] = block.operations - call_target = op.args[0].value._obj.graph.func._call_aroundstate_target_ + tgt_tuple = op.args[0].value._obj.graph.func._call_aroundstate_target_ + assert type(tgt_tuple) is tuple and len(tgt_tuple) == 2 + call_target, saveerr = tgt_tuple + assert saveerr == rffi.RFFI_SAVE_ERRNO call_target = llmemory.cast_ptr_to_adr(call_target) call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() assert call_descr.extrainfo.is_call_release_gil() is True - assert call_descr.extrainfo.call_release_gil_target == call_target + assert call_descr.extrainfo.call_release_gil_target == ( + call_target, rffi.RFFI_SAVE_ERRNO) def test_random_effects_on_stacklet_switch(): from rpython.jit.backend.llgraph.runner import LLGraphCPU diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2875,8 +2875,11 @@ arg_boxes.append(box_arg) # box_result = op.result + # for now, any call via libffi saves and restores everything + # (that is, errno and SetLastError/GetLastError on Windows) + c_saveall = ConstInt(rffi.RFFI_ERR_ALL) self.history.record(rop.CALL_RELEASE_GIL, - [op.getarg(2)] + arg_boxes, + [c_saveall, op.getarg(2)] + arg_boxes, box_result, calldescr) # self.history.operations.extend(extra_guards) @@ -2889,10 +2892,11 @@ assert op.opnum == rop.CALL_MAY_FORCE descr = op.getdescr() effectinfo = descr.get_extra_info() - realfuncaddr = effectinfo.call_release_gil_target + realfuncaddr, saveerr = effectinfo.call_release_gil_target funcbox = ConstInt(heaptracker.adr2int(realfuncaddr)) + savebox = ConstInt(saveerr) self.history.record(rop.CALL_RELEASE_GIL, - [funcbox] + op.getarglist()[1:], + [savebox, funcbox] + op.getarglist()[1:], op.result, descr) if not we_are_translated(): # for llgraph 
descr._original_func_ = op.getarg(0).value From noreply at buildbot.pypy.org Wed Jan 14 21:17:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Jan 2015 21:17:13 +0100 (CET) Subject: [pypy-commit] pypy default: - fix the mismatch between "int" and "long" return types for Message-ID: <20150114201713.890871D2655@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75335:b9d53b23c50b Date: 2015-01-14 21:17 +0100 http://bitbucket.org/pypy/pypy/changeset/b9d53b23c50b/ Log: - fix the mismatch between "int" and "long" return types for pypy_setup_home() - fix the docs to say that calling pypy_setup_home() is not optional (we get a segfault if we don't call it) diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,12 +30,10 @@ Initialize threads. Only need to be called if there are any threads involved -.. function:: long pypy_setup_home(char* home, int verbose); +.. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given - "PyPy home directory". It is not strictly necessary to execute it before - running Python code, but without it you will not be able to import any - non-builtin module from the standard library. The arguments are: + "PyPy home directory". 
The arguments are: * ``home``: NULL terminated path to an executable inside the pypy directory (can be a .so name, can be made up) @@ -84,17 +82,22 @@ const char source[] = "print 'hello from pypy'"; - int main() + int main(void) { - int res; + int res; - rpython_startup_code(); - // pypy_setup_home() is not needed in this trivial example - res = pypy_execute_source((char*)source); - if (res) { - printf("Error calling pypy_execute_source!\n"); - } - return res; + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; } If we save it as ``x.c`` now, compile it and run it (on linux) with:: diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -101,7 +101,7 @@ if space.is_none(w_path): if verbose: debug("Failed to find library based on pypy_find_stdlib") - return 1 + return rffi.cast(rffi.INT, 1) space.startup() space.call_function(w_pathsetter, w_path) # import site @@ -109,13 +109,13 @@ import_ = space.getattr(space.getbuiltinmodule('__builtin__'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) - return 0 + return rffi.cast(rffi.INT, 0) except OperationError, e: if verbose: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 + return rffi.cast(rffi.INT, -1) @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): From noreply at buildbot.pypy.org Wed Jan 14 22:58:59 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 Jan 2015 22:58:59 +0100 (CET) Subject: [pypy-commit] pypy ufuncapi: add more types Message-ID: 
<20150114215859.074AD1D2655@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r75336:e1871503d572 Date: 2015-01-14 23:59 +0200 http://bitbucket.org/pypy/pypy/changeset/e1871503d572/ Log: add more types diff --git a/pypy/module/cpyext/include/numpy/npy_common.h b/pypy/module/cpyext/include/numpy/npy_common.h --- a/pypy/module/cpyext/include/numpy/npy_common.h +++ b/pypy/module/cpyext/include/numpy/npy_common.h @@ -3,6 +3,8 @@ typedef Py_intptr_t npy_intp; typedef Py_uintptr_t npy_uintp; +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; typedef unsigned char npy_bool; typedef long npy_int32; typedef unsigned long npy_uint32; @@ -10,6 +12,24 @@ typedef long npy_int64; typedef unsigned long npy_uint64; typedef unsigned char npy_uint8; + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +typedef struct { float real, imag; } npy_cfloat; +typedef struct { double real, imag; } npy_cdouble; +typedef npy_cdouble npy_complex128; #if defined(_MSC_VER) #define NPY_INLINE __inline #elif defined(__GNUC__) From noreply at buildbot.pypy.org Thu Jan 15 11:07:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 11:07:21 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Write and pass test_call_release_gil_save_errno, the first test Message-ID: <20150115100721.37FCD1C1056@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75337:de4fa7e98110 Date: 2015-01-15 11:07 +0100 http://bitbucket.org/pypy/pypy/changeset/de4fa7e98110/ Log: Write and pass test_call_release_gil_save_errno, the first test about the errno handling in callbuilder. 
diff --git a/rpython/jit/backend/llsupport/callbuilder.py b/rpython/jit/backend/llsupport/callbuilder.py --- a/rpython/jit/backend/llsupport/callbuilder.py +++ b/rpython/jit/backend/llsupport/callbuilder.py @@ -42,9 +42,9 @@ self.pop_gcmap() self.load_result() - def emit_call_release_gil(self): + def emit_call_release_gil(self, save_err): """Emit a CALL_RELEASE_GIL, including calls to releasegil_addr - and reacqgil_addr.""" + and reacqgil_addr. 'save_err' is a combination of rffi.RFFI_*ERR*.""" fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil()) self.select_call_release_gil_mode() self.prepare_arguments() @@ -52,6 +52,7 @@ self.call_releasegil_addr_and_move_real_arguments(fastgil) self.emit_raw_call() self.restore_stack_pointer() + self.save_errno(save_err) self.move_real_result_and_call_reacqgil_addr(fastgil) self.pop_gcmap() self.load_result() @@ -62,6 +63,9 @@ def move_real_result_and_call_reacqgil_addr(self, fastgil): raise NotImplementedError + def save_errno(self, save_err): + raise NotImplementedError + def select_call_release_gil_mode(self): """Overridden in CallBuilder64""" self.is_call_release_gil = True diff --git a/rpython/jit/backend/llsupport/llerrno.py b/rpython/jit/backend/llsupport/llerrno.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/llerrno.py @@ -0,0 +1,41 @@ +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.jit.backend.llsupport.symbolic import WORD + + +def get_debug_saved_errno(cpu): + return cpu._debug_errno_container[3] + +def set_debug_saved_errno(cpu, nerrno): + assert nerrno >= 0 + cpu._debug_errno_container[3] = nerrno + +def get_rpy_errno_offset(cpu): + if cpu.translate_support_code: + from rpython.rlib import rthread + return rthread.tlfield_rpy_errno.offset + else: + return 3 * WORD + +def _fetch_addr_errno(): + eci = ExternalCompilationInfo( + separate_module_sources=[''' + #include + RPY_EXPORTED long 
fetch_addr_errno(void) { + return (long)(&errno); + } + ''']) + func1_ptr = rffi.llexternal('fetch_addr_errno', [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + return func1_ptr() + +def get_p_errno_offset(cpu): + if cpu.translate_support_code: + from rpython.rlib import rthread + return rthread.tlfield_p_errno.offset + else: + if cpu._debug_errno_container[2] == 0: + addr_errno = _fetch_addr_errno() + assert addr_errno != 0 + cpu._debug_errno_container[2] = addr_errno + return 2 * WORD diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -62,6 +62,9 @@ self.floatarraydescr = ArrayDescr(ad.basesize, ad.itemsize, ad.lendescr, FLAG_FLOAT) self.setup() + self._debug_errno_container = lltype.malloc( + rffi.CArray(lltype.Signed), 5, flavor='raw', zero=True, + track_allocation=False) def getarraydescr_for_frame(self, type): if type == history.FLOAT: @@ -259,7 +262,8 @@ ll_threadlocal_addr = llop.threadlocalref_addr( llmemory.Address) else: - ll_threadlocal_addr = llmemory.NULL + ll_threadlocal_addr = rffi.cast(llmemory.Address, + self._debug_errno_container) llop.gc_writebarrier(lltype.Void, ll_frame) ll_frame = func(ll_frame, ll_threadlocal_addr) finally: diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2920,7 +2920,45 @@ ['bad args, signature %r' % codes[1:]] + different_values) def test_call_release_gil_save_errno(self): - XXX + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + eci = ExternalCompilationInfo( + separate_module_sources=[''' + #include + RPY_EXPORTED void test_call_release_gil_save_errno(void) { + errno = 42; + } + ''']) + fn_name = 
'test_call_release_gil_save_errno' + func1_ptr = rffi.llexternal(fn_name, [], lltype.Void, + compilation_info=eci, _nowrapper=True) + func1_adr = rffi.cast(lltype.Signed, func1_ptr) + calldescr = self.cpu._calldescr_dynamic_for_tests([], types.void) + # + for saveerr in [rffi.RFFI_ERR_NONE, rffi.RFFI_SAVE_ERRNO]: + faildescr = BasicFailDescr(1) + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)], None, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop([], ops, looptoken) + # + llerrno.set_debug_saved_errno(self.cpu, 24) + self.cpu.execute_token(looptoken) + result = llerrno.get_debug_saved_errno(self.cpu) + print 'saveerr =', saveerr, ': got result =', result + # + if saveerr == rffi.RFFI_SAVE_ERRNO: + assert result == 42 # from the C code + else: + assert result == 24 # not touched def test_call_release_gil_readsaved_errno(self): XXX diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1917,7 +1917,9 @@ def _genop_call(self, op, arglocs, resloc, is_call_release_gil=False): from rpython.jit.backend.llsupport.descr import CallDescr - cb = callbuilder.CallBuilder(self, arglocs[2], arglocs[3:], resloc) + func_index = 2 + is_call_release_gil + cb = callbuilder.CallBuilder(self, arglocs[func_index], + arglocs[func_index+1:], resloc) descr = op.getdescr() assert isinstance(descr, CallDescr) @@ -1932,7 +1934,9 @@ cb.ressign = signloc.value if is_call_release_gil: - cb.emit_call_release_gil() + saveerrloc = arglocs[2] + assert isinstance(saveerrloc, ImmedLoc) + cb.emit_call_release_gil(saveerrloc.value) else: cb.emit() @@ -2345,8 +2349,9 @@ # This loads the stack location THREADLOCAL_OFS into a # register, and then read the word at 
the given offset. # It is only supported if 'translate_support_code' is - # true; otherwise, the original call to the piece of assembler - # was done with a dummy NULL value. + # true; otherwise, the execute_token() was done with a + # dummy value for the stack location THREADLOCAL_OFS + # assert self.cpu.translate_support_code assert isinstance(resloc, RegLoc) self.mc.MOV_rs(resloc.value, THREADLOCAL_OFS) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -3,13 +3,16 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.x86.arch import (WORD, IS_X86_64, IS_X86_32, - PASS_ON_MY_FRAME, FRAME_FIXED_SIZE) + PASS_ON_MY_FRAME, FRAME_FIXED_SIZE, + THREADLOCAL_OFS) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, RegLoc, RawEspLoc, RawEbpLoc, imm, ImmedLoc) from rpython.jit.backend.x86.jump import remap_frame_layout from rpython.jit.backend.llsupport.callbuilder import AbstractCallBuilder +from rpython.jit.backend.llsupport import llerrno +from rpython.rtyper.lltypesystem import rffi # darwin requires the stack to be 16 bytes aligned on calls. @@ -146,6 +149,19 @@ if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more + def save_errno(self, save_err): + if save_err & rffi.RFFI_SAVE_ERRNO: + # Just after a call, read the real 'errno' and save a copy of + # it inside our thread-local 'rpy_errno'. Most registers are + # free here, including the callee-saved ones, except 'ebx'. 
+ rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + mc = self.mc + mc.MOV_rs(esi.value, THREADLOCAL_OFS) + mc.MOV_rm(edi.value, (esi.value, p_errno)) + mc.MOV32_rm(edi.value, (edi.value, 0)) + mc.MOV32_mr((esi.value, rpy_errno), edi.value) + def move_real_result_and_call_reacqgil_addr(self, fastgil): from rpython.jit.backend.x86 import rx86 # @@ -195,7 +211,8 @@ # in 'ebx'), and if not, we fall back to 'reacqgil_addr'. mc.J_il8(rx86.Conditions['NE'], 0) jne_location = mc.get_relative_pos() - # here, ecx is zero (so rpy_fastgil was not acquired) + # here, ecx is zero (so rpy_fastgil was in 'released' state + # before the XCHG, but the XCHG acquired it by writing 1) rst = gcrootmap.get_root_stack_top_addr() mc = self.mc mc.CMP(ebx, heap(rst)) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -743,10 +743,10 @@ else: self.perform(op, arglocs, resloc) - def _consider_call(self, op, guard_not_forced_op=None): + def _consider_call(self, op, guard_not_forced_op=None, first_arg_index=1): calldescr = op.getdescr() assert isinstance(calldescr, CallDescr) - assert len(calldescr.arg_classes) == op.numargs() - 1 + assert len(calldescr.arg_classes) == op.numargs() - first_arg_index size = calldescr.get_result_size() sign = calldescr.is_result_signed() if sign: @@ -795,8 +795,9 @@ self._consider_call(op, guard_op) def consider_call_release_gil(self, op, guard_op): + # [Const(save_err), func_addr, args...] 
assert guard_op is not None - self._consider_call(op, guard_op) + self._consider_call(op, guard_op, first_arg_index=2) def consider_call_malloc_gc(self, op): self._consider_call(op) From noreply at buildbot.pypy.org Thu Jan 15 11:33:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 11:33:30 +0100 (CET) Subject: [pypy-commit] pypy errno-again: rffi.RFFI_READSAVED_ERRNO Message-ID: <20150115103330.23CAC1C0188@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75338:1aeb4e18a1a3 Date: 2015-01-15 11:33 +0100 http://bitbucket.org/pypy/pypy/changeset/1aeb4e18a1a3/ Log: rffi.RFFI_READSAVED_ERRNO diff --git a/rpython/jit/backend/llsupport/callbuilder.py b/rpython/jit/backend/llsupport/callbuilder.py --- a/rpython/jit/backend/llsupport/callbuilder.py +++ b/rpython/jit/backend/llsupport/callbuilder.py @@ -50,9 +50,10 @@ self.prepare_arguments() self.push_gcmap_for_call_release_gil() self.call_releasegil_addr_and_move_real_arguments(fastgil) + self.write_real_errno(save_err) self.emit_raw_call() self.restore_stack_pointer() - self.save_errno(save_err) + self.read_real_errno(save_err) self.move_real_result_and_call_reacqgil_addr(fastgil) self.pop_gcmap() self.load_result() @@ -63,7 +64,10 @@ def move_real_result_and_call_reacqgil_addr(self, fastgil): raise NotImplementedError - def save_errno(self, save_err): + def write_real_errno(self, save_err): + raise NotImplementedError + + def read_real_errno(self, save_err): raise NotImplementedError def select_call_release_gil_mode(self): diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2961,7 +2961,45 @@ assert result == 24 # not touched def test_call_release_gil_readsaved_errno(self): - XXX + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import 
llerrno + # + eci = ExternalCompilationInfo( + separate_module_sources=[r''' + #include + #include + RPY_EXPORTED int test_call_release_gil_readsaved_errno(void) { + int r = errno; + printf("read saved errno: %d\n", r); + return r; + } + ''']) + fn_name = 'test_call_release_gil_readsaved_errno' + func1_ptr = rffi.llexternal(fn_name, [], rffi.INT, + compilation_info=eci, _nowrapper=True) + func1_adr = rffi.cast(lltype.Signed, func1_ptr) + calldescr = self.cpu._calldescr_dynamic_for_tests([], types.sint32) + # + for saveerr in [rffi.RFFI_READSAVED_ERRNO]: + faildescr = BasicFailDescr(1) + i1 = BoxInt() + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)], i1, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop([], ops, looptoken) + # + llerrno.set_debug_saved_errno(self.cpu, 24) + deadframe = self.cpu.execute_token(looptoken) + result = self.cpu.get_int_value(deadframe, 0) + assert llerrno.get_debug_saved_errno(self.cpu) == 24 + assert result == 24 def test_call_release_gil_zero_errno_before(self): XXX diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -149,7 +149,21 @@ if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more - def save_errno(self, save_err): + def write_real_errno(self, save_err): + if save_err & rffi.RFFI_READSAVED_ERRNO: + # Just before a call, read 'rpy_errno' and write it into the + # real 'errno'. Most registers are free here, including the + # callee-saved ones, except 'ebx' and except the ones used to + # pass the arguments on x86-64. 
+ rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + mc = self.mc + mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) + mc.MOV_rm(edx.value, (eax.value, p_errno)) + mc.MOV32_rm(eax.value, (eax.value, rpy_errno)) + mc.MOV32_mr((edx.value, 0), eax.value) + + def read_real_errno(self, save_err): if save_err & rffi.RFFI_SAVE_ERRNO: # Just after a call, read the real 'errno' and save a copy of # it inside our thread-local 'rpy_errno'. Most registers are From noreply at buildbot.pypy.org Thu Jan 15 11:36:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 11:36:02 +0100 (CET) Subject: [pypy-commit] pypy errno-again: rffi.RFFI_ZERO_ERRNO_BEFORE Message-ID: <20150115103602.1A1361C0188@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75339:05b2fc705f1a Date: 2015-01-15 11:35 +0100 http://bitbucket.org/pypy/pypy/changeset/05b2fc705f1a/ Log: rffi.RFFI_ZERO_ERRNO_BEFORE diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2981,7 +2981,7 @@ func1_adr = rffi.cast(lltype.Signed, func1_ptr) calldescr = self.cpu._calldescr_dynamic_for_tests([], types.sint32) # - for saveerr in [rffi.RFFI_READSAVED_ERRNO]: + for saveerr in [rffi.RFFI_READSAVED_ERRNO, rffi.RFFI_ZERO_ERRNO_BEFORE]: faildescr = BasicFailDescr(1) i1 = BoxInt() ops = [ @@ -2999,10 +2999,11 @@ deadframe = self.cpu.execute_token(looptoken) result = self.cpu.get_int_value(deadframe, 0) assert llerrno.get_debug_saved_errno(self.cpu) == 24 - assert result == 24 - - def test_call_release_gil_zero_errno_before(self): - XXX + # + if saveerr == rffi.RFFI_READSAVED_ERRNO: + assert result == 24 + else: + assert result == 0 def test_call_release_gil_save_lasterror(self): XXX diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- 
a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -162,6 +162,13 @@ mc.MOV_rm(edx.value, (eax.value, p_errno)) mc.MOV32_rm(eax.value, (eax.value, rpy_errno)) mc.MOV32_mr((edx.value, 0), eax.value) + elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: + # Same, but write zero. + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + mc = self.mc + mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) + mc.MOV_rm(eax.value, (eax.value, p_errno)) + mc.MOV32_mi((eax.value, 0), 0) def read_real_errno(self, save_err): if save_err & rffi.RFFI_SAVE_ERRNO: From noreply at buildbot.pypy.org Thu Jan 15 12:13:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 12:13:40 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Translation fix. Add translated test. Message-ID: <20150115111341.00EC81C0013@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75340:8f429ef3128d Date: 2015-01-15 12:11 +0100 http://bitbucket.org/pypy/pypy/changeset/8f429ef3128d/ Log: Translation fix. Add translated test. 
diff --git a/rpython/jit/backend/llsupport/llerrno.py b/rpython/jit/backend/llsupport/llerrno.py --- a/rpython/jit/backend/llsupport/llerrno.py +++ b/rpython/jit/backend/llsupport/llerrno.py @@ -13,7 +13,7 @@ def get_rpy_errno_offset(cpu): if cpu.translate_support_code: from rpython.rlib import rthread - return rthread.tlfield_rpy_errno.offset + return rthread.tlfield_rpy_errno.getoffset() else: return 3 * WORD @@ -32,8 +32,10 @@ def get_p_errno_offset(cpu): if cpu.translate_support_code: from rpython.rlib import rthread - return rthread.tlfield_p_errno.offset + return rthread.tlfield_p_errno.getoffset() else: + # fetch the real address of errno (in this thread), and store it + # at offset 2 in the _debug_errno_container if cpu._debug_errno_container[2] == 0: addr_errno = _fetch_addr_errno() assert addr_errno != 0 diff --git a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py @@ -2,6 +2,8 @@ from rpython.rlib.jit import dont_look_inside from rpython.rlib.objectmodel import invoke_around_extcall from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rlib import rposix from rpython.rtyper.annlowlevel import llhelper @@ -96,23 +98,36 @@ self.run('close_stack') assert 'call_release_gil' in udir.join('TestCompileFramework.log').read() - ## def define_get_set_errno(self): + def define_get_set_errno(self): + eci = ExternalCompilationInfo( + post_include_bits=[r''' + #include + static int test_get_set_errno(void) { + int r = errno; + //fprintf(stderr, "read saved errno: %d\n", r); + errno = 42; + return r; + } + ''']) - ## c_strchr = rffi.llexternal('strchr', [rffi.CCHARP, lltype.Signed], - ## rffi.CCHARP, ... 
+ c_test = rffi.llexternal('test_get_set_errno', [], rffi.INT, + compilation_info=eci, + save_err=rffi.RFFI_FULL_ERRNO) - ## def before(n, x): - ## return (n, None, None, None, None, None, - ## None, None, None, None, None, None) - ## # - ## def f(n, x, *args): - ## a = rffi.str2charp(str(n)) - ## c_strchr(a, ord('0')) - ## lltype.free(a, flavor='raw') - ## n -= 1 - ## return (n, x) + args - ## return before, f, None + def before(n, x): + return (n, None, None, None, None, None, + None, None, None, None, None, None) + # + def f(n, x, *args): + rposix.set_saved_errno(24) + result1 = c_test() + result2 = rposix.get_saved_errno() + assert result1 == 24 + assert result2 == 42 + n -= 1 + return (n, x) + args + return before, f, None - ## def test_get_set_errno(self): - ## self.run('get_set_errno') - ## assert 'call_release_gil' in udir.join('TestCompileFramework.log').read() + def test_get_set_errno(self): + self.run('get_set_errno') + assert 'call_release_gil' in udir.join('TestCompileFramework.log').read() diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -309,9 +309,14 @@ else: self.local.rawvalue = value + def getoffset(): + _threadlocalref_seeme(self) + return offset + self.getraw = getraw self.get_or_make_raw = get_or_make_raw self.setraw = setraw + self.getoffset = getoffset def _freeze_(self): return True diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -12,6 +12,9 @@ static void _RPy_ThreadLocals_Init(void *p) { memset(p, 0, sizeof(struct pypy_threadlocal_s)); +#ifdef RPY_TLOFS_p_errno + ((struct pypy_threadlocal_s *)p)->p_errno = &errno; +#endif #ifdef RPY_TLOFS_thread_ident ((struct pypy_threadlocal_s *)p)->thread_ident = # ifdef _WIN32 From noreply at buildbot.pypy.org Thu Jan 15 13:35:59 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 
Jan 2015 13:35:59 +0100 (CET) Subject: [pypy-commit] pypy vmprof: add rbisect Message-ID: <20150115123559.5A04D1C008E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75341:3d101135a1f5 Date: 2015-01-15 13:15 +0200 http://bitbucket.org/pypy/pypy/changeset/3d101135a1f5/ Log: add rbisect diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -24,6 +24,10 @@ self.free_blocks = {} # map {start: stop} self.free_blocks_end = {} # map {stop: start} self.blocks_by_size = [[] for i in range(self.num_indices)] + # two lists of jit addresses (sorted) and the corresponding stack + # depths + self.jit_addr_map = [] + self.jit_frame_depth_map = [] def malloc(self, minsize, maxsize): """Allocate executable memory, between minsize and maxsize bytes, @@ -151,6 +155,18 @@ del self.free_blocks_end[stop] return (start, stop) + def register_frame_depth_map(self, rawstart, frame_positions, + frame_assignments): + if not self.jit_addr_map or rawstart > self.jit_addr_map[-1]: + start = len(self.jit_addr_map) + self.jit_addr_map += [0] * len(frame_positions) + self.jit_frame_depth_map += [0] * len(frame_positions) + for i, pos in enumerate(frame_positions): + self.jit_addr_map[i + start] = pos + rawstart + self.jit_frame_depth_map[i + start] = frame_assignments[i] + else: + xxx + def _delete(self): "NOT_RPYTHON" if self._allocated: @@ -311,6 +327,10 @@ assert gcrootmap is not None for pos, mark in self.gcroot_markers: gcrootmap.register_asm_addr(rawstart + pos, mark) + asmmemmgr.register_frame_depth_map(rawstart, self.frame_positions, + self.frame_assignments) + self.frame_positions = None + self.frame_assignments = None return rawstart def _become_a_plain_block_builder(self): diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- 
a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -16,7 +16,7 @@ FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr, FLAG_POINTER, FLAG_FLOAT) from rpython.jit.backend.llsupport.memcpy import memset_fn -from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager +from rpython.jit.backend.llsupport import asmmemmgr from rpython.rlib.unroll import unrolling_iterable @@ -48,7 +48,8 @@ self._setup_exception_handling_translated() else: self._setup_exception_handling_untranslated() - self.asmmemmgr = AsmMemoryManager() + self.asmmemmgr = asmmemmgr.AsmMemoryManager() + asmmemmgr._memmngr = self.asmmemmgr self._setup_frame_realloc(translate_support_code) ad = self.gc_ll_descr.getframedescrs(self).arraydescr self.signedarraydescr = ad diff --git a/rpython/jit/backend/x86/codebuf.py b/rpython/jit/backend/x86/codebuf.py --- a/rpython/jit/backend/x86/codebuf.py +++ b/rpython/jit/backend/x86/codebuf.py @@ -22,8 +22,8 @@ LocationCodeBuilder, codebuilder_cls): def __init__(self): + self.init_block_builder() codebuilder_cls.__init__(self) - self.init_block_builder() # a list of relative positions; for each position p, the bytes # at [p-4:p] encode an absolute address that will need to be # made relative. Only works on 32-bit! 
diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -448,6 +448,8 @@ """Abstract base class.""" def __init__(self): + self.frame_positions = [] + self.frame_assignments = [] self.force_frame_size(self.WORD) def writechar(self, char): @@ -468,11 +470,15 @@ self.writechar(chr((imm >> 24) & 0xFF)) def force_frame_size(self, frame_size): + self.frame_positions.append(self.get_relative_pos()) + self.frame_assignments.append(frame_size) self._frame_size = frame_size def stack_frame_size_delta(self, delta): "Called when we generate an instruction that changes the value of ESP" self._frame_size += delta + self.frame_positions.append(self.get_relative_pos()) + self.frame_assignments.append(self._frame_size) assert self._frame_size >= self.WORD def check_stack_size_at_ret(self): diff --git a/rpython/rlib/rbisect.py b/rpython/rlib/rbisect.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rbisect.py @@ -0,0 +1,9 @@ + +def bisect(a, x): + lo = 0 + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if x < a[mid]: hi = mid + else: lo = mid+1 + return lo diff --git a/rpython/rlib/test/test_rbisect.py b/rpython/rlib/test/test_rbisect.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rbisect.py @@ -0,0 +1,47 @@ + +from rpython.rlib.rbisect import bisect + +def test_bisect(): + cases = [ + ([], 1, 0), + ([1], 0, 0), + ([1], 1, 1), + ([1], 2, 1), + ([1, 1], 0, 0), + ([1, 1], 1, 2), + ([1, 1], 2, 2), + ([1, 1, 1], 0, 0), + ([1, 1, 1], 1, 3), + ([1, 1, 1], 2, 3), + ([1, 1, 1, 1], 0, 0), + ([1, 1, 1, 1], 1, 4), + ([1, 1, 1, 1], 2, 4), + ([1, 2], 0, 0), + ([1, 2], 1, 1), + ([1, 2], 1.5, 1), + ([1, 2], 2, 2), + ([1, 2], 3, 2), + ([1, 1, 2, 2], 0, 0), + ([1, 1, 2, 2], 1, 2), + ([1, 1, 2, 2], 1.5, 2), + ([1, 1, 2, 2], 2, 4), + ([1, 1, 2, 2], 3, 4), + ([1, 2, 3], 0, 0), + ([1, 2, 3], 1, 1), + ([1, 2, 3], 1.5, 1), + ([1, 2, 3], 2, 2), + ([1, 2, 3], 2.5, 2), + ([1, 2, 
3], 3, 3), + ([1, 2, 3], 4, 3), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 0, 0), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1, 1), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 1.5, 1), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2, 3), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 2.5, 3), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3, 6), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 3.5, 6), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 4, 10), + ([1, 2, 2, 3, 3, 3, 4, 4, 4, 4], 5, 10), + ] + for lst, elem, exp in cases: + assert bisect(lst, elem) == exp From noreply at buildbot.pypy.org Thu Jan 15 13:36:00 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 13:36:00 +0100 (CET) Subject: [pypy-commit] pypy vmprof: try to improve _vmprof by using the jit address correctly from the jit Message-ID: <20150115123600.864841C008E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75342:8e42a00ec79f Date: 2015-01-15 14:35 +0200 http://bitbucket.org/pypy/pypy/changeset/8e42a00ec79f/ Log: try to improve _vmprof by using the jit address correctly from the jit diff --git a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -0,0 +1,13 @@ + +long pypy_jit_start_addr(); +long pypy_jit_end_addr(); +long pypy_jit_stack_depth_at_loc(long); + +static ptrdiff_t vmprof_unw_get_custom_offset(void* ip) { + long ip_l = (long)ip; + + if (ip < pypy_jit_start_addr() or ip > pypy_jit_end_addr()) { + return -1; + } + return pypy_jit_stack_depth_at_loc(ip); +} diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -77,21 +77,7 @@ * ****************************************************** */ -static void* jit_start = NULL; -static void* jit_end = NULL; -void vmprof_set_jit_range(void* start, void* end) { - jit_start = start; - jit_end = end; -} - -static ptrdiff_t 
vmprof_unw_get_custom_offset(void* ip) { - /* temporary hack to determine is this particular frame is JITted or not */ - if (ip >= jit_start && ip <= jit_end) { - // it's probably a JIT frame - return 19*8; // XXX - } - return -1; // not JITted code -} +#include "get_custom_offset.c" typedef struct { @@ -119,9 +105,9 @@ // setting the IP and SP in the cursor vmprof_hacked_unw_cursor_t *cp2 = (vmprof_hacked_unw_cursor_t*)cp; void* bp = (void*)sp + sp_offset; - cp2->sp = bp+8; // the ret will pop a word, so the SP of the caller is - // 8 bytes away from us - cp2->ip = ((void**)bp)[0]; // the ret is on the top of the stack + cp2->sp = bp; + cp2->ip = ((void**)(bp - sizeof(void*))[0]; + // the ret is on the top of the stack return 1; } } diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -5,6 +5,34 @@ from rpython.rlib.debug import debug_start, debug_print, debug_stop from rpython.rlib.debug import have_debug_prints from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib.rbisect import bisect +from rpython.rlib import rgc +from rpython.rlib.entrypoint import entrypoint_lowlevel + +_memmngr = None # global reference so we can use @entrypoint :/ + + at entrypoint_lowlevel('main', [lltype.Signed], + c_name='pypy_jit_stack_depth_at_loc') + at rgc.no_collect +def stack_depth_at_loc(loc): + global _memmngr + + pos = bisect(_memmngr.jit_addr_map, loc) + if pos == 0 or pos == len(_memmngr.jit_addr_map): + return -1 + return _memmngr.jit_frame_depth_map[pos-1] + + at entrypoint_lowlevel('main', [], c_name='pypy_jit_start_addr') +def jit_start_addr(loc): + global _memmngr + + return _memmngr.jit_addr_map[0] + + at entrypoint_lowlevel('main', [], c_name='pypy_jit_end_addr') +def jit_end_addr(loc): + global _memmngr + + return _memmngr.jit_addr_map[-1] class AsmMemoryManager(object): @@ -49,6 +77,13 @@ if 
r_uint is not None: self.total_mallocs -= r_uint(stop - start) self._add_free_block(start, stop) + # fix up jit_addr_map + jit_adr_start = bisect(self.jit_addr_map, start) + jit_adr_stop = bisect(self.jit_addr_map, stop) + self.jit_addr_map = (self.jit_addr_map[:jit_adr_start] + + self.jit_addr_map[jit_adr_stop:]) + self.jit_frame_depth_map = (self.jit_frame_depth_map[:jit_adr_start] + + self.jit_frame_depth_map[jit_adr_stop:]) def open_malloc(self, minsize): """Allocate at least minsize bytes. Returns (start, stop).""" @@ -157,15 +192,23 @@ def register_frame_depth_map(self, rawstart, frame_positions, frame_assignments): + if not frame_positions: + return if not self.jit_addr_map or rawstart > self.jit_addr_map[-1]: start = len(self.jit_addr_map) self.jit_addr_map += [0] * len(frame_positions) self.jit_frame_depth_map += [0] * len(frame_positions) - for i, pos in enumerate(frame_positions): - self.jit_addr_map[i + start] = pos + rawstart - self.jit_frame_depth_map[i + start] = frame_assignments[i] else: - xxx + start = bisect(self.jit_addr_map, rawstart) + self.jit_addr_map = (self.jit_addr_map[:start] + + [0] * len(frame_positions) + + self.jit_addr_map[start:]) + self.jit_frame_depth_map = (self.jit_frame_depth_map[:start] + + [0] * len(frame_positions) + + self.jit_frame_depth_map[start:]) + for i, pos in enumerate(frame_positions): + self.jit_addr_map[i + start] = pos + rawstart + self.jit_frame_depth_map[i + start] = frame_assignments[i] def _delete(self): "NOT_RPYTHON" @@ -226,6 +269,9 @@ gcroot_markers = None + frame_positions = None + frame_assignments = None + def __init__(self, translated=None): if translated is None: translated = we_are_translated() diff --git a/rpython/jit/backend/llsupport/test/test_asmmemmgr.py b/rpython/jit/backend/llsupport/test/test_asmmemmgr.py --- a/rpython/jit/backend/llsupport/test/test_asmmemmgr.py +++ b/rpython/jit/backend/llsupport/test/test_asmmemmgr.py @@ -2,6 +2,7 @@ from rpython.jit.backend.llsupport.asmmemmgr import 
AsmMemoryManager from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin +from rpython.jit.backend.llsupport import asmmemmgr from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib import debug @@ -157,6 +158,7 @@ class FakeGcRootMap: def register_asm_addr(self, retaddr, mark): puts.append((retaddr, mark)) + # mc = BlockBuilderMixin() mc.writechar('X') @@ -262,3 +264,16 @@ md.done() assert allblocks == [(1597, 1697), (1797, 1835)] assert ops == [('free', 1835, 1897)] + +def test_find_jit_frame_depth(): + mgr = AsmMemoryManager() + mgr.register_frame_depth_map(11, [0, 5, 10], [1, 2, 3]) + mgr.register_frame_depth_map(30, [0, 5, 10], [4, 5, 6]) + mgr.register_frame_depth_map(0, [0, 5, 10], [7, 8, 9]) + asmmemmgr._memmngr = mgr + assert asmmemmgr.stack_depth_at_loc(13) == 1 + assert asmmemmgr.stack_depth_at_loc(-3) == -1 + assert asmmemmgr.stack_depth_at_loc(41) == -1 + assert asmmemmgr.stack_depth_at_loc(5) == 8 + assert asmmemmgr.stack_depth_at_loc(17) == 2 + assert asmmemmgr.stack_depth_at_loc(38) == 5 diff --git a/rpython/jit/backend/x86/test/test_regloc.py b/rpython/jit/backend/x86/test/test_regloc.py --- a/rpython/jit/backend/x86/test/test_regloc.py +++ b/rpython/jit/backend/x86/test/test_regloc.py @@ -8,10 +8,18 @@ import py.test class LocationCodeBuilder32(CodeBuilder32, LocationCodeBuilder): - pass + def force_frame_size(self, frame_size): + pass + + def stack_frame_size_delta(self, delta): + pass class LocationCodeBuilder64(CodeBuilder64, LocationCodeBuilder): - pass + def force_frame_size(self, frame_size): + pass + + def stack_frame_size_delta(self, delta): + pass cb32 = LocationCodeBuilder32 cb64 = LocationCodeBuilder64 diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ b/rpython/jit/backend/x86/test/test_rx86.py @@ -14,6 +14,12 @@ def getvalue(self): 
return ''.join(self.buffer) + def force_frame_size(self, frame_size): + pass + + def stack_frame_size_delta(self, delta): + pass + def assert_encodes_as(code_builder_cls, insn_name, args, expected_encoding): s = code_builder_cls() getattr(s, insn_name)(*args) From noreply at buildbot.pypy.org Thu Jan 15 13:52:33 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 13:52:33 +0100 (CET) Subject: [pypy-commit] pypy vmprof: try to make jit entrypoints work Message-ID: <20150115125233.6EB2A1C0035@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75343:3d53ff995d24 Date: 2015-01-15 14:52 +0200 http://bitbucket.org/pypy/pypy/changeset/3d53ff995d24/ Log: try to make jit entrypoints work diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -7,12 +7,12 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.rbisect import bisect from rpython.rlib import rgc -from rpython.rlib.entrypoint import entrypoint_lowlevel +from rpython.rlib.entrypoint import jit_entrypoint _memmngr = None # global reference so we can use @entrypoint :/ - at entrypoint_lowlevel('main', [lltype.Signed], - c_name='pypy_jit_stack_depth_at_loc') + at jit_entrypoint([lltype.Signed], lltype.Signed, + c_name='pypy_jit_stack_depth_at_loc') @rgc.no_collect def stack_depth_at_loc(loc): global _memmngr @@ -22,13 +22,13 @@ return -1 return _memmngr.jit_frame_depth_map[pos-1] - at entrypoint_lowlevel('main', [], c_name='pypy_jit_start_addr') + at jit_entrypoint([], lltype.Signed, c_name='pypy_jit_start_addr') def jit_start_addr(loc): global _memmngr return _memmngr.jit_addr_map[0] - at entrypoint_lowlevel('main', [], c_name='pypy_jit_end_addr') + at jit_entrypoint([], lltype.Signed, c_name='pypy_jit_end_addr') def jit_end_addr(loc): global _memmngr diff --git 
a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -25,6 +25,7 @@ from rpython.jit.codewriter.policy import JitPolicy from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES +from rpython.rlib.entrypoint import all_jit_entrypoints # ____________________________________________________________ @@ -228,6 +229,7 @@ verbose = False # not self.cpu.translate_support_code self.rewrite_access_helpers() + self.create_jit_entry_points() self.codewriter.make_jitcodes(verbose=verbose) self.rewrite_can_enter_jits() self.rewrite_set_param_and_get_stats() @@ -676,6 +678,10 @@ op = block.operations[index] self.rewrite_access_helper(op) + def create_jit_entry_points(self): + for func, args_s, s_result in all_jit_entrypoints: + self.helper_func(func, args_s, s_result) + def rewrite_access_helper(self, op): # make sure we make a copy of function so it no longer belongs # to extregistry diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -10,6 +10,16 @@ func.exported_symbol = True return func +all_jit_entrypoints = [] + +def jit_entrypoint(argtypes, restype, c_name): + def deco(func): + func.c_name = c_name + func.relax_sig_check = True + export_symbol(func) + all_jit_entrypoints.append((func, argtypes, restype)) + return func + return deco def entrypoint_lowlevel(key, argtypes, c_name=None, relax=False): """ Note: entrypoint should call llop.gc_stack_bottom on it's own. 
From noreply at buildbot.pypy.org Thu Jan 15 14:26:08 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 14:26:08 +0100 (CET) Subject: [pypy-commit] pypy vmprof: hrmpf Message-ID: <20150115132608.DC6981C0035@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75344:ff99e82da184 Date: 2015-01-15 15:25 +0200 http://bitbucket.org/pypy/pypy/changeset/ff99e82da184/ Log: hrmpf diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -679,8 +679,8 @@ self.rewrite_access_helper(op) def create_jit_entry_points(self): - for func, args_s, s_result in all_jit_entrypoints: - self.helper_func(func, args_s, s_result) + for func, args, result in all_jit_entrypoints: + self.helper_func(lltype.FuncPtr(args, result), func) def rewrite_access_helper(self, op): # make sure we make a copy of function so it no longer belongs From noreply at buildbot.pypy.org Thu Jan 15 14:43:09 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 14:43:09 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Fix _cffi_backend. Message-ID: <20150115134309.D09B61C0013@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75345:c247cd8b63fe Date: 2015-01-15 14:42 +0100 http://bitbucket.org/pypy/pypy/changeset/c247cd8b63fe/ Log: Fix _cffi_backend. diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -160,7 +160,7 @@ @jit.jit_callback("CFFI") -def invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): +def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args @@ -168,7 +168,6 @@ ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ - e = cerrno.get_real_errno() ll_res = rffi.cast(rffi.CCHARP, ll_res) unique_id = rffi.cast(lltype.Signed, ll_userdata) callback = global_callback_mapping.get(unique_id) @@ -185,12 +184,9 @@ return # must_leave = False - ec = None space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - ec = cerrno.get_errno_container(space) - cerrno.save_errno_into(ec, e) extra_line = '' try: w_res = callback.invoke(ll_args) @@ -212,5 +208,8 @@ callback.write_error_return_value(ll_res) if must_leave: space.threadlocals.leave_thread(space) - if ec is not None: - cerrno.restore_errno_from(ec) + +def invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): + cerrno._errno_after(rffi.RFFI_ERR_ALL) + _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata) + cerrno._errno_before(rffi.RFFI_ERR_ALL) diff --git a/pypy/module/_cffi_backend/cerrno.py b/pypy/module/_cffi_backend/cerrno.py --- a/pypy/module/_cffi_backend/cerrno.py +++ b/pypy/module/_cffi_backend/cerrno.py @@ -2,7 +2,6 @@ from rpython.rlib import rposix -from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.gateway import unwrap_spec WIN32 = sys.platform == 'win32' @@ -10,40 +9,21 @@ from rpython.rlib import rwin32 -ExecutionContext._cffi_saved_errno = 0 -ExecutionContext._cffi_saved_LastError = 0 - - -def get_errno_container(space): - return space.getexecutioncontext() - -get_real_errno = rposix.get_errno - - -def restore_errno_from(ec): - if WIN32: - rwin32.SetLastError(ec._cffi_saved_LastError) - rposix.set_errno(ec._cffi_saved_errno) - -def save_errno_into(ec, errno): - ec._cffi_saved_errno = errno - if WIN32: - ec._cffi_saved_LastError = rwin32.GetLastError() - +_errno_before = rposix._errno_before +_errno_after = 
rposix._errno_after def get_errno(space): - ec = get_errno_container(space) - return space.wrap(ec._cffi_saved_errno) + return space.wrap(rposix.get_saved_errno()) @unwrap_spec(errno=int) def set_errno(space, errno): - ec = get_errno_container(space) - ec._cffi_saved_errno = errno + rposix.set_saved_errno(errno) # ____________________________________________________________ @unwrap_spec(code=int) def getwinerror(space, code=-1): + XXX from rpython.rlib.rwin32 import FormatError if code == -1: ec = get_errno_container(space) diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -155,13 +155,9 @@ # argtype is a pointer type, and w_obj a list/tuple/str mustfree_max_plus_1 = i + 1 - ec = cerrno.get_errno_container(space) - cerrno.restore_errno_from(ec) jit_libffi.jit_ffi_call(cif_descr, rffi.cast(rffi.VOIDP, funcaddr), buffer) - e = cerrno.get_real_errno() - cerrno.save_errno_into(ec, e) resultdata = rffi.ptradd(buffer, cif_descr.exchange_result) w_res = self.ctitem.copy_and_convert_to_object(resultdata) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -107,6 +107,21 @@ rthread.tlfield_rpy_errno.setraw(rffi.cast(INT, errno)) +def _errno_before(save_err): + if save_err & rffi.RFFI_READSAVED_ERRNO: + from rpython.rlib import rthread + _set_errno(rthread.tlfield_rpy_errno.getraw()) + elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: + _set_errno(rffi.cast(rffi.INT, 0)) +_errno_before._always_inline_ = True + +def _errno_after(save_err): + if save_err & rffi.RFFI_SAVE_ERRNO: + from rpython.rlib import rthread + rthread.tlfield_rpy_errno.setraw(_get_errno()) +_errno_after._always_inline_ = True + + if os.name == 'nt': is_valid_fd = jit.dont_look_inside(rffi.llexternal( "_PyVerify_fd", [rffi.INT], rffi.INT, diff --git a/rpython/rtyper/lltypesystem/rffi.py 
b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -157,12 +157,6 @@ return funcptr - argnames = ', '.join(['a%d' % i for i in range(len(args))]) - errno_before = (save_err & RFFI_READSAVED_ERRNO) != 0 - errno_zero_before = (save_err & RFFI_ZERO_ERRNO_BEFORE) != 0 - errno_after = (save_err & RFFI_SAVE_ERRNO) != 0 - errno_any = errno_before or errno_zero_before or errno_after - if invoke_around_handlers: # The around-handlers are releasing the GIL in a threaded pypy. # We need tons of care to ensure that no GC operation and no @@ -173,34 +167,23 @@ # neither '*args' nor the GC objects originally passed in as # argument to wrapper(), if any (e.g. RPython strings). + argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" - if %(errno_any)s: - from rpython.rlib import rposix, rthread + from rpython.rlib import rposix def call_external_function(%(argnames)s): before = aroundstate.before if before: before() # NB. it is essential that no exception checking occurs here! - # - # restore errno from its saved value - if %(errno_before)s: - rposix._set_errno(rthread.tlfield_rpy_errno.getraw()) - elif %(errno_zero_before)s: - rposix._set_errno(int_zero) - # + rposix._errno_before(%(save_err)d) res = funcptr(%(argnames)s) - # - # save errno away - if %(errno_after)s: - rthread.tlfield_rpy_errno.setraw(rposix._get_errno()) - # + rposix._errno_after(%(save_err)d) after = aroundstate.after if after: after() return res """ % locals()) miniglobals = {'aroundstate': aroundstate, 'funcptr': funcptr, - 'int_zero': cast(INT, 0), '__name__': __name__, # for module name propagation } exec source.compile() in miniglobals @@ -227,27 +210,17 @@ else: # ...well, unless it's a macro, in which case we still have # to hide it from the JIT... 
+ argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" - if %(errno_any)s: - from rpython.rlib import rposix, rthread + from rpython.rlib import rposix def call_external_function(%(argnames)s): - # restore errno from its saved value - if %(errno_before)s: - rposix._set_errno(rthread.tlfield_rpy_errno.getraw()) - elif %(errno_zero_before)s: - rposix._set_errno(int_zero) - # + rposix._errno_before(%(save_err)d) res = funcptr(%(argnames)s) - # - # save errno away - if %(errno_after)s: - rthread.tlfield_rpy_errno.setraw(rposix._get_errno()) - # + rposix._errno_after(%(save_err)d) return res """ % locals()) miniglobals = {'funcptr': funcptr, - 'int_zero': cast(INT, 0), '__name__': __name__, } exec source.compile() in miniglobals From noreply at buildbot.pypy.org Thu Jan 15 14:51:53 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 14:51:53 +0100 (CET) Subject: [pypy-commit] pypy vmprof: I was sure we had a shortcut Message-ID: <20150115135153.1EA321C0013@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75346:693985864bfc Date: 2015-01-15 15:51 +0200 http://bitbucket.org/pypy/pypy/changeset/693985864bfc/ Log: I was sure we had a shortcut diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -680,7 +680,7 @@ def create_jit_entry_points(self): for func, args, result in all_jit_entrypoints: - self.helper_func(lltype.FuncPtr(args, result), func) + self.helper_func(lltype.Ptr(lltype.FuncType(args, result), func)) def rewrite_access_helper(self, op): # make sure we make a copy of function so it no longer belongs From noreply at buildbot.pypy.org Thu Jan 15 15:08:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 15:08:46 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Next sprint Message-ID: 
<20150115140846.9FB321C0083@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5484:001bcc45ba60 Date: 2015-01-15 15:09 +0100 http://bitbucket.org/pypy/extradoc/changeset/001bcc45ba60/ Log: Next sprint diff --git a/sprintinfo/leysin-winter-2015/announcement.txt b/sprintinfo/leysin-winter-2015/announcement.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2015/announcement.txt @@ -0,0 +1,69 @@ +===================================================================== + PyPy Leysin Winter Sprint (20-28th February 2015) +===================================================================== + +The next PyPy sprint will be in Leysin, Switzerland, for the tenth time. +This is a fully public sprint: newcomers and topics other than those +proposed below are welcome. + +------------------------------ +Goals and topics of the sprint +------------------------------ + +The details depend on who is here and ready to work. We might touch +topics such as: + +* cleaning up the optimization step in the JIT, change the register + allocation done by the JIT's backend, or improvements to the + warm-up time + +* STM (Software Transaction Memory), notably: try to come up with + benchmarks, and measure them carefully in order to test and improve + the conflict reporting tools, and more generally to figure out how + practical it is in large projects to avoid conflicts + +* Py3k (Python 3.x support), NumPyPy (the numpy module) + +* And as usual, the main side goal is to have fun in winter sports :-) + We can take a day off for ski. + +----------- +Exact times +----------- + +For a change, and as an attempt to simplify things, I specified the +dates as 20-28 Februrary 2015, where 20 and 28 are travel days. We will +work full days between the 21 and the 27. You are of course allowed to +show up for a part of that time only, too. 
+ +----------------------- +Location & Accomodation +----------------------- + +Leysin, Switzerland, "same place as before". Let me refresh your +memory: both the sprint venue and the lodging will be in a very spacious +pair of chalets built specifically for bed & breakfast: +http://www.ermina.ch/. The place has a good ADSL Internet connexion +with wireless installed. You can of course arrange your own lodging +anywhere (as long as you are in Leysin, you cannot be more than a 15 +minutes walk away from the sprint venue), but I definitely recommend +lodging there too -- you won't find a better view anywhere else (though +you probably won't get much worse ones easily, either :-) + +Please *confirm* that you are coming so that we can adjust the +reservations as appropriate. The rate so far used to be around 60 CHF a +night all included in 2-person rooms, with breakfast. The rooms +available now are either single-person (or couple), or 3-4 persons; +the latter choice should be under 60 CHF per person. + +Please register by Mercurial:: + + https://bitbucket.org/pypy/extradoc/ + https://bitbucket.org/pypy/extradoc/raw/extradoc/sprintinfo/leysin-winter-2015 + +or on the pypy-dev mailing list if you do not yet have check-in rights: + + http://mail.python.org/mailman/listinfo/pypy-dev + +You need a Swiss-to-(insert country here) power adapter. There will be +some Swiss-to-EU adapters around, and at least one EU-format power strip. diff --git a/sprintinfo/leysin-winter-2015/people.txt b/sprintinfo/leysin-winter-2015/people.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2015/people.txt @@ -0,0 +1,62 @@ + +People coming to the Leysin sprint Winter 2014 +================================================== + +People who have a ``?`` in their arrive/depart or accomodation +column are known to be coming but there are no details +available yet from them. 
+ + +==================== ============== ======================= + Name Arrive/Depart Accomodation +==================== ============== ======================= +Armin Rigo private +Maciej Fijalkowski 20-28 Ermina +==================== ============== ======================= + + +People on the following list were present at previous sprints: + +==================== ============== ===================== + Name Arrive/Depart Accomodation +==================== ============== ===================== +Romain Guillebert ? ? +Remi Meier ? ? +Christian Clauss ? ? +Johan Råde ? ? +Antonio Cuni ? ? +Manuel Jacob ? ? +Michael Foord ? ? +David Schneider ? ? +Jacob Hallen ? ? +Laura Creighton ? ? +Hakan Ardo ? ? +Carl Friedrich Bolz ? ? +Samuele Pedroni ? ? +Anders Hammarquist ? ? +Christian Tismer ? ? +Niko Matsakis ? ? +Toby Watson ? ? +Paul deGrandis ? ? +Michael Hudson ? ? +Anders Lehmann ? ? +Niklaus Haldimann ? ? +Lene Wagner ? ? +Amaury Forgeot d'Arc ? ? +Valentino Volonghi ? ? +Boris Feigin ? ? +Andrew Thompson ? ? +Bert Freudenberg ? ? +Beatrice Duering ? ? +Richard Emslie ? ? +Johan Hahn ? ? +Stephan Diehl ? ? +Alexander Schremmer ? ? +Anders Chrigstroem ? ? +Eric van Riet Paap ? ? +Holger Krekel ? ? +Guido Wesdorp ? ? +Leonardo Santagada ? ? +Alexandre Fayolle ? ? +Sylvain Thénault ? ? 
+==================== ============== ===================== From noreply at buildbot.pypy.org Thu Jan 15 15:10:09 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 15:10:09 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: update Message-ID: <20150115141009.1AE881C0035@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5485:d6d242c5b961 Date: 2015-01-15 15:10 +0100 http://bitbucket.org/pypy/extradoc/changeset/d6d242c5b961/ Log: update diff --git a/sprintinfo/leysin-winter-2015/announcement.txt b/sprintinfo/leysin-winter-2015/announcement.txt --- a/sprintinfo/leysin-winter-2015/announcement.txt +++ b/sprintinfo/leysin-winter-2015/announcement.txt @@ -51,10 +51,10 @@ you probably won't get much worse ones easily, either :-) Please *confirm* that you are coming so that we can adjust the -reservations as appropriate. The rate so far used to be around 60 CHF a -night all included in 2-person rooms, with breakfast. The rooms -available now are either single-person (or couple), or 3-4 persons; -the latter choice should be under 60 CHF per person. +reservations as appropriate. In the past, the rates were around 60 CHF a +night all included in 2-person rooms, with breakfast. Now, the rooms +available are either single-person (or couple), or rooms for 3 persons. +The latter choice is recommended and should be under 60 CHF per person. 
Please register by Mercurial:: From noreply at buildbot.pypy.org Thu Jan 15 15:31:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 15:31:51 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Add some more get_errno() -> get_saved_errno() Message-ID: <20150115143151.7B6871C0046@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75347:56abcde18cf2 Date: 2015-01-15 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/56abcde18cf2/ Log: Add some more get_errno() -> get_saved_errno() diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -19,10 +19,16 @@ def make_write_blocking_error(space, written): + # XXX CPython reads 'errno' here. I *think* it doesn't make sense, + # because we might reach this point after calling a write() method + # that may be overridden by the user, if that method returns None. + # In that case what we get is a potentially nonsense errno. But + # we'll use get_saved_errno() anyway, and hope (like CPython does) + # that we're getting a reasonable value at this point. 
w_type = space.gettypeobject(W_BlockingIOError.typedef) w_value = space.call_function( w_type, - space.wrap(rposix.get_errno()), + space.wrap(rposix.get_saved_errno()), space.wrap("write could not complete without blocking"), space.wrap(written)) return OperationError(w_type, w_value) diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py --- a/pypy/module/_locale/interp_locale.py +++ b/pypy/module/_locale/interp_locale.py @@ -300,7 +300,8 @@ return space.wrap(result) _bindtextdomain = rlocale.external('bindtextdomain', [rffi.CCHARP, rffi.CCHARP], - rffi.CCHARP) + rffi.CCHARP, + save_err=rffi.RFFI_SAVE_ERRNO) @unwrap_spec(domain=str) def bindtextdomain(space, domain, w_dir): @@ -325,7 +326,7 @@ rffi.free_charp(dir_c) if not dirname: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OperationError(space.w_OSError, space.wrap(errno)) return space.wrap(rffi.charp2str(dirname)) diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -81,51 +81,59 @@ _sem_open = external('sem_open', [rffi.CCHARP, rffi.INT, rffi.INT, rffi.UINT], - SEM_T) + SEM_T, save_err=rffi.RFFI_SAVE_ERRNO) # sem_close is releasegil=False to be able to use it in the __del__ - _sem_close = external('sem_close', [SEM_T], rffi.INT, releasegil=False) - _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT) - _sem_wait = external('sem_wait', [SEM_T], rffi.INT) - _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT) - _sem_post = external('sem_post', [SEM_T], rffi.INT) - _sem_getvalue = external('sem_getvalue', [SEM_T, rffi.INTP], rffi.INT) + _sem_close = external('sem_close', [SEM_T], rffi.INT, releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_wait = external('sem_wait', 
[SEM_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_post = external('sem_post', [SEM_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_getvalue = external('sem_getvalue', [SEM_T, rffi.INTP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) - _gettimeofday = external('gettimeofday', [TIMEVALP, rffi.VOIDP], rffi.INT) + _gettimeofday = external('gettimeofday', [TIMEVALP, rffi.VOIDP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) _select = external('select', [rffi.INT, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP, - TIMEVALP], rffi.INT) + TIMEVALP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) @jit.dont_look_inside def sem_open(name, oflag, mode, value): res = _sem_open(name, oflag, mode, value) if res == rffi.cast(SEM_T, SEM_FAILED): - raise OSError(rposix.get_errno(), "sem_open failed") + raise OSError(rposix.get_saved_errno(), "sem_open failed") return res def sem_close(handle): res = _sem_close(handle) if res < 0: - raise OSError(rposix.get_errno(), "sem_close failed") + raise OSError(rposix.get_saved_errno(), "sem_close failed") def sem_unlink(name): res = _sem_unlink(name) if res < 0: - raise OSError(rposix.get_errno(), "sem_unlink failed") + raise OSError(rposix.get_saved_errno(), "sem_unlink failed") def sem_wait(sem): res = _sem_wait(sem) if res < 0: - raise OSError(rposix.get_errno(), "sem_wait failed") + raise OSError(rposix.get_saved_errno(), "sem_wait failed") def sem_trywait(sem): res = _sem_trywait(sem) if res < 0: - raise OSError(rposix.get_errno(), "sem_trywait failed") + raise OSError(rposix.get_saved_errno(), "sem_trywait failed") def sem_timedwait(sem, deadline): res = _sem_timedwait(sem, deadline) if res < 0: - raise OSError(rposix.get_errno(), "sem_timedwait failed") + raise OSError(rposix.get_saved_errno(), "sem_timedwait failed") def _sem_timedwait_save(sem, deadline): delay = 0 @@ -135,7 +143,7 @@ # poll if _sem_trywait(sem) == 0: return 0 - elif 
rposix.get_errno() != errno.EAGAIN: + elif rposix.get_saved_errno() != errno.EAGAIN: return -1 now = gettimeofday() @@ -143,7 +151,7 @@ c_tv_nsec = rffi.getintfield(deadline[0], 'c_tv_nsec') if (c_tv_sec < now[0] or (c_tv_sec == now[0] and c_tv_nsec <= now[1])): - rposix.set_errno(errno.ETIMEDOUT) + rposix.set_saved_errno(errno.ETIMEDOUT) return -1 @@ -166,21 +174,21 @@ if SEM_TIMED_WAIT: _sem_timedwait = external('sem_timedwait', [SEM_T, TIMESPECP], - rffi.INT) + rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) else: _sem_timedwait = _sem_timedwait_save def sem_post(sem): res = _sem_post(sem) if res < 0: - raise OSError(rposix.get_errno(), "sem_post failed") + raise OSError(rposix.get_saved_errno(), "sem_post failed") def sem_getvalue(sem): sval_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: res = _sem_getvalue(sem, sval_ptr) if res < 0: - raise OSError(rposix.get_errno(), "sem_getvalue failed") + raise OSError(rposix.get_saved_errno(), "sem_getvalue failed") return rffi.cast(lltype.Signed, sval_ptr[0]) finally: lltype.free(sval_ptr, flavor='raw') @@ -190,7 +198,7 @@ try: res = _gettimeofday(now, None) if res < 0: - raise OSError(rposix.get_errno(), "gettimeofday failed") + raise OSError(rposix.get_saved_errno(), "gettimeofday failed") return (rffi.getintfield(now[0], 'c_tv_sec'), rffi.getintfield(now[0], 'c_tv_usec')) finally: diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -9,7 +9,7 @@ PyObject, PyObjectP, make_ref, from_ref, Py_DecRef, borrow_from) from pypy.module.cpyext.state import State from pypy.module.cpyext.import_ import PyImport_Import -from rpython.rlib.rposix import get_errno +from rpython.rlib import rposix, jit @cpython_api([PyObject, PyObject], lltype.Void) def PyErr_SetObject(space, w_type, w_value): @@ -159,6 +159,7 @@ PyErr_SetFromErrnoWithFilenameObject(space, w_type, filename) @cpython_api([PyObject, PyObject], PyObject) + at 
jit.dont_look_inside # direct use of _get_errno() def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value): """Similar to PyErr_SetFromErrno(), with the additional behavior that if w_value is not NULL, it is passed to the constructor of type as a @@ -166,7 +167,7 @@ this is used to define the filename attribute of the exception instance. Return value: always NULL.""" # XXX Doesn't actually do anything with PyErr_CheckSignals. - errno = get_errno() + errno = rposix._get_errno() msg = os.strerror(errno) if w_value: w_error = space.call_function(w_type, diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -4,12 +4,13 @@ from pypy.module.cpyext.pyobject import PyObject from rpython.rlib import rdtoa from rpython.rlib import rfloat -from rpython.rlib import rposix +from rpython.rlib import rposix, jit from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem import rffi @cpython_api([rffi.CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) + at jit.dont_look_inside # direct use of _get_errno() def PyOS_string_to_double(space, s, endptr, w_overflow_exception): """Convert a string s to a double, raising a Python exception on failure. 
The set of accepted strings corresponds to @@ -52,8 +53,8 @@ raise OperationError( space.w_ValueError, space.wrap('invalid input at position %s' % endpos)) - if rposix.get_errno() == errno.ERANGE: - rposix.set_errno(0) + if rposix._get_errno() == errno.ERANGE: + rposix._set_errno(0) if w_overflow_exception is None: if result > 0: return rfloat.INFINITY diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -169,10 +169,12 @@ c_clock = external('clock', [rffi.TIME_TP], clock_t) c_time = external('time', [rffi.TIME_TP], rffi.TIME_T) c_ctime = external('ctime', [rffi.TIME_TP], rffi.CCHARP) -c_gmtime = external('gmtime', [rffi.TIME_TP], TM_P) +c_gmtime = external('gmtime', [rffi.TIME_TP], TM_P, + save_err=rffi.RFFI_SAVE_ERRNO) c_mktime = external('mktime', [TM_P], rffi.TIME_T) c_asctime = external('asctime', [TM_P], rffi.CCHARP) -c_localtime = external('localtime', [rffi.TIME_TP], TM_P) +c_localtime = external('localtime', [rffi.TIME_TP], TM_P, + save_err=rffi.RFFI_SAVE_ERRNO) if _POSIX: c_tzset = external('tzset', [], lltype.Void) if _WIN: @@ -304,7 +306,7 @@ _set_module_object(space, 'altzone', space.wrap(altzone)) def _get_error_msg(): - errno = rposix.get_errno() + errno = rposix.get_saved_errno() return os.strerror(errno) if sys.platform != 'win32': diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -11,7 +11,7 @@ from rpython.rlib.rmmap import alloc from rpython.rlib.rdynload import dlopen, dlclose, dlsym, dlsym_byordinal from rpython.rlib.rdynload import DLOpenError, DLLHANDLE -from rpython.rlib import jit +from rpython.rlib import jit, rposix from rpython.rlib.objectmodel import specialize from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform @@ -412,7 +412,7 @@ @jit.jit_callback("CLIBFFI") -def ll_callback(ffi_cif, ll_res, 
ll_args, ll_userdata): +def _ll_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args @@ -423,6 +423,12 @@ userdata = rffi.cast(USERDATA_P, ll_userdata) userdata.callback(ll_args, ll_res, userdata) +def ll_callback(ffi_cif, ll_res, ll_args, ll_userdata): + rposix._errno_after(rffi.RFFI_ERR_ALL) + _ll_callback(ffi_cif, ll_res, ll_args, ll_userdata) + rposix._errno_before(rffi.RFFI_ERR_ALL) + + class StackCheckError(ValueError): message = None def __init__(self, message): From noreply at buildbot.pypy.org Thu Jan 15 15:49:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 15:49:21 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fixes Message-ID: <20150115144921.B37CC1C0013@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75348:2985a88ffb08 Date: 2015-01-15 15:42 +0100 http://bitbucket.org/pypy/pypy/changeset/2985a88ffb08/ Log: fixes diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -167,7 +167,7 @@ this is used to define the filename attribute of the exception instance. Return value: always NULL.""" # XXX Doesn't actually do anything with PyErr_CheckSignals. 
- errno = rposix._get_errno() + errno = rffi.cast(lltype.Signed, rposix._get_errno()) msg = os.strerror(errno) if w_value: w_error = space.call_function(w_type, diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -53,8 +53,9 @@ raise OperationError( space.w_ValueError, space.wrap('invalid input at position %s' % endpos)) - if rposix._get_errno() == errno.ERANGE: - rposix._set_errno(0) + errno = rffi.cast(lltype.Signed, rposix._get_errno()) + if errno == errno.ERANGE: + rposix._set_errno(rffi.cast(rffi.INT, 0)) if w_overflow_exception is None: if result > 0: return rfloat.INFINITY diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1791,7 +1791,7 @@ ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) - errno = rposix._get_errno() + errno = rffi.cast(rffi.Signed, rposix._get_errno()) rthread.gc_thread_after_fork(childpid, opaqueaddr) if childpid == -1: raise OSError(errno, "os_fork failed") From noreply at buildbot.pypy.org Thu Jan 15 15:49:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 15:49:22 +0100 (CET) Subject: [pypy-commit] pypy errno-again: more fixes Message-ID: <20150115144922.DB9601C0013@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75349:27b29172fa0a Date: 2015-01-15 15:48 +0100 http://bitbucket.org/pypy/pypy/changeset/27b29172fa0a/ Log: more fixes diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -53,8 +53,8 @@ raise OperationError( space.w_ValueError, space.wrap('invalid input at position %s' % endpos)) - errno = rffi.cast(lltype.Signed, rposix._get_errno()) - if errno == errno.ERANGE: + err = rffi.cast(lltype.Signed, 
rposix._get_errno()) + if err == errno.ERANGE: rposix._set_errno(rffi.cast(rffi.INT, 0)) if w_overflow_exception is None: if result > 0: diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -55,8 +55,10 @@ constants[name] = value locals().update(constants) -def external(name, args, result): - return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_) +def external(name, args, result, **kwds): + return rffi.llexternal(name, args, result, + compilation_info=CConfig._compilation_info_, + **kwds) _flock = lltype.Ptr(cConfig.flock) fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT, diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -142,7 +142,7 @@ setattr(cConfig, k, v) cConfig.tm.__name__ = "_tm" -def external(name, args, result, eci=CConfig._compilation_info_): +def external(name, args, result, eci=CConfig._compilation_info_, **kwds): if _WIN and rffi.sizeof(rffi.TIME_T) == 8: # Recent Microsoft compilers use 64bit time_t and # the corresponding functions are named differently @@ -152,7 +152,8 @@ return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv=calling_conv, - releasegil=False) + releasegil=False, + **kwds) if _POSIX: cConfig.timeval.__name__ = "_timeval" diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -107,19 +107,19 @@ rthread.tlfield_rpy_errno.setraw(rffi.cast(INT, errno)) + at specialize.call_location() def _errno_before(save_err): if save_err & rffi.RFFI_READSAVED_ERRNO: from rpython.rlib import rthread _set_errno(rthread.tlfield_rpy_errno.getraw()) elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: _set_errno(rffi.cast(rffi.INT, 0)) -_errno_before._always_inline_ = True + at specialize.call_location() 
def _errno_after(save_err): if save_err & rffi.RFFI_SAVE_ERRNO: from rpython.rlib import rthread rthread.tlfield_rpy_errno.setraw(_get_errno()) -_errno_after._always_inline_ = True if os.name == 'nt': From noreply at buildbot.pypy.org Thu Jan 15 15:52:13 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 15:52:13 +0100 (CET) Subject: [pypy-commit] pypy vmprof: will I ever get this call right, 3rd try Message-ID: <20150115145213.B6DED1C0013@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75350:9edfd6d74085 Date: 2015-01-15 16:51 +0200 http://bitbucket.org/pypy/pypy/changeset/9edfd6d74085/ Log: will I ever get this call right, 3rd try diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -680,7 +680,7 @@ def create_jit_entry_points(self): for func, args, result in all_jit_entrypoints: - self.helper_func(lltype.Ptr(lltype.FuncType(args, result), func)) + self.helper_func(lltype.Ptr(lltype.FuncType(args, result)), func) def rewrite_access_helper(self, op): # make sure we make a copy of function so it no longer belongs From noreply at buildbot.pypy.org Thu Jan 15 15:56:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 15:56:02 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix Message-ID: <20150115145602.53E7F1C0013@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75351:c778135940cd Date: 2015-01-15 15:55 +0100 http://bitbucket.org/pypy/pypy/changeset/c778135940cd/ Log: fix diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -347,7 +347,8 @@ # ____________________________________________________________ -rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP) +rffi_fdopen = 
rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, + save_err=rffi.RFFI_SAVE_ERRNO) rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) @@ -357,7 +358,7 @@ def __init__(self, fd, mode): self.llf = rffi_fdopen(fd, mode) if not self.llf: - raise OSError(rposix.get_errno(), "fdopen failed") + raise OSError(rposix.get_saved_errno(), "fdopen failed") rffi_setbuf(self.llf, lltype.nullptr(rffi.CCHARP.TO)) def close(self): From noreply at buildbot.pypy.org Thu Jan 15 15:57:56 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 15:57:56 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: add vmprof Message-ID: <20150115145756.2A5CD1C0013@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5486:9843eaa3a719 Date: 2015-01-15 16:58 +0200 http://bitbucket.org/pypy/extradoc/changeset/9843eaa3a719/ Log: add vmprof diff --git a/sprintinfo/leysin-winter-2015/announcement.txt b/sprintinfo/leysin-winter-2015/announcement.txt --- a/sprintinfo/leysin-winter-2015/announcement.txt +++ b/sprintinfo/leysin-winter-2015/announcement.txt @@ -22,6 +22,9 @@ the conflict reporting tools, and more generally to figure out how practical it is in large projects to avoid conflicts +* vmprof - a statistical profiler for CPython and PyPy work, including + making it more user friendly. 
+ * Py3k (Python 3.x support), NumPyPy (the numpy module) * And as usual, the main side goal is to have fun in winter sports :-) From noreply at buildbot.pypy.org Thu Jan 15 16:08:47 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 15 Jan 2015 16:08:47 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Add w_object to FakeSpace Message-ID: <20150115150847.1DCCF1C003A@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75352:d02fe29f5e55 Date: 2015-01-15 16:08 +0100 http://bitbucket.org/pypy/pypy/changeset/d02fe29f5e55/ Log: Add w_object to FakeSpace diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -67,6 +67,7 @@ w_unicode = W_TypeObject("unicode") w_complex = W_TypeObject("complex") w_dict = W_TypeObject("dict") + w_object = W_TypeObject("object") def __init__(self): """NOT_RPYTHON""" From noreply at buildbot.pypy.org Thu Jan 15 16:12:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 16:12:39 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix Message-ID: <20150115151239.4D1561C0035@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75353:3e15021a8ad9 Date: 2015-01-15 16:12 +0100 http://bitbucket.org/pypy/pypy/changeset/3e15021a8ad9/ Log: fix diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1791,7 +1791,7 @@ ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) - errno = rffi.cast(rffi.Signed, rposix._get_errno()) + errno = rffi.cast(lltype.Signed, rposix._get_errno()) rthread.gc_thread_after_fork(childpid, opaqueaddr) if childpid == -1: raise OSError(errno, "os_fork failed") From noreply at buildbot.pypy.org Thu Jan 15 16:13:56 2015 From: noreply at buildbot.pypy.org 
(fijal) Date: Thu, 15 Jan 2015 16:13:56 +0100 (CET) Subject: [pypy-commit] pypy vmprof: forgot to kill the args Message-ID: <20150115151356.8C3811C0035@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75354:de4f2bcba707 Date: 2015-01-15 17:13 +0200 http://bitbucket.org/pypy/pypy/changeset/de4f2bcba707/ Log: forgot to kill the args diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -23,13 +23,13 @@ return _memmngr.jit_frame_depth_map[pos-1] @jit_entrypoint([], lltype.Signed, c_name='pypy_jit_start_addr') -def jit_start_addr(loc): +def jit_start_addr(): global _memmngr return _memmngr.jit_addr_map[0] @jit_entrypoint([], lltype.Signed, c_name='pypy_jit_end_addr') -def jit_end_addr(loc): +def jit_end_addr(): global _memmngr return _memmngr.jit_addr_map[-1] From noreply at buildbot.pypy.org Thu Jan 15 16:57:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 16:57:19 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Support threadlocalref_get in the JIT even if the result is not exactly Message-ID: <20150115155719.23A811C0013@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75355:e090b33dff6c Date: 2015-01-15 16:57 +0100 http://bitbucket.org/pypy/pypy/changeset/e090b33dff6c/ Log: Support threadlocalref_get in the JIT even if the result is not exactly one word (like an rffi.INT). 
diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -627,6 +627,7 @@ def _prepare_threadlocalref_get(self, op, fcond): ofs0 = imm(op.getarg(1).getint()) + xxxxxxxxxxxxxxxx check the size and signedness of op.getdescr() res = self.force_allocate_reg(op.result) return [ofs0, res] diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -225,7 +225,8 @@ # as arguments, and it returns the (possibly reallocated) jitframe. # The backend can optimize OS_THREADLOCALREF_GET calls to return a # field of this threadlocal_addr, but only if 'translate_support_code': - # in untranslated tests, threadlocal_addr is a dummy NULL. + # in untranslated tests, threadlocal_addr is a dummy container + # for errno tests only. FUNCPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF, llmemory.Address], llmemory.GCREF)) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -5,7 +5,7 @@ from rpython.rlib.jit import promote from rpython.rlib import jit_hooks, rposix from rpython.rlib.objectmodel import keepalive_until_here -from rpython.rlib.rthread import ThreadLocalReference +from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.test.support import CCompiledMixin from rpython.jit.codewriter.policy import StopAtXPolicy @@ -128,7 +128,8 @@ class Foo(object): pass - t = ThreadLocalReference(Foo) + t = ThreadLocalReference(Foo, loop_invariant=True) + tf = ThreadLocalField(lltype.Char, "test_call_assembler_") def change(newthing): 
somewhere_else.frame.thing = newthing @@ -156,6 +157,7 @@ frame.thing = Thing(nextval + 1) i += 1 if t.get().nine != 9: raise ValueError + if ord(tf.getraw()) != 0x92: raise ValueError return frame.thing.val driver2 = JitDriver(greens = [], reds = ['n']) @@ -181,6 +183,7 @@ foo = Foo() foo.nine = value t.set(foo) + tf.setraw("\x92") return foo def mainall(codeno, bound): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2345,7 +2345,7 @@ assert isinstance(reg, RegLoc) self.mc.MOV_rr(reg.value, ebp.value) - def threadlocalref_get(self, offset, resloc): + def threadlocalref_get(self, offset, resloc, size, sign): # This loads the stack location THREADLOCAL_OFS into a # register, and then read the word at the given offset. # It is only supported if 'translate_support_code' is @@ -2355,7 +2355,8 @@ assert self.cpu.translate_support_code assert isinstance(resloc, RegLoc) self.mc.MOV_rs(resloc.value, THREADLOCAL_OFS) - self.mc.MOV_rm(resloc.value, (resloc.value, offset)) + self.load_from_mem(resloc, addr_add_const(resloc, offset), + imm(size), imm(sign)) def genop_discard_zero_array(self, op, arglocs): (base_loc, startindex_loc, bytes_loc, diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -702,8 +702,11 @@ def _consider_threadlocalref_get(self, op): if self.translate_support_code: offset = op.getarg(1).getint() # getarg(0) == 'threadlocalref_get' + calldescr = op.getdescr() + size = calldescr.get_result_size() + sign = calldescr.is_result_signed() resloc = self.force_allocate_reg(op.result) - self.assembler.threadlocalref_get(offset, resloc) + self.assembler.threadlocalref_get(offset, resloc, size, sign) else: self._consider_call(op) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py 
--- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1993,10 +1993,6 @@ return [op0, op1] def rewrite_op_threadlocalref_get(self, op): - # only supports RESTYPE being exactly one word. - RESTYPE = op.result.concretetype - assert (RESTYPE in (lltype.Signed, lltype.Unsigned, llmemory.Address) - or isinstance(RESTYPE, lltype.Ptr)) c_offset, = op.args op1 = self.prepare_builtin_call(op, 'threadlocalref_get', [c_offset]) if c_offset.value.loop_invariant: diff --git a/rpython/jit/metainterp/test/test_threadlocal.py b/rpython/jit/metainterp/test/test_threadlocal.py --- a/rpython/jit/metainterp/test/test_threadlocal.py +++ b/rpython/jit/metainterp/test/test_threadlocal.py @@ -17,6 +17,16 @@ res = self.interp_operations(f, []) assert res == 0x544c + def test_threadlocalref_get_char(self): + tlfield = rthread.ThreadLocalField(lltype.Char, 'foobar_test_char_') + + def f(): + tlfield.setraw('\x92') + return ord(tlfield.getraw()) + + res = self.interp_operations(f, []) + assert res == 0x92 + class TestLLtype(ThreadLocalTest, LLJitMixin): pass From noreply at buildbot.pypy.org Thu Jan 15 17:05:09 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 17:05:09 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: fix Message-ID: <20150115160509.4B8881C0013@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75356:72231428d947 Date: 2015-01-15 17:04 +0100 http://bitbucket.org/pypy/pypy/changeset/72231428d947/ Log: fix diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -361,7 +361,7 @@ return False else: if flag != FLAG_LOOKUP: - if not res_v.intbound.known_ge(IntBound(0, 0)): + if not res_v.getintbound().known_ge(IntBound(0, 0)): return False self.make_equal_to(op.result, res_v) self.last_emitted_operation = REMOVED From noreply at 
buildbot.pypy.org Thu Jan 15 17:10:04 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 15 Jan 2015 17:10:04 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Unskip test Message-ID: <20150115161004.6FFDD1C0013@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75357:d6e6f6529ef3 Date: 2015-01-15 17:09 +0100 http://bitbucket.org/pypy/pypy/changeset/d6e6f6529ef3/ Log: Unskip test diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -472,11 +472,8 @@ class O(object): pass for o in [object, O]: - if '__pypy__' not in sys.builtin_module_names: - assert np.dtype(o).str == '|O8' - else: - exc = raises(NotImplementedError, "np.dtype(o)") - assert exc.value[0] == "cannot create dtype with type '%s'" % o.__name__ + print np.dtype(o).byteorder + assert np.dtype(o).str == '|O8' class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): From noreply at buildbot.pypy.org Thu Jan 15 17:16:57 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 17:16:57 +0100 (CET) Subject: [pypy-commit] pypy vmprof: reimport vmprof.c and wrap errors Message-ID: <20150115161657.A672C1C0013@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75358:d14d2b0ad1b6 Date: 2015-01-15 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/d14d2b0ad1b6/ Log: reimport vmprof.c and wrap errors diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -3,10 +3,10 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.annlowlevel import cast_instance_to_gcref, cast_base_ptr_to_instance from rpython.rlib.objectmodel import we_are_translated, CDefinedIntSymbolic -from rpython.rlib import jit +from 
rpython.rlib import jit, rgc, rposix from rpython.tool.pairtype import extendabletype from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pycode import PyCode @@ -36,7 +36,8 @@ separate_module_sources=[""" void pypy_vmprof_init(void) { - vmprof_set_mainloop(pypy_execute_frame_trampoline, 0, pypy_vmprof_get_virtual_ip); + vmprof_set_mainloop(pypy_execute_frame_trampoline, 0, + pypy_vmprof_get_virtual_ip); } """], ) @@ -59,14 +60,18 @@ _nowrapper=True, sandboxsafe=True, random_effects_on_gcobjs=True) -pypy_vmprof_init = rffi.llexternal("pypy_vmprof_init", [], lltype.Void, compilation_info=eci) -vmprof_enable = rffi.llexternal("vmprof_enable", [rffi.CCHARP, rffi.LONG], lltype.Void, compilation_info=eci) -vmprof_disable = rffi.llexternal("vmprof_disable", [], lltype.Void, compilation_info=eci) +pypy_vmprof_init = rffi.llexternal("pypy_vmprof_init", [], lltype.Void, + compilation_info=eci) +vmprof_enable = rffi.llexternal("vmprof_enable", + [rffi.INT, rffi.INT, rffi.LONG], + rffi.INT, compilation_info=eci) +vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT, + compilation_info=eci) -vmprof_register_virtual_function = rffi.llexternal("vmprof_register_virtual_function", - [rffi.CCHARP, rffi.VOIDP, rffi.VOIDP], - lltype.Void, - compilation_info=eci) +vmprof_register_virtual_function = rffi.llexternal( + "vmprof_register_virtual_function", + [rffi.CCHARP, rffi.VOIDP, rffi.VOIDP], lltype.Void, + compilation_info=eci) original_execute_frame = PyFrame.execute_frame.im_func original_execute_frame.c_name = 'pypy_pyframe_execute_frame' @@ -115,29 +120,57 @@ get_virtual_ip.c_name = 'pypy_vmprof_get_virtual_ip' get_virtual_ip._dont_inline_ = True +def strncpy(src, tgt, tgt_ofs, count): + if len(src) < count: + count = 
len(src) + i = 0 + while i < count: + tgt[i + tgt_ofs] = src[i] + i += 1 + return i + +def int2str(num, s, ofs): + if num == 0: + s[ofs] = '0' + return 1 + count = 0 + c = num + while c != 0: + count += 1 + c /= 10 + pos = ofs + count - 1 + c = num + while c != 0: + s[pos] = chr(ord('0') + c % 10) + c /= 10 + pos -= 1 + return count + + at rgc.no_collect def do_get_virtual_ip(frame): virtual_ip = frame.pycode._vmprof_virtual_ip - if frame.pycode._vmprof_registered != _vmprof.counter: + if not frame.pycode._vmprof_registered: # we need to register this code object name = frame.pycode.co_name + filename = frame.pycode.co_filename + firstlineno = frame.pycode.co_firstlineno start = rffi.cast(rffi.VOIDP, virtual_ip) end = start # ignored for now # # manually fill the C buffer; we cannot use str2charp because we # cannot call malloc from a signal handler strbuf = _vmprof.strbuf - strbuf[0] = 'p' - strbuf[1] = 'y' - strbuf[2] = ':' - maxbuflen = min(len(name), 124) - i = 0 - while i < maxbuflen: - strbuf[i+3] = name[i] - i += 1 - strbuf[i+3] = '\0' - # + ofs = strncpy("py:", _vmprof.strbuf, 0, len("py:")) + ofs += strncpy(filename, _vmprof.strbuf, ofs, 128) + _vmprof.strbuf[ofs] = ':' + ofs += 1 + ofs += int2str(firstlineno, _vmprof.strbuf, ofs) + _vmprof.strbuf[ofs] = ':' + ofs += 1 + ofs += strncpy(name, _vmprof.strbuf, ofs, 1024 - 1 - ofs) + _vmprof.strbuf[ofs] = '\x00' vmprof_register_virtual_function(strbuf, start, end) - frame.pycode._vmprof_registered = _vmprof.counter + frame.pycode._vmprof_registered = 1 # return virtual_ip @@ -146,9 +179,9 @@ class VMProf(object): def __init__(self): self.virtual_ip = 0 - self.counter = 0 # the number of times we called enable() self.is_enabled = False - self.strbuf = lltype.malloc(rffi.CCHARP.TO, 128, flavor='raw', immortal=True, zero=True) + self.ever_enabled = False + self.strbuf = lltype.malloc(rffi.CCHARP.TO, 1024, flavor='raw', immortal=True, zero=True) def get_next_virtual_IP(self): self.virtual_ip -= 1 @@ -158,23 
+191,30 @@ def _annotate_get_virtual_ip(self): if FALSE_BUT_NON_CONSTANT: # make sure it's annotated - gcref = rffi.cast(llmemory.GCREF, self.counter) # just a random non-constant value + gcref = rffi.cast(llmemory.GCREF, self.virtual_ip) # just a random non-constant value get_virtual_ip(gcref) - def enable(self, space, filename, period): + def enable(self, space, fileno, symno, period): self._annotate_get_virtual_ip() if self.is_enabled: raise oefmt(space.w_ValueError, "_vmprof already enabled") self.is_enabled = True - pypy_vmprof_init() - self.counter += 1 - vmprof_enable(filename, period) + if not self.ever_enabled: + pypy_vmprof_init() + self.ever_enabled = True + res = vmprof_enable(fileno, symno, period) + if res == -1: + raise wrap_oserror(space, OSError(rposix.get_errno(), + "_vmprof.enable")) def disable(self, space): if not self.is_enabled: raise oefmt(space.w_ValueError, "_vmprof not enabled") - vmprof_disable() self.is_enabled = False + res = vmprof_disable() + if res == -1: + raise wrap_oserror(space, OSError(rposix.get_errno(), + "_vmprof.disable")) _vmprof = VMProf() diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -79,7 +79,6 @@ #include "get_custom_offset.c" - typedef struct { void* _unused1; void* _unused2; @@ -107,7 +106,7 @@ void* bp = (void*)sp + sp_offset; cp2->sp = bp; cp2->ip = ((void**)(bp - sizeof(void*))[0]; - // the ret is on the top of the stack + // the ret is on the top of the stack minus WORD return 1; } } @@ -198,16 +197,27 @@ * ************************************************************* */ -static void open_profile(const char* filename, long period_usec) { - char buf[4096]; - profile_file = fopen(filename, "wb"); +static int open_profile(int fd, int sym_fd, long period_usec) { + if ((fd = dup(fd)) == -1) { + return -1; + } + if ((sym_fd = dup(sym_fd)) == -1) { + return -1; + } + profile_file = fdopen(fd, "wb"); + if 
(!profile_file) { + return -1; + } prof_header(profile_file, period_usec); - assert(strlen(filename) < 4096); - sprintf(buf, "%s.sym", filename); - symbol_file = fopen(buf, "w"); + symbol_file = fdopen(sym_fd, "w"); + if (!symbol_file) { + return -1; + } + return 0; } -static void close_profile(void) { +static int close_profile(void) { + // XXX all of this can happily fail FILE* src; char buf[BUFSIZ]; size_t size; @@ -222,40 +232,50 @@ fclose(src); fclose(profile_file); fclose(symbol_file); + return 0; } -static void install_sigprof_handler(void) { +static int install_sigprof_handler(void) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_sigaction = sigprof_handler; sa.sa_flags = SA_RESTART | SA_SIGINFO; - sigemptyset(&sa.sa_mask); - sigaction(SIGPROF, &sa, NULL); + if (sigemptyset(&sa.sa_mask) == -1 || + sigaction(SIGPROF, &sa, NULL) == -1) { + return -1; + } + return 0; } -static void remove_sigprof_handler(void) { - signal(SIGPROF, SIG_DFL); +static int remove_sigprof_handler(void) { + //sighandler_t res = signal(SIGPROF, SIG_DFL); + //if (res == SIG_ERR) { + // return -1; + //} + return 0; }; -static void install_sigprof_timer(long period_usec) { +static int install_sigprof_timer(long period_usec) { static struct itimerval timer; timer.it_interval.tv_sec = 0; timer.it_interval.tv_usec = period_usec; timer.it_value = timer.it_interval; if (setitimer(ITIMER_PROF, &timer, NULL) != 0) { - printf("Timer could not be initialized \n"); + return -1; } + return 0; } -static void remove_sigprof_timer(void) { +static int remove_sigprof_timer(void) { static struct itimerval timer; timer.it_interval.tv_sec = 0; timer.it_interval.tv_usec = 0; timer.it_value = timer.it_interval; if (setitimer(ITIMER_PROF, &timer, NULL) != 0) { - printf("Timer could not be deleted \n"); + return -1; } + return 0; } /* ************************************************************* @@ -270,18 +290,32 @@ mainloop_get_virtual_ip = get_virtual_ip; } -void vmprof_enable(const char* filename, 
long period_usec) { +int vmprof_enable(int fd, int sym_fd, long period_usec) { if (period_usec == -1) period_usec = 1000000 / 100; /* 100hz */ - open_profile(filename, period_usec); - install_sigprof_handler(); - install_sigprof_timer(period_usec); + if (open_profile(fd, sym_fd, period_usec) == -1) { + return -1; + } + if (install_sigprof_handler() == -1) { + return -1; + } + if (install_sigprof_timer(period_usec) == -1) { + return -1; + } + return 0; } -void vmprof_disable(void) { - remove_sigprof_timer(); - remove_sigprof_handler(); - close_profile(); +int vmprof_disable(void) { + if (remove_sigprof_timer() == -1) { + return -1; + } + if (remove_sigprof_handler() == -1) { + return -1; + } + if (close_profile() == -1) { + return -1; + } + return 0; } void vmprof_register_virtual_function(const char* name, void* start, void* end) { diff --git a/pypy/module/_vmprof/src/vmprof.h b/pypy/module/_vmprof/src/vmprof.h --- a/pypy/module/_vmprof/src/vmprof.h +++ b/pypy/module/_vmprof/src/vmprof.h @@ -12,7 +12,7 @@ void vmprof_register_virtual_function(const char* name, void* start, void* end); -void vmprof_enable(const char* filename, long period_usec); -void vmprof_disable(void); +int vmprof_enable(int fd, int sym_fd, long period_usec); +int vmprof_disable(void); #endif diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -13,6 +13,8 @@ def __init__(self, co_name): self.co_name = co_name + self.co_filename = 'filename' + self.co_firstlineno = 13 self._vmprof_setup_maybe() def test_get_virtual_ip(monkeypatch): @@ -31,7 +33,7 @@ _vmprof.counter = 42 ip = do_get_virtual_ip(myframe) assert ip == mycode._vmprof_virtual_ip - assert functions == [('py:foo', ip, ip)] + assert functions == [('py:filename:13:foo', ip, ip)] # the second time, we don't register it again functions = [] @@ -40,7 +42,7 @@ assert functions == [] # now, let's try with 
a long name - mycode = FakePyCode('abcde' * 200) + mycode = FakePyCode('abcde' + 'f' * 20000) myframe = FakePyFrame(mycode) functions = [] ip2 = do_get_virtual_ip(myframe) @@ -48,6 +50,6 @@ assert ip2 < ip # because it was generated later assert len(functions) == 1 name, start, end = functions[0] - assert len(name) == 127 - assert name == 'py:' + ('abcde'*200)[:124] + assert len(name) < 1025 + assert name == 'py:filename:13:abcde' + 'f' * (1024 - 20 - 1) From noreply at buildbot.pypy.org Thu Jan 15 17:20:00 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 17:20:00 +0100 (CET) Subject: [pypy-commit] pypy vmprof: fix for C legality Message-ID: <20150115162000.DC5261C0035@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75359:583172a8ff62 Date: 2015-01-15 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/583172a8ff62/ Log: fix for C legality diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -105,7 +105,8 @@ vmprof_hacked_unw_cursor_t *cp2 = (vmprof_hacked_unw_cursor_t*)cp; void* bp = (void*)sp + sp_offset; cp2->sp = bp; - cp2->ip = ((void**)(bp - sizeof(void*))[0]; + bp -= 1; + cp2->ip = ((void**)bp)[0]; // the ret is on the top of the stack minus WORD return 1; } From noreply at buildbot.pypy.org Thu Jan 15 17:26:12 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Jan 2015 17:26:12 +0100 (CET) Subject: [pypy-commit] pypy vmprof: fix the signature Message-ID: <20150115162612.31E921C0035@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75360:0fa14047a55c Date: 2015-01-15 18:25 +0200 http://bitbucket.org/pypy/pypy/changeset/0fa14047a55c/ Log: fix the signature diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -218,9 +218,9 @@ 
_vmprof = VMProf() - at unwrap_spec(filename=str, period=int) -def enable(space, filename, period=-1): - _vmprof.enable(space, filename, period) + at unwrap_spec(fileno=int, symno=int, period=int) +def enable(space, fileno, symno, period=-1): + _vmprof.enable(space, fileno, symno, period) def disable(space): _vmprof.disable(space) From noreply at buildbot.pypy.org Thu Jan 15 18:42:57 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Jan 2015 18:42:57 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: start porting gcpage tests Message-ID: <20150115174257.DABA21C0035@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1528:290f1d2c1891 Date: 2015-01-15 10:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/290f1d2c1891/ Log: start porting gcpage tests diff --git a/c8/stm/misc.c b/c8/stm/misc.c --- a/c8/stm/misc.c +++ b/c8/stm/misc.c @@ -43,6 +43,14 @@ #ifdef STM_TESTS +bool _stm_is_accessible_page(uintptr_t pagenum) +{ + acquire_privatization_lock(STM_SEGMENT->segment_num); + bool res = get_page_status_in(STM_SEGMENT->segment_num, pagenum) == PAGE_ACCESSIBLE; + release_privatization_lock(STM_SEGMENT->segment_num); + return res; +} + long _stm_count_modified_old_objects(void) { assert(STM_PSEGMENT->modified_old_objects); diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -74,6 +74,8 @@ bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); +bool _stm_is_accessible_page(uintptr_t pagenum); + long stm_can_move(object_t *obj); void _stm_test_switch(stm_thread_local_t *tl); void _stm_test_switch_segment(int segnum); diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -64,6 +64,7 @@ char *_stm_real_address(object_t *o); void _stm_test_switch(stm_thread_local_t *tl); void _stm_test_switch_segment(int segnum); +bool _stm_is_accessible_page(uintptr_t pagenum); void clear_jmpbuf(stm_thread_local_t *tl); long 
_check_start_transaction(stm_thread_local_t *tl); @@ -428,8 +429,8 @@ def stm_major_collect(): lib.stm_collect(1) -def stm_get_private_page(pagenum): - return lib._stm_get_private_page(pagenum) +def stm_is_accessible_page(pagenum): + return lib._stm_is_accessible_page(pagenum) def stm_get_obj_size(o): res = lib._checked_stmcb_size_rounded_up(stm_get_real_address(o)) diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py new file mode 100644 --- /dev/null +++ b/c8/test/test_gcpage.py @@ -0,0 +1,254 @@ +from support import * +import py + + +LMO = LARGE_MALLOC_OVERHEAD + + +class TestGCPage(BaseTest): + + def test_large_obj_alloc(self): + # test obj which doesn't fit into the size_classes + # for now, we will still allocate it in the nursery. + # expects: GC_N_SMALL_REQUESTS 36 + size_class = 1000 # too big + obj_size = size_class * 8 + assert obj_size > 4096 # we want more than 1 page + assert obj_size < lib._STM_FAST_ALLOC # in the nursery + + self.start_transaction() + new = stm_allocate(obj_size) + assert is_in_nursery(new) + self.push_root(new) + stm_minor_collect() + new = self.pop_root() + + pages = stm_get_obj_pages(new) + assert len(pages) == 2 + assert ([stm_is_accessible_page(p) for p in pages] + == [1, 1]) + + assert not is_in_nursery(new) + stm_write(new) + self.commit_transaction() + + # now proceed to write into the object in a new transaction + self.start_transaction() + assert ([stm_is_accessible_page(p) for p in pages] + == [True, True]) + stm_write(new) + assert ([stm_is_accessible_page(p) for p in pages] + == [True, True]) + + # write to 2nd page of object!! 
+ wnew = stm_get_real_address(new) + wnew[4097] = 'x' + + self.switch(1) + self.start_transaction() + assert ([stm_is_accessible_page(p) for p in pages] + == [False, False]) + stm_read(new) + rnew = stm_get_real_address(new) + assert rnew[4097] == '\0' + assert ([stm_is_accessible_page(p) for p in pages] + == [False, True]) + self.abort_transaction() + + self.switch(0) + self.abort_transaction() + assert ([stm_is_accessible_page(p) for p in pages] + == [True, True]) + + def test_partial_alloced_pages(self): + self.start_transaction() + new = stm_allocate(16) + self.push_root(new) + stm_minor_collect() + new = self.pop_root() + + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 + assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER + + stm_write(new) + assert not (stm_get_flags(new) & GCFLAG_WRITE_BARRIER) + + self.commit_transaction() + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 + assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER + + self.start_transaction() + newer = stm_allocate(16) + self.push_root(newer) + stm_minor_collect() + newer = self.pop_root() + # 'new' is still in shared_page and committed + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 + assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER + # 'newer' is now part of the SHARED page with 'new', but + # uncommitted, so no privatization has to take place: + assert stm_get_obj_pages(new) == stm_get_obj_pages(newer) + assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER + stm_write(newer) # does not privatize + assert not (stm_get_flags(newer) & GCFLAG_WRITE_BARRIER) + assert stm_get_private_page(stm_get_obj_pages(newer)[0]) == 0 + self.commit_transaction() + + assert stm_get_private_page(stm_get_obj_pages(newer)[0]) == 0 + assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER + + def test_major_collection(self): + self.start_transaction() + new = stm_allocate(5000) + self.push_root(new) + stm_minor_collect() + assert lib._stm_total_allocated() == 5000 + LMO + + self.pop_root() 
+ stm_minor_collect() + assert lib._stm_total_allocated() == 5000 + LMO + + stm_major_collect() + assert lib._stm_total_allocated() == 0 + + def test_mark_recursive(self): + def make_chain(sz): + prev = ffi.cast("object_t *", ffi.NULL) + for i in range(10): + self.push_root(prev) + new = stm_allocate_refs(sz/8-1) + prev = self.pop_root() + stm_set_ref(new, 42, prev) + prev = new + return prev + + self.start_transaction() + self.push_root(make_chain(5000)) + self.push_root(make_chain(4312)) + stm_minor_collect() + assert lib._stm_total_allocated() == (10 * (5000 + LMO) + + 10 * (4312 + LMO)) + stm_major_collect() + assert lib._stm_total_allocated() == (10 * (5000 + LMO) + + 10 * (4312 + LMO)) + stm_major_collect() + assert lib._stm_total_allocated() == (10 * (5000 + LMO) + + 10 * (4312 + LMO)) + self.pop_root() + stm_major_collect() + assert lib._stm_total_allocated() == 10 * (5000 + LMO) + + def test_trace_all_versions(self): + self.start_transaction() + x = stm_allocate(5000) + stm_set_char(x, 'A') + stm_set_char(x, 'a', 4999) + self.push_root(x) + self.commit_transaction() + assert lib._stm_total_allocated() == 5000 + LMO + + self.start_transaction() + x = self.pop_root() + self.push_root(x) + assert lib._stm_total_allocated() == 5000 + LMO + stm_set_char(x, 'B') + stm_set_char(x, 'b', 4999) + assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages + stm_major_collect() + + assert stm_get_char(x) == 'B' + assert stm_get_char(x, 4999) == 'b' + + self.switch(1) + self.start_transaction() + assert stm_get_char(x) == 'A' + assert stm_get_char(x, 4999) == 'a' + + self.switch(0) + assert stm_get_char(x) == 'B' + assert stm_get_char(x, 4999) == 'b' + assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages + + def test_trace_correct_version_of_overflow_objects_1(self, size=32): + self.start_transaction() + # + self.switch(1) + self.start_transaction() + x = stm_allocate(size) + stm_set_char(x, 'E', size - 1) + self.push_root(x) + # + 
self.switch(0) + stm_major_collect() + # + self.switch(1) + x = self.pop_root() + assert stm_get_char(x, size - 1) == 'E' + + def test_trace_correct_version_of_overflow_objects_2(self): + self.test_trace_correct_version_of_overflow_objects_1(size=5000) + + def test_reshare_if_no_longer_modified_0(self, invert=0): + if invert: + self.switch(1) + self.start_transaction() + x = stm_allocate(5000) + self.push_root(x) + self.commit_transaction() + x = self.pop_root() + # + self.switch(1 - invert) + self.start_transaction() + self.push_root(x) + stm_set_char(x, 'A') + stm_major_collect() + assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages + self.commit_transaction() + # + self.start_transaction() + stm_major_collect() + assert lib._stm_total_allocated() == 5000 + LMO # shared again + + def test_reshare_if_no_longer_modified_1(self): + self.test_reshare_if_no_longer_modified_0(invert=1) + + def test_threadlocal_at_start_of_transaction(self): + self.start_transaction() + x = stm_allocate(16) + stm_set_char(x, 'L') + self.set_thread_local_obj(x) + self.commit_transaction() + + self.start_transaction() + assert stm_get_char(self.get_thread_local_obj()) == 'L' + self.set_thread_local_obj(stm_allocate(32)) + stm_minor_collect() + self.abort_transaction() + + self.start_transaction() + assert stm_get_char(self.get_thread_local_obj()) == 'L' + self.set_thread_local_obj(stm_allocate(32)) + stm_major_collect() + self.abort_transaction() + + self.start_transaction() + assert stm_get_char(self.get_thread_local_obj()) == 'L' + + def test_marker_1(self): + self.start_transaction() + p1 = stm_allocate(600) + stm_set_char(p1, 'o') + self.push_root(p1) + self.push_root(ffi.cast("object_t *", 123)) + p2 = stm_allocate(600) + stm_set_char(p2, 't') + self.push_root(p2) + stm_major_collect() + assert lib._stm_total_allocated() == 2 * 616 + # + p2 = self.pop_root() + m = self.pop_root() + assert m == ffi.cast("object_t *", 123) + p1 = self.pop_root() + assert 
stm_get_char(p1) == 'o' + assert stm_get_char(p2) == 't' diff --git a/c8/test/test_random.py b/c8/test/test_random.py --- a/c8/test/test_random.py +++ b/c8/test/test_random.py @@ -581,7 +581,7 @@ op_minor_collect, #op_major_collect, ] - for _ in range(1000): + for _ in range(2000): # make sure we are in a transaction: curr_thread = op_switch_thread(ex, global_state, curr_thread) @@ -618,6 +618,6 @@ test_fun.__name__ = 'test_random_%d' % seed return test_fun - for _seed in range(5000, 5400): + for _seed in range(5000, 5200): _fn = _make_fun(_seed) locals()[_fn.__name__] = _fn From noreply at buildbot.pypy.org Thu Jan 15 18:42:58 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Jan 2015 18:42:58 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: port and enhance another test Message-ID: <20150115174258.E3B8B1C0035@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1529:14cfe5b96c95 Date: 2015-01-15 10:33 +0100 http://bitbucket.org/pypy/stmgc/changeset/14cfe5b96c95/ Log: port and enhance another test diff --git a/c8/stm/gcpage.h b/c8/stm/gcpage.h --- a/c8/stm/gcpage.h +++ b/c8/stm/gcpage.h @@ -1,6 +1,6 @@ -/* Granularity when grabbing more unused pages: take 50 at a time */ -#define GCPAGE_NUM_PAGES 50 +/* Granularity when grabbing more unused pages: take 20 at a time */ +#define GCPAGE_NUM_PAGES 20 static char *uninitialized_page_start; /* within segment 0 */ static char *uninitialized_page_stop; diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -67,35 +67,56 @@ stm_minor_collect() new = self.pop_root() - assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 + pages = stm_get_obj_pages(new) + assert stm_is_accessible_page(pages[0]) == True assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER - stm_write(new) + stm_set_char(new, 'x') assert not (stm_get_flags(new) & GCFLAG_WRITE_BARRIER) self.commit_transaction() - assert 
stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 + + ####### + + assert stm_is_accessible_page(pages[0]) == True assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER self.start_transaction() + assert stm_get_char(new) == 'x' newer = stm_allocate(16) self.push_root(newer) stm_minor_collect() newer = self.pop_root() - # 'new' is still in shared_page and committed - assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 + pageser = stm_get_obj_pages(newer) + assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER - # 'newer' is now part of the SHARED page with 'new', but - # uncommitted, so no privatization has to take place: - assert stm_get_obj_pages(new) == stm_get_obj_pages(newer) + # same page as committed obj + assert pages == pageser assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER - stm_write(newer) # does not privatize + + stm_set_char(newer, 'y') assert not (stm_get_flags(newer) & GCFLAG_WRITE_BARRIER) - assert stm_get_private_page(stm_get_obj_pages(newer)[0]) == 0 self.commit_transaction() - assert stm_get_private_page(stm_get_obj_pages(newer)[0]) == 0 - assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER + ##################### + + self.switch(1) + + self.start_transaction() + assert stm_is_accessible_page(pages[0]) == False + assert stm_get_char(new) == 'x' + assert stm_get_char(newer) == 'y' + assert stm_is_accessible_page(pages[0]) == True + + another = stm_allocate(16) + self.push_root(another) + stm_minor_collect() + another = self.pop_root() + # segment has its own small-obj-pages: + assert stm_get_obj_pages(another) != pages + + self.commit_transaction() + def test_major_collection(self): self.start_transaction() From noreply at buildbot.pypy.org Thu Jan 15 18:42:59 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Jan 2015 18:42:59 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: re-introduce the total_allocated counter Message-ID: <20150115174259.F29631C0035@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: 
c8-private-pages Changeset: r1530:749fe619048d Date: 2015-01-15 10:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/749fe619048d/ Log: re-introduce the total_allocated counter diff --git a/c8/stm/gcpage.h b/c8/stm/gcpage.h --- a/c8/stm/gcpage.h +++ b/c8/stm/gcpage.h @@ -2,6 +2,13 @@ /* Granularity when grabbing more unused pages: take 20 at a time */ #define GCPAGE_NUM_PAGES 20 +/* More parameters fished directly from PyPy's default GC + XXX document me */ +#define GC_MIN (NB_NURSERY_PAGES * 4096 * 8) +#define GC_MAJOR_COLLECT 1.82 + + + static char *uninitialized_page_start; /* within segment 0 */ static char *uninitialized_page_stop; diff --git a/c8/stm/largemalloc.c b/c8/stm/largemalloc.c --- a/c8/stm/largemalloc.c +++ b/c8/stm/largemalloc.c @@ -352,6 +352,7 @@ } mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; + increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); #ifndef NDEBUG memset((char *)&mscan->d, 0xda, request_size); #endif @@ -367,6 +368,7 @@ assert(chunk->prev_size != THIS_CHUNK_FREE); /* 'size' is at least MIN_ALLOC_SIZE */ + increment_total_allocated(-(chunk->size + LARGE_MALLOC_OVERHEAD)); #ifndef NDEBUG { diff --git a/c8/stm/misc.c b/c8/stm/misc.c --- a/c8/stm/misc.c +++ b/c8/stm/misc.c @@ -99,4 +99,9 @@ return NULL; return _last_cl_entry->written[_last_cl_entry_index++].object; } + +uint64_t _stm_total_allocated(void) +{ + return increment_total_allocated(0); +} #endif diff --git a/c8/stm/pages.c b/c8/stm/pages.c --- a/c8/stm/pages.c +++ b/c8/stm/pages.c @@ -4,16 +4,37 @@ #include /************************************************************/ +struct { + volatile bool major_collection_requested; + uint64_t total_allocated; /* keep track of how much memory we're + using, ignoring nurseries */ + uint64_t total_allocated_bound; +} pages_ctl; + static void setup_pages(void) { + pages_ctl.total_allocated_bound = GC_MIN; } static void teardown_pages(void) { + memset(&pages_ctl, 0, sizeof(pages_ctl)); memset(pages_status, 
0, sizeof(pages_status)); } +static uint64_t increment_total_allocated(ssize_t add_or_remove) +{ + uint64_t ta = __sync_add_and_fetch(&pages_ctl.total_allocated, + add_or_remove); + + if (ta >= pages_ctl.total_allocated_bound) + pages_ctl.major_collection_requested = true; + + return ta; +} + + /************************************************************/ @@ -30,6 +51,9 @@ /* set this flag *after* we un-protected it, because XXX later */ set_page_status_in(segnum, pagenum, PAGE_ACCESSIBLE); + + // XXX: maybe? + //increment_total_allocated(4096); } __attribute__((unused)) @@ -47,4 +71,7 @@ perror("mprotect"); stm_fatalerror("mprotect failed! Consider running 'sysctl vm.max_map_count=16777216'"); } + + // XXX: maybe? + //increment_total_allocated(-4096); } diff --git a/c8/stm/pages.h b/c8/stm/pages.h --- a/c8/stm/pages.h +++ b/c8/stm/pages.h @@ -43,6 +43,8 @@ static void page_mark_accessible(long segnum, uintptr_t pagenum); static void page_mark_inaccessible(long segnum, uintptr_t pagenum); +static uint64_t increment_total_allocated(ssize_t add_or_remove); + static inline char *get_virtual_page(long segnum, uintptr_t pagenum) { diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -109,6 +109,8 @@ object_t *_stm_enum_objects_pointing_to_nursery(long index); object_t *_stm_next_last_cl_entry(); void _stm_start_enum_last_cl_entry(); + +uint64_t _stm_total_allocated(void); #endif /* ==================== HELPERS ==================== */ diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -80,6 +80,7 @@ object_t * _get_ptr(object_t *obj, int n); void stm_collect(long level); +uint64_t _stm_total_allocated(void); void _stm_set_nursery_free_count(uint64_t free_count); void _stm_largemalloc_init_arena(char *data_start, size_t data_size); From noreply at buildbot.pypy.org Thu Jan 15 18:43:01 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Jan 2015 18:43:01 +0100 (CET) Subject: 
[pypy-commit] stmgc c8-private-pages: WIP: starting with major collections, add a validation step to all segs at the start of it Message-ID: <20150115174301.110991C0035@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1531:4c49f4a4e817 Date: 2015-01-15 15:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/4c49f4a4e817/ Log: WIP: starting with major collections, add a validation step to all segs at the start of it diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -254,9 +254,11 @@ static void reset_modified_from_backup_copies(int segment_num); /* forward */ -static void _stm_validate(void *free_if_abort) +static bool _stm_validate() { - dprintf(("_stm_validate(%p)\n", free_if_abort)); + /* returns true if we reached a valid state, or false if + we need to abort now */ + dprintf(("_stm_validate()\n")); /* go from last known entry in commit log to the most current one and apply all changes done by other transactions. Abort if we have read one of @@ -268,7 +270,7 @@ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { assert(first_cl->next == INEV_RUNNING); - return; + return true; } bool needs_abort = false; @@ -284,8 +286,14 @@ if (first_cl->next == NULL) break; - if (first_cl->next == INEV_RUNNING) - _stm_collectable_safe_point(); /* otherwise, we may deadlock */ + if (first_cl->next == INEV_RUNNING) { +#if STM_TESTS + stm_abort_transaction(); +#endif + /* need to reach safe point if an INEV transaction + is waiting for us, otherwise deadlock */ + break; + } /* Find the set of segments we need to copy from and lock them: */ uint64_t segments_to_lock = 1UL << my_segnum; @@ -293,8 +301,6 @@ while ((next_cl = cl->next) != NULL) { if (next_cl == INEV_RUNNING) { #if STM_TESTS - if (free_if_abort != (void *)-1) - free(free_if_abort); stm_abort_transaction(); #endif /* only validate entries up to INEV */ @@ -381,13 +387,7 @@ release_privatization_lock(STM_SEGMENT->segment_num); } - if (needs_abort) 
{ - if (free_if_abort != (void *)-1) - free(free_if_abort); - /* pages may be inconsistent */ - - stm_abort_transaction(); - } + return !needs_abort; } static struct stm_commit_log_entry_s *_create_commit_log_entry(void) @@ -416,7 +416,10 @@ struct stm_commit_log_entry_s *old; while (1) { - _stm_validate(/* free_if_abort =*/ new); + if (!_stm_validate()) { + free(new); + stm_abort_transaction(); + } /* try to attach to commit log: */ old = STM_PSEGMENT->last_commit_log_entry; @@ -468,7 +471,8 @@ /* ############# STM ############# */ void stm_validate() { - _stm_validate(NULL); + if (!_stm_validate()) + stm_abort_transaction(); } @@ -829,7 +833,7 @@ stm_rewind_jmp_restore_shadowstack(tl); assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); #endif -tl->last_abort__bytes_in_nursery = bytes_in_nursery; + tl->last_abort__bytes_in_nursery = bytes_in_nursery; #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -38,6 +38,7 @@ GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER, GCFLAG_HAS_SHADOW = 0x02, GCFLAG_WB_EXECUTED = 0x04, + GCFLAG_VISITED = 0x05, }; @@ -190,7 +191,7 @@ static void synchronize_objects_flush(void); static void _signal_handler(int sig, siginfo_t *siginfo, void *context); -static void _stm_validate(void *free_if_abort); +static bool _stm_validate(); static inline void _duck(void) { /* put a call to _duck() between two instructions that set 0 into diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -104,3 +104,258 @@ (uintptr_t)p / 4096UL)); return o; } + + +/************************************************************/ + + +static void major_collection_if_requested(void) +{ + assert(!_has_mutex()); + if (!is_major_collection_requested()) + return; + + s_mutex_lock(); + + if (is_major_collection_requested()) { /* if still true */ + + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); + + if 
(is_major_collection_requested()) { /* if *still* true */ + major_collection_now_at_safe_point(); + } + + } + + s_mutex_unlock(); +} + + +/************************************************************/ + +/* objects to trace are traced in the sharing seg0 or in a + certain segment if there exist modifications there. + All other segments' versions should be identical to seg0's + version and thus don't need tracing. */ +static struct list_s *mark_objects_to_trace; + +/* we use the sharing seg0's pages for the GCFLAG_VISITED flag */ + +static inline struct object_s *mark_loc(object_t *obj) +{ + /* uses the memory in seg0 for marking: */ + struct object_s *result = (struct object_s*)REAL_ADDRESS(stm_object_pages, obj); + return result; +} + +static inline bool mark_visited_test(object_t *obj) +{ + struct object_s *realobj = mark_loc(obj); + return !!(realobj->stm_flags & GCFLAG_VISITED); +} + +static inline bool mark_visited_test_and_set(object_t *obj) +{ + struct object_s *realobj = mark_loc(obj); + if (realobj->stm_flags & GCFLAG_VISITED) { + return true; + } + else { + realobj->stm_flags |= GCFLAG_VISITED; + return false; + } +} + +static inline bool mark_visited_test_and_clear(object_t *obj) +{ + struct object_s *realobj = mark_loc(obj); + if (realobj->stm_flags & GCFLAG_VISITED) { + realobj->stm_flags &= ~GCFLAG_VISITED; + return true; + } + else { + return false; + } +} + + +/************************************************************/ + +/* static void mark_and_trace(object_t *obj, char *segment_base) */ +/* { */ +/* assert(list_is_empty(mark_objects_to_trace)); */ + +/* while (1) { */ +/* /\* trace into the object (the version from 'segment_base') *\/ */ +/* struct object_s *realobj = */ +/* (struct object_s *)REAL_ADDRESS(segment_base, obj); */ +/* stmcb_trace(realobj, &mark_record_trace); */ + +/* if (list_is_empty(mark_objects_to_trace)) */ +/* break; */ + +/* obj = (object_t *)list_pop_item(mark_objects_to_trace); */ +/* } */ +/* } */ + +/* static inline 
void mark_visit_object(object_t *obj, char *segment_base) */ +/* { */ +/* /\* if already visited, don't trace *\/ */ +/* if (obj == NULL || mark_visited_test_and_set(obj)) */ +/* return; */ +/* mark_and_trace(obj, segment_base); */ +/* } */ + +/* static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size) */ +/* { */ +/* const struct stm_shadowentry_s *p, *end; */ +/* p = (const struct stm_shadowentry_s *)slice; */ +/* end = (const struct stm_shadowentry_s *)(slice + size); */ +/* for (; p < end; p++) */ +/* if ((((uintptr_t)p->ss) & 3) == 0) */ +/* mark_visit_object(p->ss, stm_object_pages); */ +/* return NULL; */ +/* } */ + +/* static void assert_obj_accessible_in(long segnum, object_t *obj) */ +/* { */ +/* #ifndef NDEBUG */ +/* uintptr_t page = (uintptr_t)item / 4096UL; */ +/* assert(get_page_status_in(segnum, page) == PAGE_ACCESSIBLE); */ + +/* struct object_s *realobj = */ +/* (struct object_s *)REAL_ADDRESS(get_segment_base(segnum), obj); */ + +/* size_t obj_size = stmcb_size_rounded_up(realobj); */ +/* uintptr_t count = obj_size / 4096UL + 1; */ +/* while (count--> 0) { */ +/* assert(get_page_status_in(segnum, page) == PAGE_ACCESSIBLE); */ +/* page++; */ +/* } */ +/* #endif */ +/* } */ + + +/* static void mark_visit_from_modified_objects(void) */ +/* { */ +/* /\* look for modified objects in segments and mark all of them */ +/* for further tracing (XXX: don't if we are going to share */ +/* some of the pages) *\/ */ + +/* long i; */ +/* for (i = 1; i < NB_SEGMENTS; i++) { */ +/* char *base = get_segment_base(i); */ + +/* LIST_FOREACH_R( */ +/* get_priv_segment(i)->modified_old_objects, */ +/* object_t * /\*item*\/, */ +/* ({ */ +/* /\* All modified objs have all pages accessible for now. */ +/* This is because we create a backup of the whole obj */ +/* and thus make all pages accessible. 
*\/ */ +/* assert_obj_accessible_in(i, item); */ + +/* mark_visited_test_and_set(item); */ +/* mark_and_trace(item, base); /\* private version *\/ */ +/* })); */ +/* } */ +/* } */ + +/* static void mark_visit_from_roots(void) */ +/* { */ +/* if (testing_prebuilt_objs != NULL) { */ +/* LIST_FOREACH_R(testing_prebuilt_objs, object_t * /\*item*\/, */ +/* mark_visit_object(item, stm_object_pages)); */ +/* } */ + +/* stm_thread_local_t *tl = stm_all_thread_locals; */ +/* do { */ +/* /\* If 'tl' is currently running, its 'associated_segment_num' */ +/* field is the segment number. If not, then the */ +/* field is still some correct segment number, and it doesn't */ +/* matter which one we pick. *\/ */ +/* char *segment_base = get_segment_base(tl->associated_segment_num); */ + +/* struct stm_shadowentry_s *current = tl->shadowstack; */ +/* struct stm_shadowentry_s *base = tl->shadowstack_base; */ +/* while (current-- != base) { */ +/* if ((((uintptr_t)current->ss) & 3) == 0) */ +/* mark_visit_object(current->ss, segment_base); */ +/* } */ +/* mark_visit_object(tl->thread_local_obj, segment_base); */ + +/* tl = tl->next; */ +/* } while (tl != stm_all_thread_locals); */ + +/* long i; */ +/* for (i = 1; i <= NB_SEGMENTS; i++) { */ +/* if (get_priv_segment(i)->transaction_state != TS_NONE) { */ +/* mark_visit_object( */ +/* get_priv_segment(i)->threadlocal_at_start_of_transaction, */ +/* get_segment_base(i)); */ +/* stm_rewind_jmp_enum_shadowstack( */ +/* get_segment(i)->running_thread, */ +/* mark_visit_objects_from_ss); */ +/* } */ +/* } */ +/* } */ + +static inline bool largemalloc_keep_object_at(char *data) +{ + /* /\* this is called by _stm_largemalloc_sweep() *\/ */ + /* object_t *obj = (object_t *)(data - stm_object_pages); */ + /* if (!mark_visited_test_and_clear(obj)) { */ + /* /\* This is actually needed in order to avoid random write-read */ + /* conflicts with objects read and freed long in the past. 
*/ + /* It is probably rare enough, but still, we want to avoid any */ + /* false conflict. (test_random hits it sometimes) *\/ */ + /* long i; */ + /* for (i = 1; i <= NB_SEGMENTS; i++) { */ + /* ((struct stm_read_marker_s *) */ + /* (get_segment_base(i) + (((uintptr_t)obj) >> 4)))->rm = 0; */ + /* } */ + /* return false; */ + /* } */ + return true; +} + +static void major_collection_now_at_safe_point(void) +{ + dprintf(("\n")); + dprintf((" .----- major collection -----------------------\n")); + assert(_has_mutex()); + + /* first, force a minor collection in each of the other segments */ + major_do_validation_and_minor_collections(); + + dprintf((" | used before collection: %ld\n", + (long)pages_ctl.total_allocated)); + + /* only necessary because of assert that fails otherwise (XXX) */ + acquire_all_privatization_locks(); + + DEBUG_EXPECT_SEGFAULT(false); + + /* marking */ + /* LIST_CREATE(mark_objects_to_trace); */ + /* mark_visit_from_modified_objects(); */ + /* mark_visit_from_roots(); */ + /* LIST_FREE(mark_objects_to_trace); */ + + /* /\* cleanup *\/ */ + /* clean_up_segment_lists(); */ + + /* /\* sweeping *\/ */ + /* sweep_large_objects(); */ + /* //sweep_uniform_pages(); */ + + dprintf((" | used after collection: %ld\n", + (long)pages_ctl.total_allocated)); + dprintf((" `----------------------------------------------\n")); + + reset_major_collection_requested(); + + DEBUG_EXPECT_SEGFAULT(true); + + release_all_privatization_locks(); +} diff --git a/c8/stm/gcpage.h b/c8/stm/gcpage.h --- a/c8/stm/gcpage.h +++ b/c8/stm/gcpage.h @@ -16,3 +16,8 @@ static void teardown_gcpage(void); static void setup_N_pages(char *pages_addr, long num); static stm_char *allocate_outside_nursery_large(uint64_t size); + + +static void major_collection_if_requested(void); +static void major_collection_now_at_safe_point(void); +static bool largemalloc_keep_object_at(char *data); /* for largemalloc.c */ diff --git a/c8/stm/largemalloc.c b/c8/stm/largemalloc.c --- 
a/c8/stm/largemalloc.c +++ b/c8/stm/largemalloc.c @@ -588,8 +588,7 @@ if (_stm_largemalloc_keep != NULL) return _stm_largemalloc_keep((char *)&chunk->d); #endif - return true; - //XXX: return largemalloc_keep_object_at((char *)&chunk->d); + return largemalloc_keep_object_at((char *)&chunk->d); } void _stm_largemalloc_sweep(void) diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -18,10 +18,8 @@ assert(_STM_FAST_ALLOC <= NURSERY_SIZE); _stm_nursery_start = NURSERY_START; - long i = 0; - get_segment(i)->nursery_current = (stm_char *)-1; - get_segment(i)->nursery_end = -1; - for (i = 1; i < NB_SEGMENTS; i++) { + long i; + for (i = 0; i < NB_SEGMENTS; i++) { get_segment(i)->nursery_current = (stm_char *)NURSERY_START; get_segment(i)->nursery_end = NURSERY_END; } @@ -292,10 +290,10 @@ void stm_collect(long level) { if (level > 0) - abort(); + force_major_collection_request(); minor_collection(/*commit=*/ false); - /* XXX: major_collection_if_requested(); */ + major_collection_if_requested(); } @@ -382,6 +380,52 @@ } +static void major_do_validation_and_minor_collections(void) +{ + int original_num = STM_SEGMENT->segment_num; + long i; + + /* including the sharing seg0 */ + for (i = 0; i < NB_SEGMENTS; i++) { + set_gs_register(get_segment_base(i)); + + if (!_stm_validate()) { + /* tell it to abort when continuing */ + STM_PSEGMENT->pub.nursery_end = NSE_SIGABORT; + assert(must_abort()); + + dprintf(("abort data structures\n")); + abort_data_structures_from_segment_num(i); + continue; + } + + + if (MINOR_NOTHING_TO_DO(STM_PSEGMENT)) /*TS_NONE segments have NOTHING_TO_DO*/ + continue; + + assert(STM_PSEGMENT->transaction_state != TS_NONE); + assert(STM_PSEGMENT->safe_point != SP_RUNNING); + assert(STM_PSEGMENT->safe_point != SP_NO_TRANSACTION); + + + /* Other segments that will abort immediately after resuming: we + have to ignore them, not try to collect them anyway! + Collecting might fail due to invalid state. 
+ */ + if (!must_abort()) { + _do_minor_collection(/*commit=*/ false); + assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); + } + else { + dprintf(("abort data structures\n")); + abort_data_structures_from_segment_num(i); + } + } + + set_gs_register(get_segment_base(original_num)); +} + + static object_t *allocate_shadow(object_t *obj) { char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); diff --git a/c8/stm/nursery.h b/c8/stm/nursery.h --- a/c8/stm/nursery.h +++ b/c8/stm/nursery.h @@ -1,9 +1,11 @@ #define NSE_SIGPAUSE _STM_NSE_SIGNAL_MAX +#define NSE_SIGABORT _STM_NSE_SIGNAL_ABORT static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void); static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); +static void major_do_validation_and_minor_collections(void); static void assert_memset_zero(void *s, size_t n); diff --git a/c8/stm/pages.c b/c8/stm/pages.c --- a/c8/stm/pages.c +++ b/c8/stm/pages.c @@ -34,6 +34,29 @@ return ta; } +static bool is_major_collection_requested(void) +{ + return pages_ctl.major_collection_requested; +} + +static void force_major_collection_request(void) +{ + pages_ctl.major_collection_requested = true; +} + +static void reset_major_collection_requested(void) +{ + assert(_has_mutex()); + + uint64_t next_bound = (uint64_t)((double)pages_ctl.total_allocated * + GC_MAJOR_COLLECT); + if (next_bound < GC_MIN) + next_bound = GC_MIN; + + pages_ctl.total_allocated_bound = next_bound; + pages_ctl.major_collection_requested = false; +} + /************************************************************/ diff --git a/c8/stm/pages.h b/c8/stm/pages.h --- a/c8/stm/pages.h +++ b/c8/stm/pages.h @@ -44,6 +44,9 @@ static void page_mark_inaccessible(long segnum, uintptr_t pagenum); static uint64_t increment_total_allocated(ssize_t add_or_remove); +static bool is_major_collection_requested(void); +static void force_major_collection_request(void); +static void reset_major_collection_requested(void); static inline 
char *get_virtual_page(long segnum, uintptr_t pagenum) diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -59,7 +59,8 @@ #define _STM_GCFLAG_WRITE_BARRIER 0x01 #define _STM_FAST_ALLOC (66*1024) -#define _STM_NSE_SIGNAL_MAX 1 +#define _STM_NSE_SIGNAL_ABORT 1 +#define _STM_NSE_SIGNAL_MAX 2 void _stm_write_slowpath(object_t *); object_t *_stm_allocate_slowpath(ssize_t); diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -302,7 +302,7 @@ ], undef_macros=['NDEBUG'], include_dirs=[parent_dir], - extra_compile_args=['-g', '-O0', '-Wall', '-Werror', '-ferror-limit=1'], + extra_compile_args=['-g', '-O0', '-Wall', '-Werror', '-ferror-limit=5'], extra_link_args=['-g', '-lrt'], force_generic_engine=True) diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -60,6 +60,7 @@ assert ([stm_is_accessible_page(p) for p in pages] == [True, True]) + def test_partial_alloced_pages(self): self.start_transaction() new = stm_allocate(16) From noreply at buildbot.pypy.org Thu Jan 15 18:43:02 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Jan 2015 18:43:02 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: first test passes Message-ID: <20150115174302.173F61C0035@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1532:e3e791a65654 Date: 2015-01-15 16:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/e3e791a65654/ Log: first test passes diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -189,7 +189,7 @@ static void _signal_handler(int sig, siginfo_t *siginfo, void *context) { - assert(_stm_segfault_expected); + assert(_stm_segfault_expected > 0); int saved_errno = errno; char *addr = siginfo->si_addr; @@ -1033,19 +1033,14 @@ --j; stm_char *frag = STM_PSEGMENT->sq_fragments[j]; uintptr_t page = ((uintptr_t)frag) / 4096UL; - /* XXX: necessary? 
*/ - /* if (is_shared_log_page(page)) */ - /* continue; */ - ssize_t frag_size = STM_PSEGMENT->sq_fragsizes[j]; char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, frag); - /* XXX: including the sharing segment? */ for (i = 0; i < NB_SEGMENTS; i++) { if (i == myself) continue; - if (get_page_status_in(i, page) != PAGE_NO_ACCESS) { + if (i == 0 || (get_page_status_in(i, page) != PAGE_NO_ACCESS)) { /* shared or private, but never segfault */ char *dst = REAL_ADDRESS(get_segment_base(i), frag); memcpy(dst, src, frag_size); diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -38,7 +38,7 @@ GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER, GCFLAG_HAS_SHADOW = 0x02, GCFLAG_WB_EXECUTED = 0x04, - GCFLAG_VISITED = 0x05, + GCFLAG_VISITED = 0x08, }; diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -137,7 +137,7 @@ certain segment if there exist modifications there. All other segments' versions should be identical to seg0's version and thus don't need tracing. */ -static struct list_s *mark_objects_to_trace; +static struct list_s *marked_objects_to_trace; /* we use the sharing seg0's pages for the GCFLAG_VISITED flag */ @@ -181,144 +181,220 @@ /************************************************************/ -/* static void mark_and_trace(object_t *obj, char *segment_base) */ -/* { */ -/* assert(list_is_empty(mark_objects_to_trace)); */ +static inline void mark_record_trace(object_t **pobj) +{ + /* takes a normal pointer to a thread-local pointer to an object */ + object_t *obj = *pobj; -/* while (1) { */ -/* /\* trace into the object (the version from 'segment_base') *\/ */ -/* struct object_s *realobj = */ -/* (struct object_s *)REAL_ADDRESS(segment_base, obj); */ -/* stmcb_trace(realobj, &mark_record_trace); */ + /* Note: this obj might be visited already, but from a different + segment. We ignore this case and skip re-visiting the object + anyway. 
The idea is that such an object is old (not from the + current transaction), otherwise it would not be possible to see + it in two segments; and moreover it is not modified, otherwise + mark_trace() would have been called on two different segments + already. That means that this object is identical in all + segments and only needs visiting once. (It may actually be in a + shared page, or maybe not.) + */ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; /* already visited this object */ -/* if (list_is_empty(mark_objects_to_trace)) */ -/* break; */ + LIST_APPEND(marked_objects_to_trace, obj); +} -/* obj = (object_t *)list_pop_item(mark_objects_to_trace); */ -/* } */ -/* } */ -/* static inline void mark_visit_object(object_t *obj, char *segment_base) */ -/* { */ -/* /\* if already visited, don't trace *\/ */ -/* if (obj == NULL || mark_visited_test_and_set(obj)) */ -/* return; */ -/* mark_and_trace(obj, segment_base); */ -/* } */ +static void mark_and_trace(object_t *obj, char *segment_base) +{ + /* mark the obj and trace all reachable objs from it */ -/* static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size) */ -/* { */ -/* const struct stm_shadowentry_s *p, *end; */ -/* p = (const struct stm_shadowentry_s *)slice; */ -/* end = (const struct stm_shadowentry_s *)(slice + size); */ -/* for (; p < end; p++) */ -/* if ((((uintptr_t)p->ss) & 3) == 0) */ -/* mark_visit_object(p->ss, stm_object_pages); */ -/* return NULL; */ -/* } */ + assert(list_is_empty(marked_objects_to_trace)); -/* static void assert_obj_accessible_in(long segnum, object_t *obj) */ -/* { */ -/* #ifndef NDEBUG */ -/* uintptr_t page = (uintptr_t)item / 4096UL; */ -/* assert(get_page_status_in(segnum, page) == PAGE_ACCESSIBLE); */ + while (1) { + /* trace into the object (the version from 'segment_base') */ + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(segment_base, obj); + stmcb_trace(realobj, &mark_record_trace); -/* struct object_s *realobj = */ 
-/* (struct object_s *)REAL_ADDRESS(get_segment_base(segnum), obj); */ + if (list_is_empty(marked_objects_to_trace)) + break; -/* size_t obj_size = stmcb_size_rounded_up(realobj); */ -/* uintptr_t count = obj_size / 4096UL + 1; */ -/* while (count--> 0) { */ -/* assert(get_page_status_in(segnum, page) == PAGE_ACCESSIBLE); */ -/* page++; */ -/* } */ -/* #endif */ -/* } */ + obj = (object_t *)list_pop_item(marked_objects_to_trace); + } +} +static inline void mark_visit_object(object_t *obj, char *segment_base) +{ + /* if already visited, don't trace */ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; + mark_and_trace(obj, segment_base); +} -/* static void mark_visit_from_modified_objects(void) */ -/* { */ -/* /\* look for modified objects in segments and mark all of them */ -/* for further tracing (XXX: don't if we are going to share */ -/* some of the pages) *\/ */ +static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size) +{ + const struct stm_shadowentry_s *p, *end; + p = (const struct stm_shadowentry_s *)slice; + end = (const struct stm_shadowentry_s *)(slice + size); + for (; p < end; p++) + if ((((uintptr_t)p->ss) & 3) == 0) + mark_visit_object(p->ss, stm_object_pages); // seg0 + return NULL; +} -/* long i; */ -/* for (i = 1; i < NB_SEGMENTS; i++) { */ -/* char *base = get_segment_base(i); */ +static void assert_obj_accessible_in(long segnum, object_t *obj) +{ +#ifndef NDEBUG + uintptr_t page = (uintptr_t)obj / 4096UL; + assert(get_page_status_in(segnum, page) == PAGE_ACCESSIBLE); -/* LIST_FOREACH_R( */ -/* get_priv_segment(i)->modified_old_objects, */ -/* object_t * /\*item*\/, */ -/* ({ */ -/* /\* All modified objs have all pages accessible for now. */ -/* This is because we create a backup of the whole obj */ -/* and thus make all pages accessible. 
*\/ */ -/* assert_obj_accessible_in(i, item); */ + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(get_segment_base(segnum), obj); -/* mark_visited_test_and_set(item); */ -/* mark_and_trace(item, base); /\* private version *\/ */ -/* })); */ -/* } */ -/* } */ + size_t obj_size = stmcb_size_rounded_up(realobj); + uintptr_t count = obj_size / 4096UL + 1; + while (count--> 0) { + assert(get_page_status_in(segnum, page) == PAGE_ACCESSIBLE); + page++; + } +#endif +} -/* static void mark_visit_from_roots(void) */ -/* { */ -/* if (testing_prebuilt_objs != NULL) { */ -/* LIST_FOREACH_R(testing_prebuilt_objs, object_t * /\*item*\/, */ -/* mark_visit_object(item, stm_object_pages)); */ -/* } */ -/* stm_thread_local_t *tl = stm_all_thread_locals; */ -/* do { */ -/* /\* If 'tl' is currently running, its 'associated_segment_num' */ -/* field is the segment number. If not, then the */ -/* field is still some correct segment number, and it doesn't */ -/* matter which one we pick. *\/ */ -/* char *segment_base = get_segment_base(tl->associated_segment_num); */ +static void mark_visit_from_modified_objects(void) +{ + /* look for modified objects in segments and mark all of them + for further tracing (XXX: don't if we are going to share + some of the pages) */ -/* struct stm_shadowentry_s *current = tl->shadowstack; */ -/* struct stm_shadowentry_s *base = tl->shadowstack_base; */ -/* while (current-- != base) { */ -/* if ((((uintptr_t)current->ss) & 3) == 0) */ -/* mark_visit_object(current->ss, segment_base); */ -/* } */ -/* mark_visit_object(tl->thread_local_obj, segment_base); */ + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + char *base = get_segment_base(i); -/* tl = tl->next; */ -/* } while (tl != stm_all_thread_locals); */ + LIST_FOREACH_R( + get_priv_segment(i)->modified_old_objects, + object_t * /*item*/, + ({ + /* All modified objs have all pages accessible for now. + This is because we create a backup of the whole obj + and thus make all pages accessible. 
*/ + assert_obj_accessible_in(i, item); -/* long i; */ -/* for (i = 1; i <= NB_SEGMENTS; i++) { */ -/* if (get_priv_segment(i)->transaction_state != TS_NONE) { */ -/* mark_visit_object( */ -/* get_priv_segment(i)->threadlocal_at_start_of_transaction, */ -/* get_segment_base(i)); */ -/* stm_rewind_jmp_enum_shadowstack( */ -/* get_segment(i)->running_thread, */ -/* mark_visit_objects_from_ss); */ -/* } */ -/* } */ -/* } */ + mark_visited_test_and_set(item); + mark_and_trace(item, stm_object_pages); /* shared, committed version */ + mark_and_trace(item, base); /* private, modified version */ + })); + } +} + +static void mark_visit_from_roots(void) +{ + if (testing_prebuilt_objs != NULL) { + LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, + mark_visit_object(item, stm_object_pages)); // seg0 + } + + stm_thread_local_t *tl = stm_all_thread_locals; + do { + /* look at all objs on the shadow stack (they are old but may + be uncommitted so far, so only exist in the associated_segment_num). + + However, since we just executed a minor collection, they were + all synced to the sharing seg0. Thus we can trace them there. + + If they were again modified since then, they were traced + by mark_visit_from_modified_object() already. 
+ */ + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; + while (current-- != base) { + if ((((uintptr_t)current->ss) & 3) == 0) + mark_visit_object(current->ss, stm_object_pages); + } + + tl = tl->next; + } while (tl != stm_all_thread_locals); + + /* also visit all objs in the rewind-shadowstack */ + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + if (get_priv_segment(i)->transaction_state != TS_NONE) { + stm_rewind_jmp_enum_shadowstack( + get_segment(i)->running_thread, + mark_visit_objects_from_ss); + } + } +} + + +static void clean_up_segment_lists(void) +{ +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct list_s *lst; + + /* 'objects_pointing_to_nursery' should be empty, but isn't + necessarily because it also lists objects that have been + written to but don't actually point to the nursery. Clear + it up and set GCFLAG_WRITE_BARRIER again on the objects. + This is the case for transactions where + MINOR_NOTHING_TO_DO() == true + but they still did write-barriers on objects + */ + lst = pseg->objects_pointing_to_nursery; + if (!list_is_empty(lst)) { + abort(); // check that there is a test + LIST_FOREACH_R(lst, object_t* /*item*/, + ({ + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, (uintptr_t)item); + + assert(realobj->stm_flags & GCFLAG_WB_EXECUTED); + assert(!(realobj->stm_flags & GCFLAG_WRITE_BARRIER)); + + realobj->stm_flags |= GCFLAG_WRITE_BARRIER; + })); + list_clear(lst); + } else { + /* if here MINOR_NOTHING_TO_DO() was true before, it's like + we "didn't do a collection" at all. So nothing to do on + modified_old_objs. 
*/ + } + } +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") +} static inline bool largemalloc_keep_object_at(char *data) { - /* /\* this is called by _stm_largemalloc_sweep() *\/ */ - /* object_t *obj = (object_t *)(data - stm_object_pages); */ - /* if (!mark_visited_test_and_clear(obj)) { */ - /* /\* This is actually needed in order to avoid random write-read */ - /* conflicts with objects read and freed long in the past. */ - /* It is probably rare enough, but still, we want to avoid any */ - /* false conflict. (test_random hits it sometimes) *\/ */ - /* long i; */ - /* for (i = 1; i <= NB_SEGMENTS; i++) { */ - /* ((struct stm_read_marker_s *) */ - /* (get_segment_base(i) + (((uintptr_t)obj) >> 4)))->rm = 0; */ - /* } */ - /* return false; */ - /* } */ + /* this is called by _stm_largemalloc_sweep() */ + object_t *obj = (object_t *)(data - stm_object_pages); + dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj))); + if (!mark_visited_test_and_clear(obj)) { + /* This is actually needed in order to avoid random write-read + conflicts with objects read and freed long in the past. + It is probably rare enough, but still, we want to avoid any + false conflict. 
(test_random hits it sometimes) */ + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + /* reset read marker */ + *((char *)(get_segment_base(i) + (((uintptr_t)obj) >> 4))) = 0; + } + return false; + } return true; } +static void sweep_large_objects(void) +{ + _stm_largemalloc_sweep(); +} + static void major_collection_now_at_safe_point(void) { dprintf(("\n")); @@ -337,16 +413,16 @@ DEBUG_EXPECT_SEGFAULT(false); /* marking */ - /* LIST_CREATE(mark_objects_to_trace); */ - /* mark_visit_from_modified_objects(); */ - /* mark_visit_from_roots(); */ - /* LIST_FREE(mark_objects_to_trace); */ + LIST_CREATE(marked_objects_to_trace); + mark_visit_from_modified_objects(); + mark_visit_from_roots(); + LIST_FREE(marked_objects_to_trace); /* /\* cleanup *\/ */ - /* clean_up_segment_lists(); */ + clean_up_segment_lists(); /* /\* sweeping *\/ */ - /* sweep_large_objects(); */ + sweep_large_objects(); /* //sweep_uniform_pages(); */ dprintf((" | used after collection: %ld\n", diff --git a/c8/stm/setup.h b/c8/stm/setup.h --- a/c8/stm/setup.h +++ b/c8/stm/setup.h @@ -3,8 +3,8 @@ static pthread_t *_get_cpth(stm_thread_local_t *); #ifndef NDEBUG -static __thread long _stm_segfault_expected = false; -#define DEBUG_EXPECT_SEGFAULT(v) do {_stm_segfault_expected = (v);} while (0) +static __thread long _stm_segfault_expected = 0; +#define DEBUG_EXPECT_SEGFAULT(v) do {if (v) _stm_segfault_expected++; else _stm_segfault_expected--;} while (0) #else #define DEBUG_EXPECT_SEGFAULT(v) {} #endif diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -126,7 +126,8 @@ stm_minor_collect() assert lib._stm_total_allocated() == 5000 + LMO - self.pop_root() + new = self.pop_root() + assert not is_in_nursery(new) stm_minor_collect() assert lib._stm_total_allocated() == 5000 + LMO From noreply at buildbot.pypy.org Thu Jan 15 18:43:03 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Jan 2015 18:43:03 +0100 (CET) Subject: 
[pypy-commit] stmgc c8-private-pages: fix for different modified_old_objects list elements Message-ID: <20150115174303.15EE41C0035@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1533:97bee124dc0b Date: 2015-01-15 17:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/97bee124dc0b/ Log: fix for different modified_old_objects list elements diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -270,19 +270,19 @@ for (i = 1; i < NB_SEGMENTS; i++) { char *base = get_segment_base(i); - LIST_FOREACH_R( - get_priv_segment(i)->modified_old_objects, - object_t * /*item*/, - ({ - /* All modified objs have all pages accessible for now. - This is because we create a backup of the whole obj - and thus make all pages accessible. */ - assert_obj_accessible_in(i, item); + struct list_s *lst = get_priv_segment(i)->modified_old_objects; + long j, count = list_count(lst); + for (j = 0; j < count; j += 3) { + object_t *item = (object_t*)list_item(lst, j); + /* All modified objs have all pages accessible for now. + This is because we create a backup of the whole obj + and thus make all pages accessible. */ + assert_obj_accessible_in(i, item); - mark_visited_test_and_set(item); - mark_and_trace(item, stm_object_pages); /* shared, committed version */ - mark_and_trace(item, base); /* private, modified version */ - })); + mark_visited_test_and_set(item); + mark_and_trace(item, stm_object_pages); /* shared, committed version */ + mark_and_trace(item, base); /* private, modified version */ + } } } @@ -374,7 +374,7 @@ { /* this is called by _stm_largemalloc_sweep() */ object_t *obj = (object_t *)(data - stm_object_pages); - dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj))); + //dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj))); if (!mark_visited_test_and_clear(obj)) { /* This is actually needed in order to avoid random write-read conflicts with objects read and freed long in the past. 
diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -176,6 +176,8 @@ assert lib._stm_total_allocated() == 5000 + LMO stm_set_char(x, 'B') stm_set_char(x, 'b', 4999) + + py.test.skip("we don't account for private pages right now") assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages stm_major_collect() @@ -219,12 +221,15 @@ self.push_root(x) self.commit_transaction() x = self.pop_root() + assert not is_in_nursery(x) # self.switch(1 - invert) self.start_transaction() self.push_root(x) stm_set_char(x, 'A') stm_major_collect() + + py.test.skip("we don't account for private pages right now") assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages self.commit_transaction() # @@ -236,6 +241,7 @@ self.test_reshare_if_no_longer_modified_0(invert=1) def test_threadlocal_at_start_of_transaction(self): + py.test.skip("no threadlocal right now") self.start_transaction() x = stm_allocate(16) stm_set_char(x, 'L') From noreply at buildbot.pypy.org Thu Jan 15 20:55:50 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Jan 2015 20:55:50 +0100 (CET) Subject: [pypy-commit] pypy ufuncapi: work around nditer shape issues instead of fixing nditer(opaxes=...) for now Message-ID: <20150115195550.E40331C0013@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r75361:5958b6623b29 Date: 2015-01-15 21:51 +0200 http://bitbucket.org/pypy/pypy/changeset/5958b6623b29/ Log: work around nditer shape issues instead of fixing nditer(opaxes=...) 
for now diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -231,6 +231,9 @@ backstrides = imp.backstrides r = calculate_broadcast_strides(strides, backstrides, imp.shape, shape, backward) + if len(shape) != len(r[0]): + # shape can be shorter when using an external loop, just return a view + return ConcreteIter(imp, imp.get_size(), imp.shape, r[0], r[1], op_flags, base) return ConcreteIter(imp, imp.get_size(), shape, r[0], r[1], op_flags, base) def calculate_ndim(op_in, oa_ndim): @@ -418,7 +421,11 @@ out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) else: out_shape = None - self.shape = shape_agreement_multiple(space, self.seq, + if space.isinstance_w(w_itershape, space.w_tuple) or \ + space.isinstance_w(w_itershape, space.w_list): + self.shape = [space.int_w(i) for i in space.listview(w_itershape)] + else: + self.shape = shape_agreement_multiple(space, self.seq, shape=out_shape) if len(outargs) > 0: # Make None operands writeonly and flagged for allocation diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -325,6 +325,9 @@ def test_itershape(self): # Check that allocated outputs work with a specified shape from numpy import nditer, arange + import sys + if '__pypy__' in sys.builtin_module_names: + skip("op_axes not totally supported yet") a = arange(6, dtype='i2').reshape(2,3) i = nditer([a, None], [], [['readonly'], ['writeonly','allocate']], op_axes=[[0,1,None], None], diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -890,7 +890,7 @@ if n < len(iter_shape): #Broadcast over the len(iter_shape) - n dims of iter_shape broadcast_dims = len(iter_shape) - n - 
arg_shapes.append(iter_shape[:-broadcast_dims] + [1] * broadcast_dims + dims_to_match) + arg_shapes.append(iter_shape[:n] + [1] * broadcast_dims + dims_to_match) else: arg_shapes.append(iter_shape + dims_to_match) # TODO once we support obejct dtypes, From noreply at buildbot.pypy.org Thu Jan 15 20:55:54 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Jan 2015 20:55:54 +0100 (CET) Subject: [pypy-commit] pypy ufuncapi: merge default into branch Message-ID: <20150115195554.81DF81C0013@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r75362:a0cbd2bf13da Date: 2015-01-15 21:55 +0200 http://bitbucket.org/pypy/pypy/changeset/a0cbd2bf13da/ Log: merge default into branch diff too long, truncating to 2000 out of 6448 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -58,7 +58,7 @@ executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], + 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1589,7 +1589,7 @@ 'copyfile' in caller.f_globals): dest_dir = sys.pypy_resolvedirof(target_executable) src_dir = sys.pypy_resolvedirof(sys.executable) - for libname in ['libpypy-c.so']: + for libname in ['libpypy-c.so', 'libpypy-c.dylib']: dest_library = os.path.join(dest_dir, libname) src_library = os.path.join(src_dir, libname) if 
os.path.exists(src_library): diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -9,7 +9,10 @@ of the given arguments and keywords. """ - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -6,3 +6,8 @@ __version__ = "0.8.6" __version_info__ = (0, 8, 6) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. +__version_verifier_modules__ = "0.8.6" diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -69,6 +69,7 @@ self._function_caches = [] self._libraries = [] self._cdefsources = [] + self._windows_unicode = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -77,6 +78,7 @@ # with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -189,13 +191,16 @@ cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) - def offsetof(self, cdecl, fieldname): + def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given - structure, which must be given as a C type name. + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. 
+ You can also give numeric values which correspond to array + items, in case of an array type. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) - return self._backend.typeoffsetof(cdecl, fieldname)[1] + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and @@ -264,6 +269,16 @@ """ return self._backend.buffer(cdata, size) + def from_buffer(self, python_buffer): + """Return a that points to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types str, + unicode, or bytearray (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. + """ + return self._backend.from_buffer(self.BCharA, python_buffer) + def callback(self, cdecl, python_callable=None, error=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. @@ -335,9 +350,23 @@ which requires binary compatibility in the signatures. """ from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations + if self._windows_unicode: + self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Veritier(). tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). 
self._libraries.append(lib) return lib @@ -356,15 +385,29 @@ with self._lock: return model.pointer_cache(self, ctype) - def addressof(self, cdata, field=None): + def addressof(self, cdata, *fields_or_indexes): """Return the address of a . - If 'field' is specified, return the address of this field. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. """ ctype = self._backend.typeof(cdata) - ctype, offset = self._backend.typeoffsetof(ctype, field) + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset + def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined in another FFI instance. Usage is similar to a #include in C, @@ -387,6 +430,44 @@ def from_handle(self, x): return self._backend.from_handle(x) + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'enabled_flag' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. 
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -169,6 +169,7 @@ class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False + kind = "pointer" @classmethod def _newp(cls, init): @@ -370,10 +371,12 @@ (CTypesPrimitive, type(source).__name__)) return source # + kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name + kind = kind1 def __init__(self, value): self._value = value @@ -703,12 +706,13 @@ class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) - _kind = kind + _kind = kind = kind1 # CTypesStructOrUnion._fix_class() 
return CTypesStructOrUnion @@ -994,27 +998,42 @@ def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) - def typeoffsetof(self, BType, fieldname): - if fieldname is not None and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - if fieldname is None: - return (BType, 0) - else: + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) - def rawaddressof(self, BTypePtr, cdata, offset): + def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a ") - if offset != 0: + if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ 
b/lib_pypy/cffi/commontypes.py @@ -29,6 +29,9 @@ result = model.PointerType(resolve_common_type(result[:-2])) elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) + elif result == 'set-unicode-needed': + raise api.FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) else: if commontype == result: raise api.FFIError("Unsupported type: %r. Please file a bug " @@ -86,8 +89,6 @@ "ULONGLONG": "unsigned long long", "WCHAR": "wchar_t", "SHORT": "short", - "TBYTE": "WCHAR", - "TCHAR": "WCHAR", "UCHAR": "unsigned char", "UINT": "unsigned int", "UINT8": "unsigned char", @@ -157,14 +158,12 @@ "LPCVOID": model.const_voidp_type, "LPCWSTR": "const WCHAR *", - "LPCTSTR": "LPCWSTR", "LPDWORD": "DWORD *", "LPHANDLE": "HANDLE *", "LPINT": "int *", "LPLONG": "long *", "LPSTR": "CHAR *", "LPWSTR": "WCHAR *", - "LPTSTR": "LPWSTR", "LPVOID": model.voidp_type, "LPWORD": "WORD *", "LRESULT": "LONG_PTR", @@ -173,7 +172,6 @@ "PBYTE": "BYTE *", "PCHAR": "CHAR *", "PCSTR": "const CHAR *", - "PCTSTR": "LPCWSTR", "PCWSTR": "const WCHAR *", "PDWORD": "DWORD *", "PDWORDLONG": "DWORDLONG *", @@ -200,9 +198,6 @@ "PSIZE_T": "SIZE_T *", "PSSIZE_T": "SSIZE_T *", "PSTR": "CHAR *", - "PTBYTE": "TBYTE *", - "PTCHAR": "TCHAR *", - "PTSTR": "LPWSTR", "PUCHAR": "UCHAR *", "PUHALF_PTR": "UHALF_PTR *", "PUINT": "UINT *", @@ -240,6 +235,15 @@ "USN": "LONGLONG", "VOID": model.void_type, "WPARAM": "UINT_PTR", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", }) return result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -1,4 +1,3 @@ - from . 
import api, model from .commontypes import COMMON_TYPES, resolve_common_type try: @@ -209,6 +208,8 @@ def _add_constants(self, key, val): if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations raise api.FFIError( "multiple declarations of constant: %s" % (key,)) self._int_constants[key] = val @@ -228,12 +229,18 @@ pyvalue = int(int_str, 0) self._add_constants(key, pyvalue) + self._declare('macro ' + key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: - raise api.CDefError('only supports the syntax "#define ' - '%s ..." (literally) or "#define ' - '%s 0x1FF" for now' % (key, key)) + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... (literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) def _parse_decl(self, decl): node = decl.type @@ -460,6 +467,8 @@ elif kind == 'union': tp = model.UnionType(explicit_name, None, None, None) elif kind == 'enum': + if explicit_name == '__dotdotdot__': + raise CDefError("Enums cannot be declared with ...") tp = self._build_enum_type(explicit_name, type.values) else: raise AssertionError("kind = %r" % (kind,)) @@ -532,9 +541,24 @@ def _parse_constant(self, exprnode, partial_length_ok=False): # for now, limited to expressions that are an immediate number - # or negative number + # or positive/negative number if isinstance(exprnode, pycparser.c_ast.Constant): - return int(exprnode.value, 0) + s = exprnode.value + if s.startswith('0'): + if s.startswith('0x') or s.startswith('0X'): + return int(s, 16) + return int(s, 8) + elif '1' <= s[0] <= '9': + return int(s, 10) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise api.CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + 
exprnode.op == '+'): + return self._parse_constant(exprnode.expr) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -11,6 +11,9 @@ """ +LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', + 'extra_objects', 'depends'] + def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -235,6 +235,8 @@ BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) +char_array_type = ArrayType(PrimitiveType('char'), None) + class StructOrUnionOrEnum(BaseTypeByIdentity): _attrs_ = ('name',) @@ -478,7 +480,7 @@ try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: - raise NotImplementedError("%r: %s" % (srctype, e)) + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -65,7 +65,7 @@ # The following two 'chained_list_constants' items contains # the head of these two chained lists, as a string that gives the # call to do, if any. - self._chained_list_constants = ['0', '0'] + self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] # prnt = self._prnt # first paste some standard set of lines that are mostly '#define' @@ -138,15 +138,22 @@ prnt() prnt('#endif') - def load_library(self): + def load_library(self, flags=None): # XXX review all usages of 'self' here! 
# import it as a new extension module + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) module = imp.load_dynamic(self.verifier.get_module_name(), self.verifier.modulefilename) except ImportError as e: error = "importing %r: %s" % (self.verifier.modulefilename, e) raise ffiplatform.VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler @@ -228,7 +235,8 @@ converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: - converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) errvalue = '-1' # elif isinstance(tp, model.PointerType): @@ -267,8 +275,8 @@ self._prnt(' if (datasize != 0) {') self._prnt(' if (datasize < 0)') self._prnt(' %s;' % errcode) - self._prnt(' %s = alloca(datasize);' % (tovar,)) - self._prnt(' memset((void *)%s, 0, datasize);' % (tovar,)) + self._prnt(' %s = alloca((size_t)datasize);' % (tovar,)) + self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,)) self._prnt(' if (_cffi_convert_array_from_object(' '(char *)%s, _cffi_type(%d), %s) < 0)' % ( tovar, self._gettypenum(tp), fromvar)) @@ -336,7 +344,7 @@ prnt = self._prnt numargs = len(tp.args) if numargs == 0: - argname = 'no_arg' + argname = 'noarg' elif numargs == 1: argname = 'arg0' else: @@ -386,6 +394,9 @@ prnt(' Py_END_ALLOW_THREADS') prnt() # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') if result_code: prnt(' return %s;' % self._convert_expr_from_c(tp.result, 'result', 'result type')) @@ -452,6 +463,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in 
tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -482,6 +494,8 @@ prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') prnt(' return _cffi_get_struct_layout(nums);') prnt(' /* the next line is not executed, but compiled */') prnt(' %s(0);' % (checkfuncname,)) @@ -578,7 +592,8 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True, size_too=False): + vartp=None, delayed=True, size_too=False, + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -590,6 +605,9 @@ else: assert category == 'const' # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # if not is_int: if category == 'var': realexpr = '&' + name @@ -637,6 +655,27 @@ # ---------- # enums + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -653,25 +692,8 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) 
{' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %s, ' - 'not %s",') - prnt(' "%s", "%s", buf, "%d");' % ( - name, enumerator, enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') @@ -695,8 +717,11 @@ # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): - assert tp == '...' - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing @@ -783,6 +808,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif 
@@ -828,12 +871,15 @@ PyLong_FromLongLong((long long)(x))) #define _cffi_from_c_int(x, type) \ - (((type)-1) > 0 ? /* unsigned */ \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) \ - : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x))) + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) #define _cffi_to_c_int(o, type) \ (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ @@ -844,7 +890,7 @@ : (type)_cffi_to_c_i32(o)) : \ sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ : (type)_cffi_to_c_i64(o)) : \ - (Py_FatalError("unsupported size for type " #type), 0)) + (Py_FatalError("unsupported size for type " #type), (type)0)) #define _cffi_to_c_i8 \ ((int(*)(PyObject *))_cffi_exports[1]) @@ -907,6 +953,7 @@ { PyObject *library; int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -58,12 +58,12 @@ modname = self.verifier.get_module_name() prnt("void %s%s(void) { }\n" % (prefix, modname)) - def load_library(self): + def load_library(self, flags=0): # import it with the CFFI backend backend = self.ffi._backend # needs to make a path that contains '/', on Posix filename = os.path.join(os.curdir, self.verifier.modulefilename) - module = backend.load_library(filename) + module = backend.load_library(filename, flags) # # call loading_gen_struct() to get the struct layout 
inferred by # the C compiler @@ -235,6 +235,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -354,11 +355,20 @@ # ---------- # constants, likely declared with '#define' - def _generate_gen_const(self, is_int, name, tp=None, category='const'): + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) - if is_int: + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') @@ -367,6 +377,7 @@ prnt('}') else: assert tp is not None + assert check_value is None prnt(tp.get_c_name(' %s(void)' % funcname, name),) prnt('{') if category == 'var': @@ -383,9 +394,13 @@ _loading_gen_constant = _loaded_noop - def _load_constant(self, is_int, tp, name, module): + def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name - if is_int: + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) @@ -396,6 +411,7 @@ BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: + assert check_value is None BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() @@ 
-410,6 +426,36 @@ # ---------- # enums + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise ffiplatform.VerificationError(error) + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -427,24 +473,7 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' sprintf(out_error,' - ' "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % ( - enumerator[:100], enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() @@ -456,16 +485,8 @@ 
tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = self._enum_funcname(prefix, name) - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise ffiplatform.VerificationError(error) + self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): @@ -476,13 +497,21 @@ # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): - assert tp == '...' - self._generate_gen_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): - value = self._load_constant(True, tp, name, module) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) @@ -565,6 +594,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned 
__int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -1,12 +1,23 @@ -import sys, os, binascii, imp, shutil -from . import __version__ +import sys, os, binascii, shutil +from . import __version_verifier_modules__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): def __init__(self, ffi, preamble, tmpdir=None, modulename=None, - ext_package=None, tag='', force_generic_engine=False, **kwds): + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): self.ffi = ffi self.preamble = preamble if not modulename: @@ -14,14 +25,15 @@ vengine_class = _locate_engine_class(ffi, force_generic_engine) self._vengine = vengine_class(self) self._vengine.patch_extension_kwds(kwds) - self.kwds = kwds + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) # if modulename: if tag: raise TypeError("can't specify both 'modulename' and 'tag'") else: - key = '\x00'.join([sys.version[:3], __version__, preamble, - flattened_kwds] + + key = '\x00'.join([sys.version[:3], __version_verifier_modules__, + preamble, flattened_kwds] + ffi._cdefsources) if sys.version_info >= (3,): key = key.encode('utf-8') @@ -33,7 +45,7 @@ k1, k2) suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() - self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) self.ext_package = ext_package 
self._has_source = False @@ -97,6 +109,20 @@ def generates_python_module(self): return self._vengine._gen_python_module + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + # ---------- def _locate_module(self): @@ -148,7 +174,10 @@ def _load_library(self): assert self._has_module - return self._vengine.load_library() + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() # ____________________________________________________________ @@ -181,6 +210,9 @@ def _caller_dir_pycache(): if _TMPDIR: return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result filename = sys._getframe(2).f_code.co_filename return os.path.abspath(os.path.join(os.path.dirname(filename), '__pycache__')) @@ -222,11 +254,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. Occurs on pypy without cpyext if sys.platform == 'win32': diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,12 +30,10 @@ Initialize threads. Only need to be called if there are any threads involved -.. function:: long pypy_setup_home(char* home, int verbose); +.. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given - "PyPy home directory". 
It is not strictly necessary to execute it before - running Python code, but without it you will not be able to import any - non-builtin module from the standard library. The arguments are: + "PyPy home directory". The arguments are: * ``home``: NULL terminated path to an executable inside the pypy directory (can be a .so name, can be made up) @@ -84,25 +82,36 @@ const char source[] = "print 'hello from pypy'"; - int main() + int main(void) { - int res; + int res; - rpython_startup_code(); - // pypy_setup_home() is not needed in this trivial example - res = pypy_execute_source((char*)source); - if (res) { - printf("Error calling pypy_execute_source!\n"); - } - return res; + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; } -If we save it as ``x.c`` now, compile it and run it with:: +If we save it as ``x.c`` now, compile it and run it (on linux) with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy +on OSX it is necessary to set the rpath of the binary if one wants to link to it:: + + gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl, at executable_path + ./x + hello from pypy + Worked! .. note:: If the compilation fails because of missing PyPy.h header file, diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -38,14 +38,13 @@ and not move the binary there, else PyPy would not be able to find its library. -If you want to install 3rd party libraries, the most convenient way is to -install distribute_ and pip_: +If you want to install 3rd party libraries, the most convenient way is +to install pip_ (unless you want to install virtualenv as explained +below; then you can directly use pip inside virtualenvs): .. 
code-block:: console - $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.1/bin/pypy distribute_setup.py + $ curl -O https://bootstrap.pypa.io/get-pip.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example @@ -69,7 +68,6 @@ Note that bin/python is now a symlink to bin/pypy. -.. _distribute: http://www.python-distribute.org/ .. _pip: http://pypi.python.org/pypi/pip diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -101,7 +101,7 @@ if space.is_none(w_path): if verbose: debug("Failed to find library based on pypy_find_stdlib") - return 1 + return rffi.cast(rffi.INT, 1) space.startup() space.call_function(w_pathsetter, w_path) # import site @@ -109,13 +109,13 @@ import_ = space.getattr(space.getbuiltinmodule('__builtin__'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) - return 0 + return rffi.cast(rffi.INT, 0) except OperationError, e: if verbose: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 + return rffi.cast(rffi.INT, -1) @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): @@ -234,8 +234,7 @@ enable_translationmodules(config) config.translation.suggest(check_str_without_nul=True) - if sys.platform.startswith('linux'): - config.translation.suggest(shared=True) + config.translation.suggest(shared=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -86,9 +86,11 @@ def print_(*args, **kwargs): """The new-style print function from py3k.""" - fp = 
kwargs.pop("file", sys.stdout) + fp = kwargs.pop("file", None) if fp is None: - return + fp = sys.stdout + if fp is None: + return def write(data): if not isinstance(data, basestring): data = str(data) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -651,9 +651,12 @@ out = sys.stdout = StringIO.StringIO() try: pr("Hello,", "person!") + pr("2nd line", file=None) + sys.stdout = None + pr("nowhere") finally: sys.stdout = save - assert out.getvalue() == "Hello, person!\n" + assert out.getvalue() == "Hello, person!\n2nd line\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" @@ -668,7 +671,6 @@ result = out.getvalue() assert isinstance(result, unicode) assert result == u"Hello, person!\n" - pr("Hello", file=None) # This works. out = StringIO.StringIO() pr(None, file=out) assert out.getvalue() == "None\n" diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -34,6 +34,7 @@ 'newp_handle': 'handle.newp_handle', 'from_handle': 'handle.from_handle', '_get_types': 'func._get_types', + 'from_buffer': 'func.from_buffer', 'string': 'func.string', 'buffer': 'cbuffer.buffer', diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -45,8 +45,9 @@ # cif_descr = self.getfunctype().cif_descr if not cif_descr: - raise OperationError(space.w_NotImplementedError, - space.wrap("callbacks with '...'")) + raise oefmt(space.w_NotImplementedError, + "%s: callback with unsupported argument or " + "return type or with '...'", self.getfunctype().name) res = clibffi.c_ffi_prep_closure(self.get_closure(), cif_descr.cif, 
invoke_callback, rffi.cast(rffi.VOIDP, self.unique_id)) @@ -98,7 +99,7 @@ def print_error(self, operr, extra_line): space = self.space - operr.write_unraisable(space, "callback ", self.w_callable, + operr.write_unraisable(space, "cffi callback ", self.w_callable, with_traceback=True, extra_line=extra_line) def write_error_return_value(self, ll_res): diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -440,6 +440,25 @@ return "handle to %s" % (self.space.str_w(w_repr),) +class W_CDataFromBuffer(W_CData): + _attrs_ = ['buf', 'length', 'w_keepalive'] + _immutable_fields_ = ['buf', 'length', 'w_keepalive'] + + def __init__(self, space, cdata, ctype, buf, w_object): + W_CData.__init__(self, space, cdata, ctype) + self.buf = buf + self.length = buf.getlength() + self.w_keepalive = w_object + + def get_array_length(self): + return self.length + + def _repr_extra(self): + w_repr = self.space.repr(self.w_keepalive) + return "buffer len %d from '%s' object" % ( + self.length, self.space.type(self.w_keepalive).name) + + W_CData.typedef = TypeDef( '_cffi_backend.CData', __module__ = '_cffi_backend', diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -107,6 +107,9 @@ return self.space.w_None return W_CTypePtrOrArray._fget(self, attrchar) + def typeoffsetof_index(self, index): + return self.ctptr.typeoffsetof_index(index) + class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -27,6 +27,8 @@ _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] kind = "function" + cif_descr = 
lltype.nullptr(CIF_DESCRIPTION) + def __init__(self, space, fargs, fresult, ellipsis): extra = self._compute_extra_text(fargs, fresult, ellipsis) size = rffi.sizeof(rffi.VOIDP) @@ -41,7 +43,17 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. - CifDescrBuilder(fargs, fresult).rawallocate(self) + builder = CifDescrBuilder(fargs, fresult) + try: + builder.rawallocate(self) + except OperationError, e: + if not e.match(space, space.w_NotImplementedError): + raise + # else, eat the NotImplementedError. We will get the + # exception if we see an actual call + if self.cif_descr: # should not be True, but you never know + lltype.free(self.cif_descr, flavor='raw') + self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) def new_ctypefunc_completing_argtypes(self, args_w): space = self.space @@ -57,10 +69,12 @@ "argument %d passed in the variadic part needs to " "be a cdata object (got %T)", i + 1, w_obj) fvarargs[i] = ct + # xxx call instantiate() directly. It's a bit of a hack. 
ctypefunc = instantiate(W_CTypeFunc) ctypefunc.space = space ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem + #ctypefunc.cif_descr = NULL --- already provided as the default CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc @@ -178,8 +192,6 @@ # ____________________________________________________________ -W_CTypeFunc.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # default value - BIG_ENDIAN = sys.byteorder == 'big' USE_C_LIBFFI_MSVC = getattr(clibffi, 'USE_C_LIBFFI_MSVC', False) @@ -295,18 +307,18 @@ nflat = 0 for i, cf in enumerate(ctype.fields_list): if cf.is_bitfield(): - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with bit fields")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with bit fields)", ctype.name) flat = 1 ct = cf.ctype while isinstance(ct, ctypearray.W_CTypeArray): flat *= ct.length ct = ct.ctitem if flat <= 0: - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with a zero-length array")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with a zero-length array)", ctype.name) nflat += flat if USE_C_LIBFFI_MSVC and is_result_type: diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -142,12 +142,14 @@ raise oefmt(space.w_ValueError, "ctype '%s' is of unknown alignment", self.name) - def typeoffsetof(self, fieldname): + def typeoffsetof_field(self, fieldname, following): space = self.space - if fieldname is None: - msg = "expected a struct or union ctype" - else: - msg = "expected a struct or union ctype, or a pointer to one" + msg = "with a field name argument, expected a struct or union ctype" + raise 
OperationError(space.w_TypeError, space.wrap(msg)) + + def typeoffsetof_index(self, index): + space = self.space + msg = "with an integer argument, expected an array or pointer ctype" raise OperationError(space.w_TypeError, space.wrap(msg)) def rawaddressof(self, cdata, offset): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -308,24 +308,36 @@ def getcfield(self, attr): return self.ctitem.getcfield(attr) - def typeoffsetof(self, fieldname): - if fieldname is None: - return W_CTypePtrBase.typeoffsetof(self, fieldname) - else: - return self.ctitem.typeoffsetof(fieldname) + def typeoffsetof_field(self, fieldname, following): + if following == 0: + return self.ctitem.typeoffsetof_field(fieldname, -1) + return W_CTypePtrBase.typeoffsetof_field(self, fieldname, following) + + def typeoffsetof_index(self, index): + space = self.space + ctitem = self.ctitem + if ctitem.size < 0: + raise OperationError(space.w_TypeError, + space.wrap("pointer to opaque")) + try: + offset = ovfcheck(index * ctitem.size) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array offset would overflow a ssize_t")) + return ctitem, offset def rawaddressof(self, cdata, offset): from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, W_CTypePtrOrArray) and - isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): + isinstance(ctype2, W_CTypePtrOrArray)): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: raise OperationError(space.w_TypeError, - space.wrap("expected a 'cdata struct-or-union' object")) + space.wrap("expected a cdata struct/union/array/pointer" + " object")) def _fget(self, attrchar): if attrchar == 'i': # item diff --git 
a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -65,9 +65,7 @@ keepalive_until_here(ob) return ob - def typeoffsetof(self, fieldname): - if fieldname is None: - return (self, 0) + def typeoffsetof_field(self, fieldname, following): self.check_complete() space = self.space try: diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -48,13 +48,28 @@ align = w_ctype.alignof() return space.wrap(align) - at unwrap_spec(w_ctype=ctypeobj.W_CType, fieldname="str_or_None") -def typeoffsetof(space, w_ctype, fieldname): - ctype, offset = w_ctype.typeoffsetof(fieldname) + at unwrap_spec(w_ctype=ctypeobj.W_CType, following=int) +def typeoffsetof(space, w_ctype, w_field_or_index, following=0): + try: + fieldname = space.str_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + try: + index = space.int_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, + space.wrap("field name or array index expected")) + ctype, offset = w_ctype.typeoffsetof_index(index) + else: + ctype, offset = w_ctype.typeoffsetof_field(fieldname, following) + # return space.newtuple([space.wrap(ctype), space.wrap(offset)]) @unwrap_spec(w_ctype=ctypeobj.W_CType, w_cdata=cdataobj.W_CData, offset=int) -def rawaddressof(space, w_ctype, w_cdata, offset=0): +def rawaddressof(space, w_ctype, w_cdata, offset): return w_ctype.rawaddressof(w_cdata, offset) # ____________________________________________________________ @@ -76,3 +91,32 @@ def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) + +# ____________________________________________________________ + + at 
unwrap_spec(w_ctype=ctypeobj.W_CType) +def from_buffer(space, w_ctype, w_x): + from pypy.module._cffi_backend import ctypearray, ctypeprim + # + if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or + not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)): + raise oefmt(space.w_TypeError, + "needs 'char[]', got '%s'", w_ctype.name) + # + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? + try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + try: + _cdata = buf.get_raw_address() + except ValueError: + raise oefmt(space.w_TypeError, + "from_buffer() got a '%T' object, which supports the " + "buffer interface but cannot be rendered as a plain " + "raw address on PyPy", w_x) + # + return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1030,11 +1030,12 @@ BInt = new_primitive_type("int") BArray0 = new_array_type(new_pointer_type(BInt), 0) BStruct = new_struct_type("struct foo") + BStructP = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BArray0)]) - py.test.raises(NotImplementedError, new_function_type, - (BStruct,), BInt, False) - py.test.raises(NotImplementedError, new_function_type, - (BInt,), BStruct, False) + BFunc = new_function_type((BStruct,), BInt, False) + py.test.raises(NotImplementedError, cast(BFunc, 123), cast(BStructP, 123)) + BFunc2 = new_function_type((BInt,), BStruct, False) + py.test.raises(NotImplementedError, cast(BFunc2, 123), 123) def test_call_function_9(): BInt = new_primitive_type("int") @@ -1174,7 +1175,7 @@ assert sys.stderr.getvalue() == '' assert f(10000) == -42 assert matches(sys.stderr.getvalue(), 
"""\ -From callback : +From cffi callback : Traceback (most recent call last): File "$", line $, in Zcb1 $ @@ -1186,7 +1187,7 @@ bigvalue = 20000 assert f(bigvalue) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Trying to convert the result back to C: OverflowError: integer 60000 does not fit 'short' """) @@ -1805,7 +1806,8 @@ new_function_type((), new_pointer_type(BFunc)) BUnion = new_union_type("union foo_u") complete_struct_or_union(BUnion, []) - py.test.raises(NotImplementedError, new_function_type, (), BUnion) + BFunc = new_function_type((), BUnion) + py.test.raises(NotImplementedError, cast(BFunc, 123)) py.test.raises(TypeError, new_function_type, (), BArray) def test_struct_return_in_func(): @@ -2525,13 +2527,32 @@ ('a2', BChar, -1), ('a3', BChar, -1)]) py.test.raises(TypeError, typeoffsetof, BStructPtr, None) - assert typeoffsetof(BStruct, None) == (BStruct, 0) + py.test.raises(TypeError, typeoffsetof, BStruct, None) assert typeoffsetof(BStructPtr, 'a1') == (BChar, 0) assert typeoffsetof(BStruct, 'a1') == (BChar, 0) assert typeoffsetof(BStructPtr, 'a2') == (BChar, 1) assert typeoffsetof(BStruct, 'a3') == (BChar, 2) + assert typeoffsetof(BStructPtr, 'a2', 0) == (BChar, 1) + assert typeoffsetof(BStruct, u+'a3') == (BChar, 2) + py.test.raises(TypeError, typeoffsetof, BStructPtr, 'a2', 1) py.test.raises(KeyError, typeoffsetof, BStructPtr, 'a4') py.test.raises(KeyError, typeoffsetof, BStruct, 'a5') + py.test.raises(TypeError, typeoffsetof, BStruct, 42) + py.test.raises(TypeError, typeoffsetof, BChar, 'a1') + +def test_typeoffsetof_array(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + py.test.raises(TypeError, typeoffsetof, BArray, None) + py.test.raises(TypeError, typeoffsetof, BArray, 'a1') + assert typeoffsetof(BArray, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BIntP, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BArray, -51) == 
(BInt, -51 * size_of_int()) + MAX = sys.maxsize // size_of_int() + assert typeoffsetof(BArray, MAX) == (BInt, MAX * size_of_int()) + assert typeoffsetof(BIntP, MAX) == (BInt, MAX * size_of_int()) + py.test.raises(OverflowError, typeoffsetof, BArray, MAX + 1) def test_typeoffsetof_no_bitfield(): BInt = new_primitive_type("int") @@ -2551,17 +2572,26 @@ assert repr(p) == "" s = p[0] assert repr(s) == "" - a = rawaddressof(BStructPtr, s) + a = rawaddressof(BStructPtr, s, 0) assert repr(a).startswith("" + p = new_pointer_type(new_primitive_type("unsigned short")) + cast(p, c)[1] += 500 + assert list(a) == [10000, 20500, 30000] + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8.6" diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -30,7 +30,7 @@ class AppTestC(object): """Populated below, hack hack hack.""" - spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO')) + spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO', 'array')) def setup_class(cls): testfuncs_w = [] diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -565,7 +565,7 @@ # Flush the write buffer if necessary if self.writable: - self._writer_flush_unlocked(space) + self._flush_and_rewind_unlocked(space) self._reader_reset_buf() # Read whole blocks, and don't buffer them diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -362,3 +362,32 @@ f.read(1) f.seek(-1, 1) f.write(b'') + + def test_issue1902_2(self): + import _io + with _io.open(self.tmpfile, 'w+b', 4096) as f: + f.write(b'\xff' * 13569) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + 
f.read(4123) + f.seek(-4123, 1) + + def test_issue1902_3(self): + import _io + buffer_size = 4096 + with _io.open(self.tmpfile, 'w+b', buffer_size) as f: + f.write(b'\xff' * buffer_size * 3) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(buffer_size * 2) + assert f.tell() == 1 + buffer_size * 2 diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -394,7 +394,7 @@ alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == space.w_None): - raise oefmt(space.w_ValueError, "inconsistent fields and names") + raise oefmt(space.w_ValueError, "inconsistent fields and names in Numpy dtype unpickling") self.byteorder = endian self.shape = [] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1234,7 +1234,8 @@ d = np.dtype((' [] []' + print ' path-to-cpython-with-numpy defaults to "/usr/bin/python"\n' + return + if len(argv) < 4: + cpython = '/usr/bin/python' + else: + cpython = argv[3] + cpy_items = find_numpy_items(cpython) pypy_items = find_numpy_items(argv[1]) ver = get_version_str(argv[1]) all_items = [] diff --git a/pypy/module/micronumpy/tool/numready/search.py b/pypy/module/micronumpy/tool/numready/search.py --- a/pypy/module/micronumpy/tool/numready/search.py +++ b/pypy/module/micronumpy/tool/numready/search.py @@ -23,6 +23,15 @@ if attr is None and name.startswith("_"): continue subobj = getattr(obj, name) + if subobj is None: + continue + if isinstance(subobj, types.FunctionType): + try: + subobj() + except NotImplementedError: + continue + except: + pass if isinstance(subobj, types.TypeType): kind = KINDS["TYPE"] else: diff --git a/pypy/module/termios/__init__.py 
b/pypy/module/termios/__init__.py --- a/pypy/module/termios/__init__.py +++ b/pypy/module/termios/__init__.py @@ -1,5 +1,7 @@ from pypy.interpreter.mixedmodule import MixedModule +from rpython.rlib import rtermios + class Module(MixedModule): "This module provides an interface to the Posix calls for tty I/O control.\n\ For a complete description of these calls, see the Posix or Unix manual\n\ @@ -23,10 +25,6 @@ 'error' : 'space.fromcache(interp_termios.Cache).w_error', } -# XXX this is extremaly not-portable, but how to prevent this? - -import termios -for i in dir(termios): - val = getattr(termios, i) - if i.isupper() and type(val) is int: - Module.interpleveldefs[i] = "space.wrap(%s)" % val + for name in rtermios.all_constants: + value = getattr(rtermios, name) + interpleveldefs[name] = "space.wrap(%s)" % value diff --git a/pypy/module/termios/interp_termios.py b/pypy/module/termios/interp_termios.py --- a/pypy/module/termios/interp_termios.py +++ b/pypy/module/termios/interp_termios.py @@ -6,7 +6,6 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import wrap_oserror, OperationError from rpython.rlib import rtermios -import termios class Cache: def __init__(self, space): @@ -52,9 +51,9 @@ l_w = [space.wrap(i) for i in [iflag, oflag, cflag, lflag, ispeed, ospeed]] # last one need to be chosen carefully cc_w = [space.wrap(i) for i in cc] - if lflag & termios.ICANON: - cc_w[termios.VMIN] = space.wrap(ord(cc[termios.VMIN][0])) - cc_w[termios.VTIME] = space.wrap(ord(cc[termios.VTIME][0])) + if lflag & rtermios.ICANON: + cc_w[rtermios.VMIN] = space.wrap(ord(cc[rtermios.VMIN][0])) + cc_w[rtermios.VTIME] = space.wrap(ord(cc[rtermios.VTIME][0])) w_cc = space.newlist(cc_w) l_w.append(w_cc) return space.newlist(l_w) @@ -63,14 +62,14 @@ def tcsendbreak(space, w_fd, duration): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcsendbreak(fd, duration) + rtermios.tcsendbreak(fd, duration) except OSError, e: raise convert_error(space, e) def 
tcdrain(space, w_fd): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcdrain(fd) + rtermios.tcdrain(fd) except OSError, e: raise convert_error(space, e) @@ -78,7 +77,7 @@ def tcflush(space, w_fd, queue): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcflush(fd, queue) + rtermios.tcflush(fd, queue) except OSError, e: raise convert_error(space, e) @@ -86,6 +85,6 @@ def tcflow(space, w_fd, action): fd = space.c_filedescriptor_w(w_fd) try: - termios.tcflow(fd, action) + rtermios.tcflow(fd, action) except OSError, e: raise convert_error(space, e) diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -136,7 +136,7 @@ val = getattr(termios, name) if name.isupper() and type(val) is int: d[name] = val - assert d == self.orig_module_dict + assert sorted(d.items()) == sorted(self.orig_module_dict.items()) def test_error(self): import termios, errno, os diff --git a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py @@ -2,7 +2,7 @@ import py import platform import sys, ctypes -from cffi import FFI, CDefError +from cffi import FFI, CDefError, FFIError from pypy.module.test_lib_pypy.cffi_tests.support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) @@ -917,6 +917,16 @@ assert int(invalid_value) == 2 assert ffi.string(invalid_value) == "2" + def test_enum_char_hex_oct(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(r"enum foo{A='!', B='\'', C=0x10, D=010, E=- 0x10, F=-010};") + assert ffi.string(ffi.cast("enum foo", ord('!'))) == "A" + assert ffi.string(ffi.cast("enum foo", ord("'"))) == "B" + assert ffi.string(ffi.cast("enum foo", 16)) == "C" + assert ffi.string(ffi.cast("enum foo", 8)) == "D" + assert ffi.string(ffi.cast("enum foo", -16)) == "E" + 
assert ffi.string(ffi.cast("enum foo", -8)) == "F" + def test_array_of_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { int a, b; };") @@ -950,6 +960,25 @@ assert ffi.offsetof("struct foo", "b") == 4 assert ffi.offsetof("struct foo", "c") == 8 + def test_offsetof_nested(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { int a, b, c; };" + "struct bar { struct foo d, e; };") + assert ffi.offsetof("struct bar", "e") == 12 + py.test.raises(KeyError, ffi.offsetof, "struct bar", "e.a") + assert ffi.offsetof("struct bar", "e", "a") == 12 + assert ffi.offsetof("struct bar", "e", "b") == 16 + assert ffi.offsetof("struct bar", "e", "c") == 20 + + def test_offsetof_array(self): + ffi = FFI(backend=self.Backend()) + assert ffi.offsetof("int[]", 51) == 51 * ffi.sizeof("int") + assert ffi.offsetof("int *", 51) == 51 * ffi.sizeof("int") + ffi.cdef("struct bar { int a, b; int c[99]; };") + assert ffi.offsetof("struct bar", "c") == 2 * ffi.sizeof("int") + assert ffi.offsetof("struct bar", "c", 0) == 2 * ffi.sizeof("int") + assert ffi.offsetof("struct bar", "c", 51) == 53 * ffi.sizeof("int") + def test_alignof(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { char a; short b; char c; };") @@ -1482,8 +1511,10 @@ p = ffi.new("struct foo_s *") a = ffi.addressof(p[0]) assert repr(a).startswith(" Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75363:8cadc4e264d0 Date: 2015-01-15 22:52 +0100 http://bitbucket.org/pypy/pypy/changeset/8cadc4e264d0/ Log: fix tests diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -43,9 +43,9 @@ # can't change ;) assert loop.match_by_id("getitem", """ ... - i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) + i26 = call(ConstClass(ll_call_lookup_function), p18, p6, i25, 0, descr=...) ... 
- p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -68,25 +68,29 @@ guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) - p15 = new_array_clear(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + p15 = new_array_clear(8, descr=) {{{ - setfield_gc(p13, 16, descr=) - setfield_gc(p13, 0, descr=) + setfield_gc(p13, 0, descr=) + setfield_gc(p13, p15, descr=) + setfield_gc(p13, ConstPtr(0), descr=) + }}} + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 1, descr=) + {{{ + setfield_gc(p13, 0, descr=) + setfield_gc(p13, 0, descr=) + setfield_gc(p13, 16, descr=) }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) - i26 = int_and(i23, #) - i27 = int_is_true(i26) + i27 = int_lt(i23, 0) guard_false(i27, descr=...) p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -151,11 +151,11 @@ assert loop.match_by_id('loadattr1', ''' guard_not_invalidated(descr=...) - i19 = call(ConstClass(ll_call_lookup_function), _, _, _, descr=...) + i19 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) 
i22 = int_lt(i19, 0) guard_true(i22, descr=...) - i26 = call(ConstClass(ll_call_lookup_function), _, _, _, descr=...) + i26 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) i29 = int_lt(i26, 0) guard_true(i29, descr=...) From noreply at buildbot.pypy.org Thu Jan 15 23:12:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Jan 2015 23:12:28 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Can't call a function with save_err != 0 from a __del__ Message-ID: <20150115221228.749181C0013@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75364:ecb186950165 Date: 2015-01-15 23:12 +0100 http://bitbucket.org/pypy/pypy/changeset/ecb186950165/ Log: Can't call a function with save_err != 0 from a __del__ diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -83,6 +83,8 @@ [rffi.CCHARP, rffi.INT, rffi.INT, rffi.UINT], SEM_T, save_err=rffi.RFFI_SAVE_ERRNO) # sem_close is releasegil=False to be able to use it in the __del__ + _sem_close_no_errno = external('sem_close', [SEM_T], rffi.INT, + releasegil=False) _sem_close = external('sem_close', [SEM_T], rffi.INT, releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT, @@ -328,7 +330,7 @@ return sem def delete_semaphore(handle): - sem_close(handle) + _sem_close_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import TypeDef, 
generic_new_descr, GetSetProperty -from rpython.rlib._rsocket_rffi import socketclose +from rpython.rlib._rsocket_rffi import socketclose_no_errno from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform @@ -130,7 +130,7 @@ if not self.get_closed(): kqfd = self.kqfd self.kqfd = -1 - socketclose(kqfd) + socketclose_no_errno(kqfd) def check_closed(self, space): if self.get_closed(): diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -489,11 +489,13 @@ socket = external('socket', [rffi.INT, rffi.INT, rffi.INT], socketfd_type) if WIN32: - socketclose = external('closesocket', [socketfd_type], rffi.INT, - releasegil=False, save_err=SAVE_ERR) + socketclosename = 'closesocket' else: - socketclose = external('close', [socketfd_type], rffi.INT, - releasegil=False, save_err=SAVE_ERR) + socketclosename = 'close' +socketclose = external(socketclosename, [socketfd_type], rffi.INT, + releasegil=False, save_err=SAVE_ERR) +socketclose_no_errno = external(socketclosename, [socketfd_type], rffi.INT, + releasegil=False) socketconnect = external('connect', [socketfd_type, sockaddr_ptr, socklen_t], rffi.INT, save_err=SAVE_ERR) diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -517,7 +517,7 @@ fd = self.fd if fd != _c.INVALID_SOCKET: self.fd = _c.INVALID_SOCKET - _c.socketclose(fd) + _c.socketclose_no_errno(fd) if hasattr(_c, 'fcntl'): def _setblocking(self, block): From noreply at buildbot.pypy.org Fri Jan 16 08:39:15 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Jan 2015 08:39:15 +0100 (CET) Subject: [pypy-commit] pypy vmprof: don't release GIL on this external call Message-ID: <20150116073915.AEB521C013C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75365:0865a765abf2 Date: 
2015-01-16 09:39 +0200 http://bitbucket.org/pypy/pypy/changeset/0865a765abf2/ Log: don't release GIL on this external call diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -71,7 +71,7 @@ vmprof_register_virtual_function = rffi.llexternal( "vmprof_register_virtual_function", [rffi.CCHARP, rffi.VOIDP, rffi.VOIDP], lltype.Void, - compilation_info=eci) + compilation_info=eci, _nowrapper=True) original_execute_frame = PyFrame.execute_frame.im_func original_execute_frame.c_name = 'pypy_pyframe_execute_frame' From noreply at buildbot.pypy.org Fri Jan 16 10:41:51 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Jan 2015 10:41:51 +0100 (CET) Subject: [pypy-commit] pypy vmprof: kill the no collect, since we are sure we don't arrive there with frame being a non-forced virtualizable Message-ID: <20150116094151.EFFAA1C0035@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75366:1f3ebb016229 Date: 2015-01-16 11:41 +0200 http://bitbucket.org/pypy/pypy/changeset/1f3ebb016229/ Log: kill the no collect, since we are sure we don't arrive there with frame being a non-forced virtualizable diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -146,7 +146,6 @@ pos -= 1 return count - at rgc.no_collect def do_get_virtual_ip(frame): virtual_ip = frame.pycode._vmprof_virtual_ip if not frame.pycode._vmprof_registered: From noreply at buildbot.pypy.org Fri Jan 16 10:50:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 10:50:31 +0100 (CET) Subject: [pypy-commit] pypy default: Expand trace_slow_path to speed up tracing of dictionaries (with commonly Message-ID: <20150116095031.0B92B1C013C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75367:49b4c3d9035e 
Date: 2015-01-16 10:50 +0100 http://bitbucket.org/pypy/pypy/changeset/49b4c3d9035e/ Log: Expand trace_slow_path to speed up tracing of dictionaries (with commonly one or two GC refs per array item) diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -219,19 +219,42 @@ def _trace_slow_path(self, obj, callback, arg): typeid = self.get_type_id(obj) if self.has_gcptr_in_varsize(typeid): - item = obj + self.varsize_offset_to_variable_part(typeid) length = (obj + self.varsize_offset_to_length(typeid)).signed[0] - offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid) - itemlength = self.varsize_item_sizes(typeid) - while length > 0: - j = 0 - while j < len(offsets): - itemobj = item + offsets[j] - if self.points_to_valid_gc_object(itemobj): - callback(itemobj, arg) - j += 1 - item += itemlength - length -= 1 + if length > 0: + item = obj + self.varsize_offset_to_variable_part(typeid) + offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid) + itemlength = self.varsize_item_sizes(typeid) + len_offsets = len(offsets) + if len_offsets == 1: # common path #1 + offsets0 = offsets[0] + while length > 0: + itemobj0 = item + offsets0 + if self.points_to_valid_gc_object(itemobj0): + callback(itemobj0, arg) + item += itemlength + length -= 1 + elif len_offsets == 2: # common path #2 + offsets0 = offsets[0] + offsets1 = offsets[1] + while length > 0: + itemobj0 = item + offsets0 + if self.points_to_valid_gc_object(itemobj0): + callback(itemobj0, arg) + itemobj1 = item + offsets1 + if self.points_to_valid_gc_object(itemobj1): + callback(itemobj1, arg) + item += itemlength + length -= 1 + else: # general path + while length > 0: + j = 0 + while j < len_offsets: + itemobj = item + offsets[j] + if self.points_to_valid_gc_object(itemobj): + callback(itemobj, arg) + j += 1 + item += itemlength + length -= 1 if self.has_custom_trace(typeid): self.custom_trace_dispatcher(obj, typeid, 
callback, arg) _trace_slow_path._annspecialcase_ = 'specialize:arg(2)' diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -607,6 +607,58 @@ return rgc.can_move(lltype.malloc(TP, 1)) assert self.interpret(func, []) == self.GC_CAN_MOVE + def test_trace_array_of_structs(self): + R = lltype.GcStruct('R', ('i', lltype.Signed)) + S1 = lltype.GcArray(('p1', lltype.Ptr(R))) + S2 = lltype.GcArray(('p1', lltype.Ptr(R)), + ('p2', lltype.Ptr(R))) + S3 = lltype.GcArray(('p1', lltype.Ptr(R)), + ('p2', lltype.Ptr(R)), + ('p3', lltype.Ptr(R))) + def func(): + s1 = lltype.malloc(S1, 2) + s1[0].p1 = lltype.malloc(R) + s1[1].p1 = lltype.malloc(R) + s2 = lltype.malloc(S2, 2) + s2[0].p1 = lltype.malloc(R) + s2[0].p2 = lltype.malloc(R) + s2[1].p1 = lltype.malloc(R) + s2[1].p2 = lltype.malloc(R) + s3 = lltype.malloc(S3, 2) + s3[0].p1 = lltype.malloc(R) + s3[0].p2 = lltype.malloc(R) + s3[0].p3 = lltype.malloc(R) + s3[1].p1 = lltype.malloc(R) + s3[1].p2 = lltype.malloc(R) + s3[1].p3 = lltype.malloc(R) + s1[0].p1.i = 100 + s1[1].p1.i = 101 + s2[0].p1.i = 102 + s2[0].p2.i = 103 + s2[1].p1.i = 104 + s2[1].p2.i = 105 + s3[0].p1.i = 106 + s3[0].p2.i = 107 + s3[0].p3.i = 108 + s3[1].p1.i = 109 + s3[1].p2.i = 110 + s3[1].p3.i = 111 + rgc.collect() + return ((s1[0].p1.i == 100) + + (s1[1].p1.i == 101) + + (s2[0].p1.i == 102) + + (s2[0].p2.i == 103) + + (s2[1].p1.i == 104) + + (s2[1].p2.i == 105) + + (s3[0].p1.i == 106) + + (s3[0].p2.i == 107) + + (s3[0].p3.i == 108) + + (s3[1].p1.i == 109) + + (s3[1].p2.i == 110) + + (s3[1].p3.i == 111)) + res = self.interpret(func, []) + assert res == 12 + def test_shrink_array(self): from rpython.rtyper.lltypesystem.rstr import STR From noreply at buildbot.pypy.org Fri Jan 16 10:53:24 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Jan 2015 10:53:24 +0100 (CET) Subject: [pypy-commit] pypy ufuncapi: document branch to be merged 
Message-ID: <20150116095324.E45E61C013C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r75368:6187a42adcce Date: 2015-01-16 11:49 +0200 http://bitbucket.org/pypy/pypy/changeset/6187a42adcce/ Log: document branch to be merged diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -51,3 +51,12 @@ .. branch: ssa-flow Use SSA form for flow graphs inside build_flow() and part of simplify_graph() + +.. branch: ufuncpai + +Implement most of the GenericUfunc api to support numpy linalg. The strategy is +to encourage use of pure python or cffi ufuncs by extending frompyfunc(). +See the docstring of frompyfunc for more details. This dovetails with a branch +of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in +python, calling lapack from cffi. The branch also support traditional use of +cpyext GenericUfunc definitions in c. From noreply at buildbot.pypy.org Fri Jan 16 10:53:26 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Jan 2015 10:53:26 +0100 (CET) Subject: [pypy-commit] pypy ufuncapi: close branch to be merged Message-ID: <20150116095326.0A9061C013C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r75369:b6e56e3ce302 Date: 2015-01-16 11:51 +0200 http://bitbucket.org/pypy/pypy/changeset/b6e56e3ce302/ Log: close branch to be merged From noreply at buildbot.pypy.org Fri Jan 16 10:53:27 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Jan 2015 10:53:27 +0100 (CET) Subject: [pypy-commit] pypy default: merge ufuncapi which implements frompyfunc as a mechanism to support GenericUfunc api Message-ID: <20150116095327.C8FD51C013C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75370:7115e20d48ba Date: 2015-01-16 11:52 +0200 http://bitbucket.org/pypy/pypy/changeset/7115e20d48ba/ Log: merge ufuncapi which implements frompyfunc as a mechanism to support GenericUfunc api diff too long, 
truncating to 2000 out of 4153 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -51,3 +51,12 @@ .. branch: ssa-flow Use SSA form for flow graphs inside build_flow() and part of simplify_graph() + +.. branch: ufuncpai + +Implement most of the GenericUfunc api to support numpy linalg. The strategy is +to encourage use of pure python or cffi ufuncs by extending frompyfunc(). +See the docstring of frompyfunc for more details. This dovetails with a branch +of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in +python, calling lapack from cffi. The branch also support traditional use of +cpyext GenericUfunc definitions in c. diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,10 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +#define import_array() +#define PyArray_New _PyArray_New + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -11,6 +11,8 @@ #endif #include "old_defines.h" +#include "npy_common.h" +#include "__multiarray_api.h" #define NPY_UNUSED(x) x #define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) @@ -22,23 +24,10 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -typedef unsigned char npy_bool; -typedef unsigned char npy_uint8; -typedef unsigned short npy_uint16; -typedef signed short npy_int16; -typedef signed char npy_int8; -typedef int npy_int; - -typedef long npy_intp; -#ifndef NPY_INTP_FMT -#define NPY_INTP_FMT "ld" -#endif -#ifndef import_array -#define import_array() -#endif #define NPY_MAXDIMS 32 +#ifndef NDARRAYTYPES_H typedef struct { npy_intp *ptr; int len; @@ -73,19 +62,6 @@ 
NPY_NTYPES_ABI_COMPATIBLE=21 }; -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - #define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) #define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ ((type) <= NPY_ULONGLONG)) @@ -167,6 +143,21 @@ #define PyArray_ISNOTSWAPPED(arr) (1) #define PyArray_ISBYTESWAPPED(arr) (0) +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + /* functions */ #ifndef PyArray_NDIM diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -0,0 +1,1786 @@ +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H + +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" + +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + 
#define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. + */ +#define NPY_FEATURE_VERSION NPY_API_VERSION + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. 
+ */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 
1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. 
+ ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). 
+ */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. 
Except in the + * case of UPDATEIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that shold be + * decref'd on deletion + * + * For UPDATEIFCOPY flag this is an + * array to-be-updated upon deletion + * of this one + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. 
*/ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional + * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements + * and the array is contiguous if ndarray.squeeze() is contiguous. + * I.e. dimensions for which `ndarray.shape[dimension] == 1` are + * ignored. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + * + * This flag may be requested in constructor functions. 
+ */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropiate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when this array is deallocated + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. 
+ */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. + */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + 
+/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 
0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + 
npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define _PyArray_ITER_NEXT3(it) do { \ + if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ + (it)->coordinates[2]++; \ + (it)->dataptr += (it)->strides[2]; \ + } \ + else { \ + (it)->coordinates[2] = 0; \ + (it)->dataptr -= (it)->backstrides[2]; \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] \ + (it)->backstrides[1]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for 
(__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp) (ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to 
PyArray_Broadcast must be binary compatible + * with this structure. + */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + +/* Store the information needed for fancy-indexing over an array */ + +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; 
/* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object + iterators */ + PyArrayIterObject *ait; /* flat Iterator for + underlying array */ + + /* flat iterator for subspace (when numiter < nd) */ + PyArrayIterObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + /* + * if subspace iteration, the these are the coordinates to the + * start of the subspace. + */ + npy_intp bscoord[NPY_MAXDIMS]; + + PyObject *indexobj; /* creating obj */ + /* + * consec is first used to indicate wether fancy indices are + * consecutive and then denotes at which axis they are inserted + */ + int consec; + char *dataptr; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp 
dimensions[NPY_MAXDIMS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static NPY_INLINE int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +//#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static NPY_INLINE int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static NPY_INLINE void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static NPY_INLINE npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static NPY_INLINE npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static NPY_INLINE npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static NPY_INLINE PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static NPY_INLINE PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static NPY_INLINE npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static NPY_INLINE int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static NPY_INLINE int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & 
flags) == flags; +} + +static NPY_INLINE PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +static NPY_INLINE int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem( + v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. */ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static NPY_INLINE PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. 
Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define 
PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) +#define 
PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) 
+ +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's returned pointed by the + * PyCObject attribute of an array __array_struct__. See + * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. + ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + +/* + * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. + * See the documentation for PyDataMem_SetEventHook. + */ +typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, + void *user_data); + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." 
+#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + */ +#undef NPY_DEPRECATED_INCLUDES + +#endif /* NPY_ARRAYTYPES_H */ diff --git a/pypy/module/cpyext/include/numpy/npy_common.h b/pypy/module/cpyext/include/numpy/npy_common.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/npy_common.h @@ -0,0 +1,49 @@ +#ifndef _NPY_COMMON_H_ +#define _NPY_COMMON_H_ + +typedef Py_intptr_t npy_intp; +typedef Py_uintptr_t npy_uintp; +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +typedef unsigned char npy_bool; +typedef long npy_int32; +typedef unsigned long npy_uint32; +typedef unsigned long npy_ucs4; +typedef long npy_int64; +typedef unsigned long npy_uint64; +typedef unsigned char npy_uint8; + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +typedef struct { float real, imag; } npy_cfloat; +typedef struct { double real, imag; } npy_cdouble; +typedef npy_cdouble npy_complex128; +#if defined(_MSC_VER) + #define NPY_INLINE __inline +#elif defined(__GNUC__) + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif +#else + #define NPY_INLINE +#endif +#ifndef NPY_INTP_FMT +#define NPY_INTP_FMT 
"ld" +#endif +#define NPY_API_VERSION 0x8 +#endif //_NPY_COMMON_H_ + diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -11,7 +11,12 @@ from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray +from pypy.module.micronumpy import ufuncs from rpython.rlib.rawstorage import RAW_STORAGE_PTR +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.argument import Arguments +from pypy.interpreter.gateway import interp2app NPY_C_CONTIGUOUS = 0x0001 NPY_F_CONTIGUOUS = 0x0002 @@ -252,3 +257,40 @@ return simple_new(space, nd, dims, typenum, order=order, owning=owning, w_subtype=w_subtype) +gufunctype = lltype.Ptr(ufuncs.GenericUfunc) +# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there +# a problem with casting function pointers? 
+ at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, + rffi.CCHARP], PyObject) +def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, + nin, nout, identity, name, doc, check_return, signature): + w_signature = rffi.charp2str(signature) + return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, + check_return, w_signature) + + +def do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, + check_return, w_signature): + funcs_w = [None] * ntypes + dtypes_w = [None] * ntypes * (nin + nout) + for i in range(ntypes): + funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data) + for i in range(ntypes*(nin+nout)): + dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] + w_funcs = space.newlist(funcs_w) + w_dtypes = space.newlist(dtypes_w) + w_doc = rffi.charp2str(doc) + w_name = rffi.charp2str(name) From noreply at buildbot.pypy.org Fri Jan 16 10:53:28 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Jan 2015 10:53:28 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20150116095329.001681C013C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75371:96a1b468e3c5 Date: 2015-01-16 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/96a1b468e3c5/ Log: merge heads diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -219,19 +219,42 @@ def _trace_slow_path(self, obj, callback, arg): typeid = self.get_type_id(obj) if self.has_gcptr_in_varsize(typeid): - item = obj + self.varsize_offset_to_variable_part(typeid) length = (obj + self.varsize_offset_to_length(typeid)).signed[0] - offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid) - itemlength = self.varsize_item_sizes(typeid) - while length > 0: - j = 0 - while j < 
len(offsets): - itemobj = item + offsets[j] - if self.points_to_valid_gc_object(itemobj): - callback(itemobj, arg) - j += 1 - item += itemlength - length -= 1 + if length > 0: + item = obj + self.varsize_offset_to_variable_part(typeid) + offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid) + itemlength = self.varsize_item_sizes(typeid) + len_offsets = len(offsets) + if len_offsets == 1: # common path #1 + offsets0 = offsets[0] + while length > 0: + itemobj0 = item + offsets0 + if self.points_to_valid_gc_object(itemobj0): + callback(itemobj0, arg) + item += itemlength + length -= 1 + elif len_offsets == 2: # common path #2 + offsets0 = offsets[0] + offsets1 = offsets[1] + while length > 0: + itemobj0 = item + offsets0 + if self.points_to_valid_gc_object(itemobj0): + callback(itemobj0, arg) + itemobj1 = item + offsets1 + if self.points_to_valid_gc_object(itemobj1): + callback(itemobj1, arg) + item += itemlength + length -= 1 + else: # general path + while length > 0: + j = 0 + while j < len_offsets: + itemobj = item + offsets[j] + if self.points_to_valid_gc_object(itemobj): + callback(itemobj, arg) + j += 1 + item += itemlength + length -= 1 if self.has_custom_trace(typeid): self.custom_trace_dispatcher(obj, typeid, callback, arg) _trace_slow_path._annspecialcase_ = 'specialize:arg(2)' diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -607,6 +607,58 @@ return rgc.can_move(lltype.malloc(TP, 1)) assert self.interpret(func, []) == self.GC_CAN_MOVE + def test_trace_array_of_structs(self): + R = lltype.GcStruct('R', ('i', lltype.Signed)) + S1 = lltype.GcArray(('p1', lltype.Ptr(R))) + S2 = lltype.GcArray(('p1', lltype.Ptr(R)), + ('p2', lltype.Ptr(R))) + S3 = lltype.GcArray(('p1', lltype.Ptr(R)), + ('p2', lltype.Ptr(R)), + ('p3', lltype.Ptr(R))) + def func(): + s1 = lltype.malloc(S1, 2) + s1[0].p1 = lltype.malloc(R) + s1[1].p1 = 
lltype.malloc(R) + s2 = lltype.malloc(S2, 2) + s2[0].p1 = lltype.malloc(R) + s2[0].p2 = lltype.malloc(R) + s2[1].p1 = lltype.malloc(R) + s2[1].p2 = lltype.malloc(R) + s3 = lltype.malloc(S3, 2) + s3[0].p1 = lltype.malloc(R) + s3[0].p2 = lltype.malloc(R) + s3[0].p3 = lltype.malloc(R) + s3[1].p1 = lltype.malloc(R) + s3[1].p2 = lltype.malloc(R) + s3[1].p3 = lltype.malloc(R) + s1[0].p1.i = 100 + s1[1].p1.i = 101 + s2[0].p1.i = 102 + s2[0].p2.i = 103 + s2[1].p1.i = 104 + s2[1].p2.i = 105 + s3[0].p1.i = 106 + s3[0].p2.i = 107 + s3[0].p3.i = 108 + s3[1].p1.i = 109 + s3[1].p2.i = 110 + s3[1].p3.i = 111 + rgc.collect() + return ((s1[0].p1.i == 100) + + (s1[1].p1.i == 101) + + (s2[0].p1.i == 102) + + (s2[0].p2.i == 103) + + (s2[1].p1.i == 104) + + (s2[1].p2.i == 105) + + (s3[0].p1.i == 106) + + (s3[0].p2.i == 107) + + (s3[0].p3.i == 108) + + (s3[1].p1.i == 109) + + (s3[1].p2.i == 110) + + (s3[1].p3.i == 111)) + res = self.interpret(func, []) + assert res == 12 + def test_shrink_array(self): from rpython.rtyper.lltypesystem.rstr import STR From noreply at buildbot.pypy.org Fri Jan 16 11:05:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 11:05:29 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: hg merge default Message-ID: <20150116100529.18E401C01A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75372:808c6f4df2a1 Date: 2015-01-16 10:58 +0100 http://bitbucket.org/pypy/pypy/changeset/808c6f4df2a1/ Log: hg merge default diff too long, truncating to 2000 out of 7526 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -58,7 +58,7 @@ executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], + 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1589,7 +1589,7 @@ 'copyfile' in caller.f_globals): dest_dir = sys.pypy_resolvedirof(target_executable) src_dir = sys.pypy_resolvedirof(sys.executable) - for libname in ['libpypy-c.so']: + for libname in ['libpypy-c.so', 'libpypy-c.dylib']: dest_library = os.path.join(dest_dir, libname) src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py --- a/lib-python/2.7/test/test_xml_etree.py +++ b/lib-python/2.7/test/test_xml_etree.py @@ -225,9 +225,9 @@ >>> element.remove(subelement) >>> serialize(element) # 5 '' - >>> element.remove(subelement) + >>> element.remove(subelement) # doctest: +ELLIPSIS Traceback (most recent call last): - ValueError: list.remove(x): x not in list + ValueError: list.remove(... >>> serialize(element) # 6 '' >>> element[0:0] = [subelement, subelement, subelement] diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -9,7 +9,10 @@ of the given arguments and keywords. 
""" - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -6,3 +6,8 @@ __version__ = "0.8.6" __version_info__ = (0, 8, 6) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. +__version_verifier_modules__ = "0.8.6" diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -69,6 +69,7 @@ self._function_caches = [] self._libraries = [] self._cdefsources = [] + self._windows_unicode = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -77,6 +78,7 @@ # with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -189,13 +191,16 @@ cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) - def offsetof(self, cdecl, fieldname): + def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given - structure, which must be given as a C type name. + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. + You can also give numeric values which correspond to array + items, in case of an array type. 
""" if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) - return self._backend.typeoffsetof(cdecl, fieldname)[1] + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and @@ -264,6 +269,16 @@ """ return self._backend.buffer(cdata, size) + def from_buffer(self, python_buffer): + """Return a that points to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types str, + unicode, or bytearray (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. + """ + return self._backend.from_buffer(self.BCharA, python_buffer) + def callback(self, cdecl, python_callable=None, error=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. @@ -335,9 +350,23 @@ which requires binary compatibility in the signatures. """ from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations + if self._windows_unicode: + self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Veritier(). tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). 
self._libraries.append(lib) return lib @@ -356,15 +385,29 @@ with self._lock: return model.pointer_cache(self, ctype) - def addressof(self, cdata, field=None): + def addressof(self, cdata, *fields_or_indexes): """Return the address of a . - If 'field' is specified, return the address of this field. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. """ ctype = self._backend.typeof(cdata) - ctype, offset = self._backend.typeoffsetof(ctype, field) + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset + def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined in another FFI instance. Usage is similar to a #include in C, @@ -387,6 +430,44 @@ def from_handle(self, x): return self._backend.from_handle(x) + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'enabled_flag' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. 
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -169,6 +169,7 @@ class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False + kind = "pointer" @classmethod def _newp(cls, init): @@ -370,10 +371,12 @@ (CTypesPrimitive, type(source).__name__)) return source # + kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name + kind = kind1 def __init__(self, value): self._value = value @@ -703,12 +706,13 @@ class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) - _kind = kind + _kind = kind = kind1 # CTypesStructOrUnion._fix_class() 
return CTypesStructOrUnion @@ -994,27 +998,42 @@ def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) - def typeoffsetof(self, BType, fieldname): - if fieldname is not None and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - if fieldname is None: - return (BType, 0) - else: + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) - def rawaddressof(self, BTypePtr, cdata, offset): + def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a ") - if offset != 0: + if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ 
b/lib_pypy/cffi/commontypes.py @@ -29,6 +29,9 @@ result = model.PointerType(resolve_common_type(result[:-2])) elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) + elif result == 'set-unicode-needed': + raise api.FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) else: if commontype == result: raise api.FFIError("Unsupported type: %r. Please file a bug " @@ -86,8 +89,6 @@ "ULONGLONG": "unsigned long long", "WCHAR": "wchar_t", "SHORT": "short", - "TBYTE": "WCHAR", - "TCHAR": "WCHAR", "UCHAR": "unsigned char", "UINT": "unsigned int", "UINT8": "unsigned char", @@ -157,14 +158,12 @@ "LPCVOID": model.const_voidp_type, "LPCWSTR": "const WCHAR *", - "LPCTSTR": "LPCWSTR", "LPDWORD": "DWORD *", "LPHANDLE": "HANDLE *", "LPINT": "int *", "LPLONG": "long *", "LPSTR": "CHAR *", "LPWSTR": "WCHAR *", - "LPTSTR": "LPWSTR", "LPVOID": model.voidp_type, "LPWORD": "WORD *", "LRESULT": "LONG_PTR", @@ -173,7 +172,6 @@ "PBYTE": "BYTE *", "PCHAR": "CHAR *", "PCSTR": "const CHAR *", - "PCTSTR": "LPCWSTR", "PCWSTR": "const WCHAR *", "PDWORD": "DWORD *", "PDWORDLONG": "DWORDLONG *", @@ -200,9 +198,6 @@ "PSIZE_T": "SIZE_T *", "PSSIZE_T": "SSIZE_T *", "PSTR": "CHAR *", - "PTBYTE": "TBYTE *", - "PTCHAR": "TCHAR *", - "PTSTR": "LPWSTR", "PUCHAR": "UCHAR *", "PUHALF_PTR": "UHALF_PTR *", "PUINT": "UINT *", @@ -240,6 +235,15 @@ "USN": "LONGLONG", "VOID": model.void_type, "WPARAM": "UINT_PTR", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", }) return result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -1,4 +1,3 @@ - from . 
import api, model from .commontypes import COMMON_TYPES, resolve_common_type try: @@ -209,6 +208,8 @@ def _add_constants(self, key, val): if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations raise api.FFIError( "multiple declarations of constant: %s" % (key,)) self._int_constants[key] = val @@ -228,12 +229,18 @@ pyvalue = int(int_str, 0) self._add_constants(key, pyvalue) + self._declare('macro ' + key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: - raise api.CDefError('only supports the syntax "#define ' - '%s ..." (literally) or "#define ' - '%s 0x1FF" for now' % (key, key)) + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... (literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) def _parse_decl(self, decl): node = decl.type @@ -460,6 +467,8 @@ elif kind == 'union': tp = model.UnionType(explicit_name, None, None, None) elif kind == 'enum': + if explicit_name == '__dotdotdot__': + raise CDefError("Enums cannot be declared with ...") tp = self._build_enum_type(explicit_name, type.values) else: raise AssertionError("kind = %r" % (kind,)) @@ -532,9 +541,24 @@ def _parse_constant(self, exprnode, partial_length_ok=False): # for now, limited to expressions that are an immediate number - # or negative number + # or positive/negative number if isinstance(exprnode, pycparser.c_ast.Constant): - return int(exprnode.value, 0) + s = exprnode.value + if s.startswith('0'): + if s.startswith('0x') or s.startswith('0X'): + return int(s, 16) + return int(s, 8) + elif '1' <= s[0] <= '9': + return int(s, 10) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise api.CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + 
exprnode.op == '+'): + return self._parse_constant(exprnode.expr) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -11,6 +11,9 @@ """ +LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', + 'extra_objects', 'depends'] + def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -235,6 +235,8 @@ BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) +char_array_type = ArrayType(PrimitiveType('char'), None) + class StructOrUnionOrEnum(BaseTypeByIdentity): _attrs_ = ('name',) @@ -478,7 +480,7 @@ try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: - raise NotImplementedError("%r: %s" % (srctype, e)) + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -65,7 +65,7 @@ # The following two 'chained_list_constants' items contains # the head of these two chained lists, as a string that gives the # call to do, if any. - self._chained_list_constants = ['0', '0'] + self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] # prnt = self._prnt # first paste some standard set of lines that are mostly '#define' @@ -138,15 +138,22 @@ prnt() prnt('#endif') - def load_library(self): + def load_library(self, flags=None): # XXX review all usages of 'self' here! 
# import it as a new extension module + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) module = imp.load_dynamic(self.verifier.get_module_name(), self.verifier.modulefilename) except ImportError as e: error = "importing %r: %s" % (self.verifier.modulefilename, e) raise ffiplatform.VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler @@ -228,7 +235,8 @@ converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: - converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) errvalue = '-1' # elif isinstance(tp, model.PointerType): @@ -267,8 +275,8 @@ self._prnt(' if (datasize != 0) {') self._prnt(' if (datasize < 0)') self._prnt(' %s;' % errcode) - self._prnt(' %s = alloca(datasize);' % (tovar,)) - self._prnt(' memset((void *)%s, 0, datasize);' % (tovar,)) + self._prnt(' %s = alloca((size_t)datasize);' % (tovar,)) + self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,)) self._prnt(' if (_cffi_convert_array_from_object(' '(char *)%s, _cffi_type(%d), %s) < 0)' % ( tovar, self._gettypenum(tp), fromvar)) @@ -336,7 +344,7 @@ prnt = self._prnt numargs = len(tp.args) if numargs == 0: - argname = 'no_arg' + argname = 'noarg' elif numargs == 1: argname = 'arg0' else: @@ -386,6 +394,9 @@ prnt(' Py_END_ALLOW_THREADS') prnt() # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') if result_code: prnt(' return %s;' % self._convert_expr_from_c(tp.result, 'result', 'result type')) @@ -452,6 +463,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in 
tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -482,6 +494,8 @@ prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') prnt(' return _cffi_get_struct_layout(nums);') prnt(' /* the next line is not executed, but compiled */') prnt(' %s(0);' % (checkfuncname,)) @@ -578,7 +592,8 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True, size_too=False): + vartp=None, delayed=True, size_too=False, + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -590,6 +605,9 @@ else: assert category == 'const' # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # if not is_int: if category == 'var': realexpr = '&' + name @@ -637,6 +655,27 @@ # ---------- # enums + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -653,25 +692,8 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) 
{' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %s, ' - 'not %s",') - prnt(' "%s", "%s", buf, "%d");' % ( - name, enumerator, enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') @@ -695,8 +717,11 @@ # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): - assert tp == '...' - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing @@ -783,6 +808,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif 
@@ -828,12 +871,15 @@ PyLong_FromLongLong((long long)(x))) #define _cffi_from_c_int(x, type) \ - (((type)-1) > 0 ? /* unsigned */ \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) \ - : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x))) + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) #define _cffi_to_c_int(o, type) \ (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ @@ -844,7 +890,7 @@ : (type)_cffi_to_c_i32(o)) : \ sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ : (type)_cffi_to_c_i64(o)) : \ - (Py_FatalError("unsupported size for type " #type), 0)) + (Py_FatalError("unsupported size for type " #type), (type)0)) #define _cffi_to_c_i8 \ ((int(*)(PyObject *))_cffi_exports[1]) @@ -907,6 +953,7 @@ { PyObject *library; int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -58,12 +58,12 @@ modname = self.verifier.get_module_name() prnt("void %s%s(void) { }\n" % (prefix, modname)) - def load_library(self): + def load_library(self, flags=0): # import it with the CFFI backend backend = self.ffi._backend # needs to make a path that contains '/', on Posix filename = os.path.join(os.curdir, self.verifier.modulefilename) - module = backend.load_library(filename) + module = backend.load_library(filename, flags) # # call loading_gen_struct() to get the struct layout 
inferred by # the C compiler @@ -235,6 +235,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -354,11 +355,20 @@ # ---------- # constants, likely declared with '#define' - def _generate_gen_const(self, is_int, name, tp=None, category='const'): + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) - if is_int: + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') @@ -367,6 +377,7 @@ prnt('}') else: assert tp is not None + assert check_value is None prnt(tp.get_c_name(' %s(void)' % funcname, name),) prnt('{') if category == 'var': @@ -383,9 +394,13 @@ _loading_gen_constant = _loaded_noop - def _load_constant(self, is_int, tp, name, module): + def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name - if is_int: + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) @@ -396,6 +411,7 @@ BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: + assert check_value is None BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() @@ 
-410,6 +426,36 @@ # ---------- # enums + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise ffiplatform.VerificationError(error) + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -427,24 +473,7 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' sprintf(out_error,' - ' "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % ( - enumerator[:100], enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() @@ -456,16 +485,8 @@ 
tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = self._enum_funcname(prefix, name) - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise ffiplatform.VerificationError(error) + self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): @@ -476,13 +497,21 @@ # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): - assert tp == '...' - self._generate_gen_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): - value = self._load_constant(True, tp, name, module) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) @@ -565,6 +594,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned 
__int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -1,12 +1,23 @@ -import sys, os, binascii, imp, shutil -from . import __version__ +import sys, os, binascii, shutil +from . import __version_verifier_modules__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): def __init__(self, ffi, preamble, tmpdir=None, modulename=None, - ext_package=None, tag='', force_generic_engine=False, **kwds): + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): self.ffi = ffi self.preamble = preamble if not modulename: @@ -14,14 +25,15 @@ vengine_class = _locate_engine_class(ffi, force_generic_engine) self._vengine = vengine_class(self) self._vengine.patch_extension_kwds(kwds) - self.kwds = kwds + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) # if modulename: if tag: raise TypeError("can't specify both 'modulename' and 'tag'") else: - key = '\x00'.join([sys.version[:3], __version__, preamble, - flattened_kwds] + + key = '\x00'.join([sys.version[:3], __version_verifier_modules__, + preamble, flattened_kwds] + ffi._cdefsources) if sys.version_info >= (3,): key = key.encode('utf-8') @@ -33,7 +45,7 @@ k1, k2) suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() - self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) self.ext_package = ext_package 
self._has_source = False @@ -97,6 +109,20 @@ def generates_python_module(self): return self._vengine._gen_python_module + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + # ---------- def _locate_module(self): @@ -148,7 +174,10 @@ def _load_library(self): assert self._has_module - return self._vengine.load_library() + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() # ____________________________________________________________ @@ -181,6 +210,9 @@ def _caller_dir_pycache(): if _TMPDIR: return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result filename = sys._getframe(2).f_code.co_filename return os.path.abspath(os.path.join(os.path.dirname(filename), '__pycache__')) @@ -222,11 +254,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. Occurs on pypy without cpyext if sys.platform == 'win32': diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,12 +30,10 @@ Initialize threads. Only need to be called if there are any threads involved -.. function:: long pypy_setup_home(char* home, int verbose); +.. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given - "PyPy home directory". 
It is not strictly necessary to execute it before - running Python code, but without it you will not be able to import any - non-builtin module from the standard library. The arguments are: + "PyPy home directory". The arguments are: * ``home``: NULL terminated path to an executable inside the pypy directory (can be a .so name, can be made up) @@ -84,25 +82,36 @@ const char source[] = "print 'hello from pypy'"; - int main() + int main(void) { - int res; + int res; - rpython_startup_code(); - // pypy_setup_home() is not needed in this trivial example - res = pypy_execute_source((char*)source); - if (res) { - printf("Error calling pypy_execute_source!\n"); - } - return res; + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; } -If we save it as ``x.c`` now, compile it and run it with:: +If we save it as ``x.c`` now, compile it and run it (on linux) with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy +on OSX it is necessary to set the rpath of the binary if one wants to link to it:: + + gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl, at executable_path + ./x + hello from pypy + Worked! .. note:: If the compilation fails because of missing PyPy.h header file, diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -38,14 +38,13 @@ and not move the binary there, else PyPy would not be able to find its library. -If you want to install 3rd party libraries, the most convenient way is to -install distribute_ and pip_: +If you want to install 3rd party libraries, the most convenient way is +to install pip_ (unless you want to install virtualenv as explained +below; then you can directly use pip inside virtualenvs): .. 
code-block:: console - $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.1/bin/pypy distribute_setup.py + $ curl -O https://bootstrap.pypa.io/get-pip.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example @@ -69,7 +68,6 @@ Note that bin/python is now a symlink to bin/pypy. -.. _distribute: http://www.python-distribute.org/ .. _pip: http://pypi.python.org/pypi/pip diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -101,7 +101,7 @@ if space.is_none(w_path): if verbose: debug("Failed to find library based on pypy_find_stdlib") - return 1 + return rffi.cast(rffi.INT, 1) space.startup() space.call_function(w_pathsetter, w_path) # import site @@ -109,13 +109,13 @@ import_ = space.getattr(space.getbuiltinmodule('__builtin__'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) - return 0 + return rffi.cast(rffi.INT, 0) except OperationError, e: if verbose: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 + return rffi.cast(rffi.INT, -1) @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): @@ -234,8 +234,7 @@ enable_translationmodules(config) config.translation.suggest(check_str_without_nul=True) - if sys.platform.startswith('linux'): - config.translation.suggest(shared=True) + config.translation.suggest(shared=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -83,17 +83,16 @@ class __extend__(ast.BoolOp): - def 
_accept_jump_if_any_is(self, gen, condition, target): - self.values[0].accept_jump_if(gen, condition, target) - for i in range(1, len(self.values)): + def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0): + for i in range(len(self.values) - skip_last): self.values[i].accept_jump_if(gen, condition, target) def accept_jump_if(self, gen, condition, target): if condition and self.op == ast.And or \ (not condition and self.op == ast.Or): end = gen.new_block() - self._accept_jump_if_any_is(gen, not condition, end) - gen.emit_jump(ops.JUMP_FORWARD, target) + self._accept_jump_if_any_is(gen, not condition, end, skip_last=1) + self.values[-1].accept_jump_if(gen, condition, target) gen.use_next_block(end) else: self._accept_jump_if_any_is(gen, condition, target) diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -86,9 +86,11 @@ def print_(*args, **kwargs): """The new-style print function from py3k.""" - fp = kwargs.pop("file", sys.stdout) + fp = kwargs.pop("file", None) if fp is None: - return + fp = sys.stdout + if fp is None: + return def write(data): if not isinstance(data, basestring): data = str(data) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -651,9 +651,12 @@ out = sys.stdout = StringIO.StringIO() try: pr("Hello,", "person!") + pr("2nd line", file=None) + sys.stdout = None + pr("nowhere") finally: sys.stdout = save - assert out.getvalue() == "Hello, person!\n" + assert out.getvalue() == "Hello, person!\n2nd line\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" @@ -668,7 +671,6 @@ result = out.getvalue() assert isinstance(result, unicode) assert result == u"Hello, person!\n" - pr("Hello", file=None) # This works. 
out = StringIO.StringIO() pr(None, file=out) assert out.getvalue() == "None\n" diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -34,6 +34,7 @@ 'newp_handle': 'handle.newp_handle', 'from_handle': 'handle.from_handle', '_get_types': 'func._get_types', + 'from_buffer': 'func.from_buffer', 'string': 'func.string', 'buffer': 'cbuffer.buffer', diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -45,8 +45,9 @@ # cif_descr = self.getfunctype().cif_descr if not cif_descr: - raise OperationError(space.w_NotImplementedError, - space.wrap("callbacks with '...'")) + raise oefmt(space.w_NotImplementedError, + "%s: callback with unsupported argument or " + "return type or with '...'", self.getfunctype().name) res = clibffi.c_ffi_prep_closure(self.get_closure(), cif_descr.cif, invoke_callback, rffi.cast(rffi.VOIDP, self.unique_id)) @@ -98,7 +99,7 @@ def print_error(self, operr, extra_line): space = self.space - operr.write_unraisable(space, "callback ", self.w_callable, + operr.write_unraisable(space, "cffi callback ", self.w_callable, with_traceback=True, extra_line=extra_line) def write_error_return_value(self, ll_res): diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -440,6 +440,25 @@ return "handle to %s" % (self.space.str_w(w_repr),) +class W_CDataFromBuffer(W_CData): + _attrs_ = ['buf', 'length', 'w_keepalive'] + _immutable_fields_ = ['buf', 'length', 'w_keepalive'] + + def __init__(self, space, cdata, ctype, buf, w_object): + W_CData.__init__(self, space, cdata, ctype) + self.buf = buf + self.length = buf.getlength() + self.w_keepalive = w_object + + def get_array_length(self): + 
return self.length + + def _repr_extra(self): + w_repr = self.space.repr(self.w_keepalive) + return "buffer len %d from '%s' object" % ( + self.length, self.space.type(self.w_keepalive).name) + + W_CData.typedef = TypeDef( '_cffi_backend.CData', __module__ = '_cffi_backend', diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -107,6 +107,9 @@ return self.space.w_None return W_CTypePtrOrArray._fget(self, attrchar) + def typeoffsetof_index(self, index): + return self.ctptr.typeoffsetof_index(index) + class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -27,6 +27,8 @@ _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] kind = "function" + cif_descr = lltype.nullptr(CIF_DESCRIPTION) + def __init__(self, space, fargs, fresult, ellipsis): extra = self._compute_extra_text(fargs, fresult, ellipsis) size = rffi.sizeof(rffi.VOIDP) @@ -41,7 +43,17 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. - CifDescrBuilder(fargs, fresult).rawallocate(self) + builder = CifDescrBuilder(fargs, fresult) + try: + builder.rawallocate(self) + except OperationError, e: + if not e.match(space, space.w_NotImplementedError): + raise + # else, eat the NotImplementedError. 
We will get the + # exception if we see an actual call + if self.cif_descr: # should not be True, but you never know + lltype.free(self.cif_descr, flavor='raw') + self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) def new_ctypefunc_completing_argtypes(self, args_w): space = self.space @@ -57,10 +69,12 @@ "argument %d passed in the variadic part needs to " "be a cdata object (got %T)", i + 1, w_obj) fvarargs[i] = ct + # xxx call instantiate() directly. It's a bit of a hack. ctypefunc = instantiate(W_CTypeFunc) ctypefunc.space = space ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem + #ctypefunc.cif_descr = NULL --- already provided as the default CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc @@ -178,8 +192,6 @@ # ____________________________________________________________ -W_CTypeFunc.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # default value - BIG_ENDIAN = sys.byteorder == 'big' USE_C_LIBFFI_MSVC = getattr(clibffi, 'USE_C_LIBFFI_MSVC', False) @@ -295,18 +307,18 @@ nflat = 0 for i, cf in enumerate(ctype.fields_list): if cf.is_bitfield(): - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with bit fields")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with bit fields)", ctype.name) flat = 1 ct = cf.ctype while isinstance(ct, ctypearray.W_CTypeArray): flat *= ct.length ct = ct.ctitem if flat <= 0: - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with a zero-length array")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with a zero-length array)", ctype.name) nflat += flat if USE_C_LIBFFI_MSVC and is_result_type: diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py 
+++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -142,12 +142,14 @@ raise oefmt(space.w_ValueError, "ctype '%s' is of unknown alignment", self.name) - def typeoffsetof(self, fieldname): + def typeoffsetof_field(self, fieldname, following): space = self.space - if fieldname is None: - msg = "expected a struct or union ctype" - else: - msg = "expected a struct or union ctype, or a pointer to one" + msg = "with a field name argument, expected a struct or union ctype" + raise OperationError(space.w_TypeError, space.wrap(msg)) + + def typeoffsetof_index(self, index): + space = self.space + msg = "with an integer argument, expected an array or pointer ctype" raise OperationError(space.w_TypeError, space.wrap(msg)) def rawaddressof(self, cdata, offset): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -308,24 +308,36 @@ def getcfield(self, attr): return self.ctitem.getcfield(attr) - def typeoffsetof(self, fieldname): - if fieldname is None: - return W_CTypePtrBase.typeoffsetof(self, fieldname) - else: - return self.ctitem.typeoffsetof(fieldname) + def typeoffsetof_field(self, fieldname, following): + if following == 0: + return self.ctitem.typeoffsetof_field(fieldname, -1) + return W_CTypePtrBase.typeoffsetof_field(self, fieldname, following) + + def typeoffsetof_index(self, index): + space = self.space + ctitem = self.ctitem + if ctitem.size < 0: + raise OperationError(space.w_TypeError, + space.wrap("pointer to opaque")) + try: + offset = ovfcheck(index * ctitem.size) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("array offset would overflow a ssize_t")) + return ctitem, offset def rawaddressof(self, cdata, offset): from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion space = self.space ctype2 = cdata.ctype if (isinstance(ctype2, W_CTypeStructOrUnion) or - (isinstance(ctype2, 
W_CTypePtrOrArray) and - isinstance(ctype2.ctitem, W_CTypeStructOrUnion))): + isinstance(ctype2, W_CTypePtrOrArray)): ptrdata = rffi.ptradd(cdata._cdata, offset) return cdataobj.W_CData(space, ptrdata, self) else: raise OperationError(space.w_TypeError, - space.wrap("expected a 'cdata struct-or-union' object")) + space.wrap("expected a cdata struct/union/array/pointer" + " object")) def _fget(self, attrchar): if attrchar == 'i': # item diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -65,9 +65,7 @@ keepalive_until_here(ob) return ob - def typeoffsetof(self, fieldname): - if fieldname is None: - return (self, 0) + def typeoffsetof_field(self, fieldname, following): self.check_complete() space = self.space try: diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -48,13 +48,28 @@ align = w_ctype.alignof() return space.wrap(align) - at unwrap_spec(w_ctype=ctypeobj.W_CType, fieldname="str_or_None") -def typeoffsetof(space, w_ctype, fieldname): - ctype, offset = w_ctype.typeoffsetof(fieldname) + at unwrap_spec(w_ctype=ctypeobj.W_CType, following=int) +def typeoffsetof(space, w_ctype, w_field_or_index, following=0): + try: + fieldname = space.str_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + try: + index = space.int_w(w_field_or_index) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, + space.wrap("field name or array index expected")) + ctype, offset = w_ctype.typeoffsetof_index(index) + else: + ctype, offset = w_ctype.typeoffsetof_field(fieldname, following) + # return space.newtuple([space.wrap(ctype), space.wrap(offset)]) @unwrap_spec(w_ctype=ctypeobj.W_CType, w_cdata=cdataobj.W_CData, 
offset=int) -def rawaddressof(space, w_ctype, w_cdata, offset=0): +def rawaddressof(space, w_ctype, w_cdata, offset): return w_ctype.rawaddressof(w_cdata, offset) # ____________________________________________________________ @@ -76,3 +91,32 @@ def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) + +# ____________________________________________________________ + + at unwrap_spec(w_ctype=ctypeobj.W_CType) +def from_buffer(space, w_ctype, w_x): + from pypy.module._cffi_backend import ctypearray, ctypeprim + # + if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or + not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)): + raise oefmt(space.w_TypeError, + "needs 'char[]', got '%s'", w_ctype.name) + # + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? + try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + try: + _cdata = buf.get_raw_address() + except ValueError: + raise oefmt(space.w_TypeError, + "from_buffer() got a '%T' object, which supports the " + "buffer interface but cannot be rendered as a plain " + "raw address on PyPy", w_x) + # + return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -62,10 +62,54 @@ eptype("intptr_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) eptype("uintptr_t", rffi.UINTPTR_T, ctypeprim.W_CTypePrimitiveUnsigned) -eptype("ptrdiff_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) # <-xxx eptype("size_t", rffi.SIZE_T, ctypeprim.W_CTypePrimitiveUnsigned) eptype("ssize_t", rffi.SSIZE_T, ctypeprim.W_CTypePrimitiveSigned) +_WCTSigned = ctypeprim.W_CTypePrimitiveSigned +_WCTUnsign = 
ctypeprim.W_CTypePrimitiveUnsigned + +eptype("ptrdiff_t", getattr(rffi, 'PTRDIFF_T', rffi.INTPTR_T), _WCTSigned) +eptype("intmax_t", getattr(rffi, 'INTMAX_T', rffi.LONGLONG), _WCTSigned) +eptype("uintmax_t", getattr(rffi, 'UINTMAX_T', rffi.LONGLONG), _WCTUnsign) + +if hasattr(rffi, 'INT_LEAST8_T'): + eptype("int_least8_t", rffi.INT_LEAST8_T, _WCTSigned) + eptype("int_least16_t", rffi.INT_LEAST16_T, _WCTSigned) + eptype("int_least32_t", rffi.INT_LEAST32_T, _WCTSigned) + eptype("int_least64_t", rffi.INT_LEAST64_T, _WCTSigned) + eptype("uint_least8_t", rffi.UINT_LEAST8_T, _WCTUnsign) + eptype("uint_least16_t",rffi.UINT_LEAST16_T, _WCTUnsign) + eptype("uint_least32_t",rffi.UINT_LEAST32_T, _WCTUnsign) + eptype("uint_least64_t",rffi.UINT_LEAST64_T, _WCTUnsign) +else: + eptypesize("int_least8_t", 1, _WCTSigned) + eptypesize("uint_least8_t", 1, _WCTUnsign) + eptypesize("int_least16_t", 2, _WCTSigned) + eptypesize("uint_least16_t", 2, _WCTUnsign) + eptypesize("int_least32_t", 4, _WCTSigned) + eptypesize("uint_least32_t", 4, _WCTUnsign) + eptypesize("int_least64_t", 8, _WCTSigned) + eptypesize("uint_least64_t", 8, _WCTUnsign) + +if hasattr(rffi, 'INT_FAST8_T'): + eptype("int_fast8_t", rffi.INT_FAST8_T, _WCTSigned) + eptype("int_fast16_t", rffi.INT_FAST16_T, _WCTSigned) + eptype("int_fast32_t", rffi.INT_FAST32_T, _WCTSigned) + eptype("int_fast64_t", rffi.INT_FAST64_T, _WCTSigned) + eptype("uint_fast8_t", rffi.UINT_FAST8_T, _WCTUnsign) + eptype("uint_fast16_t",rffi.UINT_FAST16_T, _WCTUnsign) + eptype("uint_fast32_t",rffi.UINT_FAST32_T, _WCTUnsign) + eptype("uint_fast64_t",rffi.UINT_FAST64_T, _WCTUnsign) +else: + eptypesize("int_fast8_t", 1, _WCTSigned) + eptypesize("uint_fast8_t", 1, _WCTUnsign) + eptypesize("int_fast16_t", 2, _WCTSigned) + eptypesize("uint_fast16_t", 2, _WCTUnsign) + eptypesize("int_fast32_t", 4, _WCTSigned) + eptypesize("uint_fast32_t", 4, _WCTUnsign) + eptypesize("int_fast64_t", 8, _WCTSigned) + eptypesize("uint_fast64_t", 8, _WCTUnsign) + 
@unwrap_spec(name=str) def new_primitive_type(space, name): try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1030,11 +1030,12 @@ BInt = new_primitive_type("int") BArray0 = new_array_type(new_pointer_type(BInt), 0) BStruct = new_struct_type("struct foo") + BStructP = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BArray0)]) - py.test.raises(NotImplementedError, new_function_type, - (BStruct,), BInt, False) - py.test.raises(NotImplementedError, new_function_type, - (BInt,), BStruct, False) + BFunc = new_function_type((BStruct,), BInt, False) + py.test.raises(NotImplementedError, cast(BFunc, 123), cast(BStructP, 123)) + BFunc2 = new_function_type((BInt,), BStruct, False) + py.test.raises(NotImplementedError, cast(BFunc2, 123), 123) def test_call_function_9(): BInt = new_primitive_type("int") @@ -1174,7 +1175,7 @@ assert sys.stderr.getvalue() == '' assert f(10000) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Traceback (most recent call last): File "$", line $, in Zcb1 $ @@ -1186,7 +1187,7 @@ bigvalue = 20000 assert f(bigvalue) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Trying to convert the result back to C: OverflowError: integer 60000 does not fit 'short' """) @@ -1805,7 +1806,8 @@ new_function_type((), new_pointer_type(BFunc)) BUnion = new_union_type("union foo_u") complete_struct_or_union(BUnion, []) - py.test.raises(NotImplementedError, new_function_type, (), BUnion) + BFunc = new_function_type((), BUnion) + py.test.raises(NotImplementedError, cast(BFunc, 123)) py.test.raises(TypeError, new_function_type, (), BArray) def test_struct_return_in_func(): @@ -2525,13 +2527,32 @@ ('a2', BChar, -1), ('a3', BChar, -1)]) py.test.raises(TypeError, typeoffsetof, BStructPtr, None) - 
assert typeoffsetof(BStruct, None) == (BStruct, 0) + py.test.raises(TypeError, typeoffsetof, BStruct, None) assert typeoffsetof(BStructPtr, 'a1') == (BChar, 0) assert typeoffsetof(BStruct, 'a1') == (BChar, 0) assert typeoffsetof(BStructPtr, 'a2') == (BChar, 1) assert typeoffsetof(BStruct, 'a3') == (BChar, 2) + assert typeoffsetof(BStructPtr, 'a2', 0) == (BChar, 1) + assert typeoffsetof(BStruct, u+'a3') == (BChar, 2) + py.test.raises(TypeError, typeoffsetof, BStructPtr, 'a2', 1) py.test.raises(KeyError, typeoffsetof, BStructPtr, 'a4') py.test.raises(KeyError, typeoffsetof, BStruct, 'a5') + py.test.raises(TypeError, typeoffsetof, BStruct, 42) + py.test.raises(TypeError, typeoffsetof, BChar, 'a1') + +def test_typeoffsetof_array(): + BInt = new_primitive_type("int") + BIntP = new_pointer_type(BInt) + BArray = new_array_type(BIntP, None) + py.test.raises(TypeError, typeoffsetof, BArray, None) + py.test.raises(TypeError, typeoffsetof, BArray, 'a1') + assert typeoffsetof(BArray, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BIntP, 51) == (BInt, 51 * size_of_int()) + assert typeoffsetof(BArray, -51) == (BInt, -51 * size_of_int()) + MAX = sys.maxsize // size_of_int() + assert typeoffsetof(BArray, MAX) == (BInt, MAX * size_of_int()) + assert typeoffsetof(BIntP, MAX) == (BInt, MAX * size_of_int()) + py.test.raises(OverflowError, typeoffsetof, BArray, MAX + 1) def test_typeoffsetof_no_bitfield(): BInt = new_primitive_type("int") @@ -2551,17 +2572,26 @@ assert repr(p) == "" s = p[0] assert repr(s) == "" - a = rawaddressof(BStructPtr, s) + a = rawaddressof(BStructPtr, s, 0) assert repr(a).startswith("" + p = new_pointer_type(new_primitive_type("unsigned short")) + cast(p, c)[1] += 500 + assert list(a) == [10000, 20500, 30000] + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8.6" diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ 
b/pypy/module/_cffi_backend/test/test_c.py @@ -30,7 +30,7 @@ class AppTestC(object): """Populated below, hack hack hack.""" - spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO')) + spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO', 'array')) def setup_class(cls): testfuncs_w = [] diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -565,7 +565,7 @@ # Flush the write buffer if necessary if self.writable: - self._writer_flush_unlocked(space) + self._flush_and_rewind_unlocked(space) self._reader_reset_buf() # Read whole blocks, and don't buffer them diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -362,3 +362,32 @@ f.read(1) f.seek(-1, 1) f.write(b'') + + def test_issue1902_2(self): + import _io + with _io.open(self.tmpfile, 'w+b', 4096) as f: + f.write(b'\xff' * 13569) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(4123) + f.seek(-4123, 1) + + def test_issue1902_3(self): + import _io + buffer_size = 4096 + with _io.open(self.tmpfile, 'w+b', buffer_size) as f: + f.write(b'\xff' * buffer_size * 3) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(buffer_size * 2) + assert f.tell() == 1 + buffer_size * 2 diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,4 +1,5 @@ from rpython.rlib.buffer import Buffer +from rpython.rtyper.lltypesystem import rffi # XXX not the most efficient implementation @@ -20,3 +21,7 @@ def setitem(self, index, char): ll_buffer = self.datainstance.ll_buffer ll_buffer[index] = char + + def get_raw_address(self): + ll_buffer = self.datainstance.ll_buffer + return rffi.cast(rffi.CCHARP, ll_buffer) diff 
--git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1144,6 +1144,15 @@ b[3] = b'x' assert b[3] == b'x' + def test_pypy_raw_address(self): + import _rawffi + S = _rawffi.Structure((40, 1)) + s = S(autofree=True) + addr = buffer(s)._pypy_raw_address() + assert type(addr) is int + assert buffer(s)._pypy_raw_address() == addr + assert buffer(s, 10)._pypy_raw_address() == addr + 10 + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -244,6 +244,9 @@ def getitem(self, index): return self.ptr[index] + def get_raw_address(self): + return rffi.cast(rffi.CCHARP, self.ptr) + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py --- a/pypy/module/gc/__init__.py +++ b/pypy/module/gc/__init__.py @@ -30,6 +30,7 @@ 'get_referrers': 'referents.get_referrers', '_dump_rpy_heap': 'referents._dump_rpy_heap', 'get_typeids_z': 'referents.get_typeids_z', + 'get_typeids_list': 'referents.get_typeids_list', 'GcRef': 'referents.W_GcRef', }) MixedModule.__init__(self, space, w_name) diff --git a/pypy/module/gc/app_referents.py b/pypy/module/gc/app_referents.py --- a/pypy/module/gc/app_referents.py +++ b/pypy/module/gc/app_referents.py @@ -16,7 +16,8 @@ [0][0][0][-1] inserted after all GC roots, before all non-roots. If the argument is a filename and the 'zlib' module is available, - we also write a 'typeids.txt' in the same directory, if none exists. + we also write 'typeids.txt' and 'typeids.lst' in the same directory, + if they don't already exist. 
""" if isinstance(file, str): f = open(file, 'wb') @@ -30,7 +31,13 @@ filename2 = os.path.join(os.path.dirname(file), 'typeids.txt') if not os.path.exists(filename2): data = zlib.decompress(gc.get_typeids_z()) - f = open(filename2, 'wb') + f = open(filename2, 'w') + f.write(data) + f.close() + filename2 = os.path.join(os.path.dirname(file), 'typeids.lst') + if not os.path.exists(filename2): + data = ''.join(['%d\n' % n for n in gc.get_typeids_list()]) + f = open(filename2, 'w') f.write(data) f.close() else: diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py --- a/pypy/module/gc/referents.py +++ b/pypy/module/gc/referents.py @@ -228,3 +228,8 @@ a = rgc.get_typeids_z() s = ''.join([a[i] for i in range(len(a))]) return space.wrap(s) + +def get_typeids_list(space): + l = rgc.get_typeids_list() + list_w = [space.wrap(l[i]) for i in range(len(l))] + return space.newlist(list_w) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -392,7 +392,7 @@ alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == space.w_None): - raise oefmt(space.w_ValueError, "inconsistent fields and names") + raise oefmt(space.w_ValueError, "inconsistent fields and names in Numpy dtype unpickling") self.byteorder = endian self.shape = [] diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -478,6 +478,15 @@ for i in range(4): assert c[i] == max(a[i], b[i]) + + def test_abs_overflow(self): + from numpy import array, absolute, isinf + a = array(complex(1.5e308,1.5e308)) + # Prints a RuntimeWarning, but does not raise + b = absolute(a) + assert isinf(b) + + def test_basic(self): import sys from numpy import (dtype, add, array, dtype, diff --git 
a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -665,6 +665,7 @@ assert numpy.int64(9223372036854775807) == 9223372036854775807 assert numpy.int64(9223372036854775807) == 9223372036854775807 From noreply at buildbot.pypy.org Fri Jan 16 11:05:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 11:05:30 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: merge heads Message-ID: <20150116100530.5BB0E1C01A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75373:cc858d85c192 Date: 2015-01-16 10:58 +0100 http://bitbucket.org/pypy/pypy/changeset/cc858d85c192/ Log: merge heads diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -43,9 +43,9 @@ # can't change ;) assert loop.match_by_id("getitem", """ ... - i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) + i26 = call(ConstClass(ll_call_lookup_function), p18, p6, i25, 0, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -68,25 +68,29 @@ guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) 
- p15 = new_array_clear(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + p15 = new_array_clear(8, descr=) {{{ - setfield_gc(p13, 16, descr=) - setfield_gc(p13, 0, descr=) + setfield_gc(p13, 0, descr=) + setfield_gc(p13, p15, descr=) + setfield_gc(p13, ConstPtr(0), descr=) + }}} + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 1, descr=) + {{{ + setfield_gc(p13, 0, descr=) + setfield_gc(p13, 0, descr=) + setfield_gc(p13, 16, descr=) }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) - i26 = int_and(i23, #) - i27 = int_is_true(i26) + i27 = int_lt(i23, 0) guard_false(i27, descr=...) p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -151,11 +151,11 @@ assert loop.match_by_id('loadattr1', ''' guard_not_invalidated(descr=...) - i19 = call(ConstClass(ll_call_lookup_function), _, _, _, descr=...) + i19 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) i22 = int_lt(i19, 0) guard_true(i22, descr=...) - i26 = call(ConstClass(ll_call_lookup_function), _, _, _, descr=...) + i26 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) 
i29 = int_lt(i26, 0) guard_true(i29, descr=...) From noreply at buildbot.pypy.org Fri Jan 16 11:42:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 11:42:07 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix: Cls.__dict__.popitem() => segfault instead of KeyError Message-ID: <20150116104207.E501B1C013C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75374:0e383b1145e4 Date: 2015-01-16 11:41 +0100 http://bitbucket.org/pypy/pypy/changeset/0e383b1145e4/ Log: Test and fix: Cls.__dict__.popitem() => segfault instead of KeyError diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -471,6 +471,8 @@ # provide a better one. iterator = self.iteritems(w_dict) w_key, w_value = iterator.next_item() + if w_key is None: + raise KeyError self.delitem(w_dict, w_key) return (w_key, w_value) diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py --- a/pypy/objspace/std/test/test_dictproxy.py +++ b/pypy/objspace/std/test/test_dictproxy.py @@ -16,14 +16,25 @@ raises(TypeError, 'NotEmpty.__dict__[15] = "y"') raises(KeyError, 'del NotEmpty.__dict__[15]') - key, value = NotEmpty.__dict__.popitem() - assert (key == 'a' and value == 1) or (key == 'b' and value == 4) - assert NotEmpty.__dict__.setdefault("string", 1) == 1 assert NotEmpty.__dict__.setdefault("string", 2) == 1 assert NotEmpty.string == 1 raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)') + def test_dictproxy_popitem(self): + class A(object): + a = 42 + seen = 0 + try: + while True: + key, value = A.__dict__.popitem() + if key == 'a': + assert value == 42 + seen += 1 + except KeyError: + pass + assert seen == 1 + def test_dictproxy_getitem(self): class NotEmpty(object): a = 1 From noreply at buildbot.pypy.org Fri Jan 16 11:42:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 
11:42:53 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: hg merge default Message-ID: <20150116104253.027731C013C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75375:f3b6347c7d5d Date: 2015-01-16 11:42 +0100 http://bitbucket.org/pypy/pypy/changeset/f3b6347c7d5d/ Log: hg merge default diff too long, truncating to 2000 out of 4197 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -51,3 +51,12 @@ .. branch: ssa-flow Use SSA form for flow graphs inside build_flow() and part of simplify_graph() + +.. branch: ufuncpai + +Implement most of the GenericUfunc api to support numpy linalg. The strategy is +to encourage use of pure python or cffi ufuncs by extending frompyfunc(). +See the docstring of frompyfunc for more details. This dovetails with a branch +of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in +python, calling lapack from cffi. The branch also support traditional use of +cpyext GenericUfunc definitions in c. 
diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,10 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +#define import_array() +#define PyArray_New _PyArray_New + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -11,6 +11,8 @@ #endif #include "old_defines.h" +#include "npy_common.h" +#include "__multiarray_api.h" #define NPY_UNUSED(x) x #define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) @@ -22,23 +24,10 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -typedef unsigned char npy_bool; -typedef unsigned char npy_uint8; -typedef unsigned short npy_uint16; -typedef signed short npy_int16; -typedef signed char npy_int8; -typedef int npy_int; - -typedef long npy_intp; -#ifndef NPY_INTP_FMT -#define NPY_INTP_FMT "ld" -#endif -#ifndef import_array -#define import_array() -#endif #define NPY_MAXDIMS 32 +#ifndef NDARRAYTYPES_H typedef struct { npy_intp *ptr; int len; @@ -73,19 +62,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - #define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) #define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ ((type) <= NPY_ULONGLONG)) @@ -167,6 +143,21 @@ #define PyArray_ISNOTSWAPPED(arr) (1) #define PyArray_ISBYTESWAPPED(arr) (0) +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define 
NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + /* functions */ #ifndef PyArray_NDIM diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -0,0 +1,1786 @@ +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H + +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" + +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. 
whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. + */ +#define NPY_FEATURE_VERSION NPY_API_VERSION + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. 
+ */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 
1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. 
+ ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). 
+ */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. 
Except in the + * case of UPDATEIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that shold be + * decref'd on deletion + * + * For UPDATEIFCOPY flag this is an + * array to-be-updated upon deletion + * of this one + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. 
*/ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional + * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements + * and the array is contiguous if ndarray.squeeze() is contiguous. + * I.e. dimensions for which `ndarray.shape[dimension] == 1` are + * ignored. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + * + * This flag may be requested in constructor functions. 
+ */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropiate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when this array is deallocated + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. 
+ */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. + */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + 
+/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 
0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + 
npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define _PyArray_ITER_NEXT3(it) do { \ + if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ + (it)->coordinates[2]++; \ + (it)->dataptr += (it)->strides[2]; \ + } \ + else { \ + (it)->coordinates[2] = 0; \ + (it)->dataptr -= (it)->backstrides[2]; \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] \ + (it)->backstrides[1]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for 
(__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp) (ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to 
PyArray_Broadcast must be binary compatible + * with this structure. + */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + +/* Store the information needed for fancy-indexing over an array */ + +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; 
/* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object + iterators */ + PyArrayIterObject *ait; /* flat Iterator for + underlying array */ + + /* flat iterator for subspace (when numiter < nd) */ + PyArrayIterObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + /* + * if subspace iteration, the these are the coordinates to the + * start of the subspace. + */ + npy_intp bscoord[NPY_MAXDIMS]; + + PyObject *indexobj; /* creating obj */ + /* + * consec is first used to indicate wether fancy indices are + * consecutive and then denotes at which axis they are inserted + */ + int consec; + char *dataptr; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp 
dimensions[NPY_MAXDIMS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static NPY_INLINE int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +//#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static NPY_INLINE int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static NPY_INLINE void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static NPY_INLINE npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static NPY_INLINE npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static NPY_INLINE npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static NPY_INLINE PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static NPY_INLINE PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static NPY_INLINE npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static NPY_INLINE int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static NPY_INLINE int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & 
flags) == flags; +} + +static NPY_INLINE PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +static NPY_INLINE int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem( + v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. */ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static NPY_INLINE PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. 
Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define 
PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) +#define 
PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) 
+ +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's returned pointed by the + * PyCObject attribute of an array __array_struct__. See + * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. + ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + +/* + * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. + * See the documentation for PyDataMem_SetEventHook. + */ +typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, + void *user_data); + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." 
+#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + */ +#undef NPY_DEPRECATED_INCLUDES + +#endif /* NPY_ARRAYTYPES_H */ diff --git a/pypy/module/cpyext/include/numpy/npy_common.h b/pypy/module/cpyext/include/numpy/npy_common.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/npy_common.h @@ -0,0 +1,49 @@ +#ifndef _NPY_COMMON_H_ +#define _NPY_COMMON_H_ + +typedef Py_intptr_t npy_intp; +typedef Py_uintptr_t npy_uintp; +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +typedef unsigned char npy_bool; +typedef long npy_int32; +typedef unsigned long npy_uint32; +typedef unsigned long npy_ucs4; +typedef long npy_int64; +typedef unsigned long npy_uint64; +typedef unsigned char npy_uint8; + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +typedef struct { float real, imag; } npy_cfloat; +typedef struct { double real, imag; } npy_cdouble; +typedef npy_cdouble npy_complex128; +#if defined(_MSC_VER) + #define NPY_INLINE __inline +#elif defined(__GNUC__) + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif +#else + #define NPY_INLINE +#endif +#ifndef NPY_INTP_FMT +#define NPY_INTP_FMT 
"ld" +#endif +#define NPY_API_VERSION 0x8 +#endif //_NPY_COMMON_H_ + diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -11,7 +11,12 @@ from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray +from pypy.module.micronumpy import ufuncs from rpython.rlib.rawstorage import RAW_STORAGE_PTR +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.argument import Arguments +from pypy.interpreter.gateway import interp2app NPY_C_CONTIGUOUS = 0x0001 NPY_F_CONTIGUOUS = 0x0002 @@ -252,3 +257,40 @@ return simple_new(space, nd, dims, typenum, order=order, owning=owning, w_subtype=w_subtype) +gufunctype = lltype.Ptr(ufuncs.GenericUfunc) +# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there +# a problem with casting function pointers? 
+ at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, + rffi.CCHARP], PyObject) +def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, + nin, nout, identity, name, doc, check_return, signature): + w_signature = rffi.charp2str(signature) + return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, + check_return, w_signature) + + +def do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, + check_return, w_signature): + funcs_w = [None] * ntypes + dtypes_w = [None] * ntypes * (nin + nout) + for i in range(ntypes): + funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data) + for i in range(ntypes*(nin+nout)): + dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] + w_funcs = space.newlist(funcs_w) + w_dtypes = space.newlist(dtypes_w) + w_doc = rffi.charp2str(doc) + w_name = rffi.charp2str(name) From noreply at buildbot.pypy.org Fri Jan 16 11:56:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 11:56:57 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix tests Message-ID: <20150116105657.5176E1C013C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75376:5288382f2748 Date: 2015-01-16 11:54 +0100 http://bitbucket.org/pypy/pypy/changeset/5288382f2748/ Log: fix tests diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -20,6 +20,7 @@ from rpython.rlib.rarithmetic import intmask, is_valid_int from rpython.jit.backend.detect_cpu import autodetect from rpython.jit.backend.llsupport import jitframe +from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU IS_32_BIT = sys.maxint < 2**32 @@ -2924,6 +2925,8 @@ from rpython.rlib.libffi import types from 
rpython.jit.backend.llsupport import llerrno # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") eci = ExternalCompilationInfo( separate_module_sources=[''' #include @@ -2965,6 +2968,8 @@ from rpython.rlib.libffi import types from rpython.jit.backend.llsupport import llerrno # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") eci = ExternalCompilationInfo( separate_module_sources=[r''' #include @@ -3333,8 +3338,6 @@ assert not called def test_assembler_call_propagate_exc(self): - from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU - if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("llgraph can't fake exceptions well enough, give up") @@ -3869,7 +3872,6 @@ assert res.value == iexpected def test_free_loop_and_bridges(self): - from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("not a subclass of llmodel.AbstractLLCPU") if hasattr(self.cpu, 'setup_once'): @@ -4009,7 +4011,6 @@ assert got == expected def test_compile_asmlen(self): - from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("pointless test on non-asm") from rpython.jit.backend.tool.viewcode import machine_code_dump, ObjdumpNotFound @@ -4451,7 +4452,6 @@ self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) frame = self.cpu.execute_token(looptoken, 0, 0, 3) assert self.cpu.get_latest_descr(frame) is guarddescr - from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("pointless test on non-asm") @@ -4558,8 +4558,6 @@ assert res.getint() == struct.unpack("I", struct.pack("f", 12.5))[0] def test_zero_ptr_field(self): - from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU - if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("llgraph can't do zero_ptr_field") T = lltype.GcStruct('T') @@ -4583,8 +4581,6 @@ 
assert not s.x def test_zero_ptr_field_2(self): - from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU - if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("llgraph does not do zero_ptr_field") @@ -4608,8 +4604,6 @@ assert s.y == -4398176 def test_zero_array(self): - from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU - if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("llgraph does not do zero_array") diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -478,28 +478,29 @@ i6 = BoxInt() c = ConstInt(-1) faildescr = BasicFailDescr(1) + cz = ConstInt(0) # we must call it repeatedly: if the stack pointer gets increased # by 40 bytes by the STDCALL call, and if we don't expect it, # then we are going to get our stack emptied unexpectedly by # several repeated calls ops = [ ResOperation(rop.CALL_RELEASE_GIL, - [funcbox, i1, c, c, c, c, c, c, c, c, i2], + [cz, funcbox, i1, c, c, c, c, c, c, c, c, i2], i3, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.CALL_RELEASE_GIL, - [funcbox, i1, c, c, c, c, c, c, c, c, i2], + [cz, funcbox, i1, c, c, c, c, c, c, c, c, i2], i4, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.CALL_RELEASE_GIL, - [funcbox, i1, c, c, c, c, c, c, c, c, i2], + [cz, funcbox, i1, c, c, c, c, c, c, c, c, i2], i5, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.CALL_RELEASE_GIL, - [funcbox, i1, c, c, c, c, c, c, c, c, i2], + [cz, funcbox, i1, c, c, c, c, c, c, c, c, i2], i6, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), From noreply at buildbot.pypy.org Fri Jan 16 11:56:58 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 11:56:58 +0100 (CET) Subject: [pypy-commit] pypy 
errno-again: fix test Message-ID: <20150116105658.9570F1C013C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75377:2daa03a9b986 Date: 2015-01-16 11:56 +0100 http://bitbucket.org/pypy/pypy/changeset/2daa03a9b986/ Log: fix test diff --git a/rpython/translator/test/test_simplify.py b/rpython/translator/test/test_simplify.py --- a/rpython/translator/test/test_simplify.py +++ b/rpython/translator/test/test_simplify.py @@ -183,7 +183,9 @@ print op subgraph = get_graph(op.args[0], t) if subgraph is None: - found.append(op) + # ignore 'get_errno' and 'set_errno' + if 'et_errno' not in repr(op.args[0]): + found.append(op) else: walkgraph(subgraph) walkgraph(graph) From noreply at buildbot.pypy.org Fri Jan 16 11:57:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 11:57:23 +0100 (CET) Subject: [pypy-commit] pypy errno-again: hg merge default Message-ID: <20150116105723.671781C013C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75378:29a933458a23 Date: 2015-01-16 11:57 +0100 http://bitbucket.org/pypy/pypy/changeset/29a933458a23/ Log: hg merge default diff too long, truncating to 2000 out of 4448 lines diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,12 +30,10 @@ Initialize threads. Only need to be called if there are any threads involved -.. function:: long pypy_setup_home(char* home, int verbose); +.. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given - "PyPy home directory". It is not strictly necessary to execute it before - running Python code, but without it you will not be able to import any - non-builtin module from the standard library. The arguments are: + "PyPy home directory". 
The arguments are: * ``home``: NULL terminated path to an executable inside the pypy directory (can be a .so name, can be made up) @@ -84,17 +82,22 @@ const char source[] = "print 'hello from pypy'"; - int main() + int main(void) { - int res; + int res; - rpython_startup_code(); - // pypy_setup_home() is not needed in this trivial example - res = pypy_execute_source((char*)source); - if (res) { - printf("Error calling pypy_execute_source!\n"); - } - return res; + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; } If we save it as ``x.c`` now, compile it and run it (on linux) with:: diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -51,3 +51,12 @@ .. branch: ssa-flow Use SSA form for flow graphs inside build_flow() and part of simplify_graph() + +.. branch: ufuncpai + +Implement most of the GenericUfunc api to support numpy linalg. The strategy is +to encourage use of pure python or cffi ufuncs by extending frompyfunc(). +See the docstring of frompyfunc for more details. This dovetails with a branch +of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in +python, calling lapack from cffi. The branch also support traditional use of +cpyext GenericUfunc definitions in c. 
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -101,7 +101,7 @@ if space.is_none(w_path): if verbose: debug("Failed to find library based on pypy_find_stdlib") - return 1 + return rffi.cast(rffi.INT, 1) space.startup() space.call_function(w_pathsetter, w_path) # import site @@ -109,13 +109,13 @@ import_ = space.getattr(space.getbuiltinmodule('__builtin__'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) - return 0 + return rffi.cast(rffi.INT, 0) except OperationError, e: if verbose: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 + return rffi.cast(rffi.INT, -1) @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,10 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +#define import_array() +#define PyArray_New _PyArray_New + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -11,6 +11,8 @@ #endif #include "old_defines.h" +#include "npy_common.h" +#include "__multiarray_api.h" #define NPY_UNUSED(x) x #define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) @@ -22,23 +24,10 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -typedef unsigned char npy_bool; -typedef unsigned char npy_uint8; -typedef unsigned short npy_uint16; -typedef signed short npy_int16; -typedef signed char npy_int8; -typedef int npy_int; - -typedef long npy_intp; -#ifndef NPY_INTP_FMT -#define 
NPY_INTP_FMT "ld" -#endif -#ifndef import_array -#define import_array() -#endif #define NPY_MAXDIMS 32 +#ifndef NDARRAYTYPES_H typedef struct { npy_intp *ptr; int len; @@ -73,19 +62,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - #define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) #define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ ((type) <= NPY_ULONGLONG)) @@ -167,6 +143,21 @@ #define PyArray_ISNOTSWAPPED(arr) (1) #define PyArray_ISBYTESWAPPED(arr) (0) +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + /* functions */ #ifndef PyArray_NDIM diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -0,0 +1,1786 @@ +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H + +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" + +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + 
#define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. + */ +#define NPY_FEATURE_VERSION NPY_API_VERSION + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. 
+ */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 
1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. 
+ ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). 
+ */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. 
Except in the + * case of UPDATEIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that shold be + * decref'd on deletion + * + * For UPDATEIFCOPY flag this is an + * array to-be-updated upon deletion + * of this one + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. 
*/ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional + * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements + * and the array is contiguous if ndarray.squeeze() is contiguous. + * I.e. dimensions for which `ndarray.shape[dimension] == 1` are + * ignored. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + * + * This flag may be requested in constructor functions. 
+ */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropiate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when this array is deallocated + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. 
+ */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. + */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + 
+/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 
0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + 
npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define _PyArray_ITER_NEXT3(it) do { \ + if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ + (it)->coordinates[2]++; \ + (it)->dataptr += (it)->strides[2]; \ + } \ + else { \ + (it)->coordinates[2] = 0; \ + (it)->dataptr -= (it)->backstrides[2]; \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] \ + (it)->backstrides[1]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for 
(__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp) (ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to 
PyArray_Broadcast must be binary compatible + * with this structure. + */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + +/* Store the information needed for fancy-indexing over an array */ + +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; 
/* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object + iterators */ + PyArrayIterObject *ait; /* flat Iterator for + underlying array */ + + /* flat iterator for subspace (when numiter < nd) */ + PyArrayIterObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + /* + * if subspace iteration, the these are the coordinates to the + * start of the subspace. + */ + npy_intp bscoord[NPY_MAXDIMS]; + + PyObject *indexobj; /* creating obj */ + /* + * consec is first used to indicate wether fancy indices are + * consecutive and then denotes at which axis they are inserted + */ + int consec; + char *dataptr; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp 
dimensions[NPY_MAXDIMS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static NPY_INLINE int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +//#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static NPY_INLINE int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static NPY_INLINE void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static NPY_INLINE npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static NPY_INLINE npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static NPY_INLINE npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static NPY_INLINE PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static NPY_INLINE PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static NPY_INLINE npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static NPY_INLINE int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static NPY_INLINE int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & 
flags) == flags; +} + +static NPY_INLINE PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +static NPY_INLINE int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem( + v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. */ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static NPY_INLINE PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. 
Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define 
PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) +#define 
PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) 
+ +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's returned pointed by the + * PyCObject attribute of an array __array_struct__. See + * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. + ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + +/* + * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. + * See the documentation for PyDataMem_SetEventHook. + */ +typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, + void *user_data); + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." 
+#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + */ +#undef NPY_DEPRECATED_INCLUDES + +#endif /* NPY_ARRAYTYPES_H */ diff --git a/pypy/module/cpyext/include/numpy/npy_common.h b/pypy/module/cpyext/include/numpy/npy_common.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/npy_common.h @@ -0,0 +1,49 @@ +#ifndef _NPY_COMMON_H_ +#define _NPY_COMMON_H_ + +typedef Py_intptr_t npy_intp; +typedef Py_uintptr_t npy_uintp; +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +typedef unsigned char npy_bool; +typedef long npy_int32; +typedef unsigned long npy_uint32; +typedef unsigned long npy_ucs4; +typedef long npy_int64; +typedef unsigned long npy_uint64; +typedef unsigned char npy_uint8; + +typedef signed char npy_byte; From noreply at buildbot.pypy.org Fri Jan 16 12:31:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 12:31:13 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Better tests, and fix Message-ID: <20150116113113.3AED41C05C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75379:753a87cdaa56 Date: 2015-01-16 12:31 +0100 http://bitbucket.org/pypy/pypy/changeset/753a87cdaa56/ Log: Better tests, and fix diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2930,31 +2930,39 @@ eci = ExternalCompilationInfo( separate_module_sources=[''' 
#include - RPY_EXPORTED void test_call_release_gil_save_errno(void) { + RPY_EXPORTED long test_call_release_gil_save_errno( + long a, long b, long c, long d, long e, long f, long g) { errno = 42; + return (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); } ''']) fn_name = 'test_call_release_gil_save_errno' - func1_ptr = rffi.llexternal(fn_name, [], lltype.Void, + func1_ptr = rffi.llexternal(fn_name, [lltype.Signed]*7, lltype.Signed, compilation_info=eci, _nowrapper=True) func1_adr = rffi.cast(lltype.Signed, func1_ptr) - calldescr = self.cpu._calldescr_dynamic_for_tests([], types.void) + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) # for saveerr in [rffi.RFFI_ERR_NONE, rffi.RFFI_SAVE_ERRNO]: faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] + i1 = BoxInt() ops = [ ResOperation(rop.CALL_RELEASE_GIL, - [ConstInt(saveerr), ConstInt(func1_adr)], None, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), - ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(0)) + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) ] ops[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([], ops, looptoken) + self.cpu.compile_loop(inputargs, ops, looptoken) # llerrno.set_debug_saved_errno(self.cpu, 24) - self.cpu.execute_token(looptoken) + deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3) + original_result = self.cpu.get_int_value(deadframe, 0) result = llerrno.get_debug_saved_errno(self.cpu) print 'saveerr =', saveerr, ': got result =', result # @@ -2962,6 +2970,7 @@ assert result == 42 # from the C code else: assert result == 24 # not touched + assert original_result == 3456789 def test_call_release_gil_readsaved_errno(self): from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -2974,41 +2983,47 @@ separate_module_sources=[r''' #include #include - 
RPY_EXPORTED int test_call_release_gil_readsaved_errno(void) { - int r = errno; - printf("read saved errno: %d\n", r); + RPY_EXPORTED long test_call_release_gil_readsaved_errno( + long a, long b, long c, long d, long e, long f, long g) { + long r = errno; + printf("read saved errno: %ld\n", r); + r += 100 * (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); return r; } ''']) fn_name = 'test_call_release_gil_readsaved_errno' - func1_ptr = rffi.llexternal(fn_name, [], rffi.INT, + func1_ptr = rffi.llexternal(fn_name, [lltype.Signed]*7, lltype.Signed, compilation_info=eci, _nowrapper=True) func1_adr = rffi.cast(lltype.Signed, func1_ptr) - calldescr = self.cpu._calldescr_dynamic_for_tests([], types.sint32) + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) # for saveerr in [rffi.RFFI_READSAVED_ERRNO, rffi.RFFI_ZERO_ERRNO_BEFORE]: faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] i1 = BoxInt() ops = [ ResOperation(rop.CALL_RELEASE_GIL, - [ConstInt(saveerr), ConstInt(func1_adr)], i1, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) ] ops[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([], ops, looptoken) + self.cpu.compile_loop(inputargs, ops, looptoken) # llerrno.set_debug_saved_errno(self.cpu, 24) - deadframe = self.cpu.execute_token(looptoken) + deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3) result = self.cpu.get_int_value(deadframe, 0) assert llerrno.get_debug_saved_errno(self.cpu) == 24 # if saveerr == rffi.RFFI_READSAVED_ERRNO: - assert result == 24 + assert result == 24 + 345678900 else: - assert result == 0 + assert result == 0 + 345678900 def test_call_release_gil_save_lasterror(self): XXX diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- 
a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -159,9 +159,13 @@ p_errno = llerrno.get_p_errno_offset(self.asm.cpu) mc = self.mc mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) - mc.MOV_rm(edx.value, (eax.value, p_errno)) + if IS_X86_32: + tmpreg = edx + else: + tmpreg = r11 # edx is used for 3rd argument + mc.MOV_rm(tmpreg.value, (eax.value, p_errno)) mc.MOV32_rm(eax.value, (eax.value, rpy_errno)) - mc.MOV32_mr((edx.value, 0), eax.value) + mc.MOV32_mr((tmpreg.value, 0), eax.value) elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: # Same, but write zero. p_errno = llerrno.get_p_errno_offset(self.asm.cpu) From noreply at buildbot.pypy.org Fri Jan 16 12:40:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 12:40:01 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix test Message-ID: <20150116114001.97D381C0035@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75380:1e065a1cd0e4 Date: 2015-01-16 12:33 +0100 http://bitbucket.org/pypy/pypy/changeset/1e065a1cd0e4/ Log: fix test diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -200,14 +200,11 @@ assert res == 8.0 * 300 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('cfficall', """ - setarrayitem_raw(i69, 0, i95, descr=) # write 'errno' p96 = force_token() setfield_gc(p0, p96, descr=) - f97 = call_release_gil(i59, 1.0, 3, descr=) + f97 = call_release_gil(27, i59, 1.0, 3, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) 
- i98 = getarrayitem_raw(i69, 0, descr=) # read 'errno' - setfield_gc(p65, i98, descr=) """, ignore_ops=['guard_not_invalidated']) def test_cffi_call_guard_not_forced_fails(self): From noreply at buildbot.pypy.org Fri Jan 16 12:41:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 12:41:34 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix test Message-ID: <20150116114134.62E341C0035@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75381:0659cb12b380 Date: 2015-01-16 11:41 +0000 http://bitbucket.org/pypy/pypy/changeset/0659cb12b380/ Log: fix test diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -64,7 +64,7 @@ guard_true(i56, descr=...) p57 = force_token() setfield_gc(p0, p57, descr=) - i58 = call_release_gil(_, i37, 1, descr=) + i58 = call_release_gil(0, _, i37, 1, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) i59 = int_is_true(i58) @@ -72,14 +72,14 @@ i60 = int_sub(i44, 1) p62 = force_token() setfield_gc(p0, p62, descr=) - i63 = call_release_gil(_, i37, 0, descr=) + i63 = call_release_gil(0, _, i37, 0, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) i64 = int_is_true(i63) guard_false(i64, descr=...) p65 = force_token() setfield_gc(p0, p65, descr=) - call_release_gil(_, i37, descr=) + call_release_gil(0, _, i37, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) guard_not_invalidated(descr=...) 
From noreply at buildbot.pypy.org Fri Jan 16 12:58:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 12:58:41 +0100 (CET) Subject: [pypy-commit] pypy errno-again: start work on supporting GetLastError/SetLastError Message-ID: <20150116115841.7B6C21C0241@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75382:80dbb6ecf211 Date: 2015-01-16 12:58 +0100 http://bitbucket.org/pypy/pypy/changeset/80dbb6ecf211/ Log: start work on supporting GetLastError/SetLastError diff --git a/rpython/jit/backend/llsupport/llerrno.py b/rpython/jit/backend/llsupport/llerrno.py --- a/rpython/jit/backend/llsupport/llerrno.py +++ b/rpython/jit/backend/llsupport/llerrno.py @@ -17,6 +17,22 @@ else: return 3 * WORD + +def get_debug_saved_lasterror(cpu): + return cpu._debug_errno_container[4] + +def set_debug_saved_lasterror(cpu, nerrno): + assert nerrno >= 0 + cpu._debug_errno_container[4] = nerrno + +def get_rpy_lasterror_offset(cpu): + if cpu.translate_support_code: + from rpython.rlib import rthread + return rthread.tlfield_rpy_lasterror.getoffset() + else: + return 4 * WORD + + def _fetch_addr_errno(): eci = ExternalCompilationInfo( separate_module_sources=[''' diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3026,10 +3026,113 @@ assert result == 0 + 345678900 def test_call_release_gil_save_lasterror(self): - XXX + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") + if sys.platform != 'win32': + py.test.skip("Windows test only") + eci = ExternalCompilationInfo( + separate_module_sources=[''' + #include + RPY_EXPORTED + long __stdcall test_call_release_gil_save_lasterror( + long a, long b, long c, long d, long 
e, long f, long g) { + SetLastError(42); + return (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); + } + ''']) + fn_name = 'test_call_release_gil_save_lasterror' + func1_ptr = rffi.llexternal(fn_name, [lltype.Signed]*7, lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = rffi.cast(lltype.Signed, func1_ptr) + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) + # + for saveerr in [rffi.RFFI_SAVE_ERRNO, # but not _LASTERROR + rffi.RFFI_SAVE_LASTERROR]: + faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] + i1 = BoxInt() + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop(inputargs, ops, looptoken) + # + llerrno.set_debug_saved_lasterror(self.cpu, 24) + deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3) + original_result = self.cpu.get_int_value(deadframe, 0) + result = llerrno.get_debug_saved_lasterror(self.cpu) + print 'saveerr =', saveerr, ': got result =', result + # + if saveerr == rffi.RFFI_SAVE_LASTERROR: + assert result == 42 # from the C code + else: + assert result == 24 # not touched + assert original_result == 3456789 def test_call_release_gil_readsaved_lasterror(self): - XXX + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") + if sys.platform != 'win32': + py.test.skip("Windows test only") + eci = ExternalCompilationInfo( + separate_module_sources=[r''' + #include + #include + RPY_EXPORTED + long __stdcall test_call_release_gil_readsaved_lasterror( + long a, long b, long c, long d, 
long e, long f, long g) { + long r = GetLastError(); + printf("GetLastError() result: %ld\n", r); + r += 100 * (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); + return r; + } + ''']) + fn_name = 'test_call_release_gil_readsaved_lasterror' + func1_ptr = rffi.llexternal(fn_name, [lltype.Signed]*7, lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = rffi.cast(lltype.Signed, func1_ptr) + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) + # + for saveerr in [rffi.RFFI_READSAVED_LASTERROR]: + faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] + i1 = BoxInt() + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop(inputargs, ops, looptoken) + # + llerrno.set_debug_saved_lasterror(self.cpu, 24) + deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3) + result = self.cpu.get_int_value(deadframe, 0) + assert llerrno.get_debug_saved_lasterror(self.cpu) == 24 + # + assert result == 24 + 345678900 def test_guard_not_invalidated(self): cpu = self.cpu diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -7,6 +7,8 @@ from rpython.rlib import jit from rpython.translator.platform import platform +WIN32 = os.name == "nt" + class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() @@ -109,6 +111,9 @@ @specialize.call_location() def _errno_before(save_err): + if WIN32 and (save_err & rffi.RFFI_READSAVED_LASTERROR): + from rpython.rlib import rthread, rwin32 + rwin32._SetLastError(rthread.tlfield_rpy_lasterror.getraw()) if save_err & rffi.RFFI_READSAVED_ERRNO: from rpython.rlib import rthread 
_set_errno(rthread.tlfield_rpy_errno.getraw()) @@ -120,6 +125,9 @@ if save_err & rffi.RFFI_SAVE_ERRNO: from rpython.rlib import rthread rthread.tlfield_rpy_errno.setraw(_get_errno()) + if WIN32 and (save_err & rffi.RFFI_SAVE_LASTERROR): + from rpython.rlib import rthread, rwin32 + rthread.tlfield_rpy_lasterror.setraw(rwin32._GetLastError()) if os.name == 'nt': diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -123,12 +123,6 @@ _SetLastError = winexternal('SetLastError', [DWORD], lltype.Void, _nowrapper=True, sandboxsafe=True) - def GetLastError_real(): - return rffi.cast(lltype.Signed, _GetLastError()) - - def SetLastError_real(err): - _SetLastError(rffi.cast(DWORD, err)) - def GetLastError_saved(): from rpython.rlib import rthread return rffi.cast(lltype.Signed, rthread.tlfield_rpy_lasterror.getraw()) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -64,8 +64,8 @@ RFFI_ZERO_ERRNO_BEFORE = 4 # copy the value 0 into real errno before call RFFI_FULL_ERRNO = RFFI_SAVE_ERRNO | RFFI_READSAVED_ERRNO RFFI_FULL_ERRNO_ZERO = RFFI_SAVE_ERRNO | RFFI_ZERO_ERRNO_BEFORE -RFFI_SAVE_LASTERROR = 8 # XXX implement me! -RFFI_READSAVED_LASTERROR = 16 # XXX implement me! 
+RFFI_SAVE_LASTERROR = 8 # win32: save GetLastError() after the call +RFFI_READSAVED_LASTERROR = 16 # win32: call SetLastError() before the call RFFI_FULL_LASTERROR = RFFI_SAVE_LASTERROR | RFFI_READSAVED_LASTERROR RFFI_ERR_NONE = 0 RFFI_ERR_ALL = RFFI_FULL_ERRNO | RFFI_FULL_LASTERROR From noreply at buildbot.pypy.org Fri Jan 16 13:27:56 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Jan 2015 13:27:56 +0100 (CET) Subject: [pypy-commit] pypy default: fix test after b9d53b23c50b Message-ID: <20150116122756.870B91C02BE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75383:dcbbd83e551c Date: 2015-01-16 14:28 +0200 http://bitbucket.org/pypy/pypy/changeset/dcbbd83e551c/ Log: fix test after b9d53b23c50b diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -27,6 +27,6 @@ pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) - assert lltype.typeOf(res) == rffi.LONG + assert lltype.typeOf(res) == rffi.INT assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') From noreply at buildbot.pypy.org Fri Jan 16 14:23:06 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Jan 2015 14:23:06 +0100 (CET) Subject: [pypy-commit] pypy vmprof: haha Message-ID: <20150116132306.92C1C1C027F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75384:aaa75f8a1b3f Date: 2015-01-16 15:22 +0200 http://bitbucket.org/pypy/pypy/changeset/aaa75f8a1b3f/ Log: haha diff --git a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c --- a/pypy/module/_vmprof/src/get_custom_offset.c +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -6,7 +6,7 @@ static ptrdiff_t vmprof_unw_get_custom_offset(void* ip) { long ip_l = (long)ip; - if (ip < pypy_jit_start_addr() or ip > 
pypy_jit_end_addr()) { + if (ip < pypy_jit_start_addr() || ip > pypy_jit_end_addr()) { return -1; } return pypy_jit_stack_depth_at_loc(ip); From noreply at buildbot.pypy.org Fri Jan 16 14:32:44 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Jan 2015 14:32:44 +0100 (CET) Subject: [pypy-commit] pypy vmprof: an attempt to build the secondary entry points from the JIT in a more official manner Message-ID: <20150116133244.0F1F01C030A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75385:7b8f2499dabe Date: 2015-01-16 15:32 +0200 http://bitbucket.org/pypy/pypy/changeset/7b8f2499dabe/ Log: an attempt to build the secondary entry points from the JIT in a more official manner diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -25,7 +25,8 @@ from rpython.jit.codewriter.policy import JitPolicy from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES -from rpython.rlib.entrypoint import all_jit_entrypoints +from rpython.rlib.entrypoint import all_jit_entrypoints,\ + annotated_jit_entrypoints # ____________________________________________________________ @@ -681,6 +682,7 @@ def create_jit_entry_points(self): for func, args, result in all_jit_entrypoints: self.helper_func(lltype.Ptr(lltype.FuncType(args, result)), func) + annotated_jit_entrypoints.append((func, None)) def rewrite_access_helper(self, op): # make sure we make a copy of function so it no longer belongs diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -5,6 +5,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib.objectmodel import we_are_translated +annotated_jit_entrypoints = [] def export_symbol(func): func.exported_symbol = True diff --git a/rpython/translator/driver.py 
b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -10,7 +10,8 @@ from rpython.annotator import policy as annpolicy from rpython.tool.udir import udir from rpython.rlib.debug import debug_start, debug_print, debug_stop -from rpython.rlib.entrypoint import secondary_entrypoints +from rpython.rlib.entrypoint import secondary_entrypoints,\ + annotated_jit_entrypoints import py from rpython.tool.ansi_print import ansi_log @@ -416,10 +417,11 @@ from rpython.translator.c.genc import CStandaloneBuilder cbuilder = CStandaloneBuilder(self.translator, self.entry_point, config=self.config, - secondary_entrypoints=self.secondary_entrypoints) + secondary_entrypoints= + self.secondary_entrypoints + annotated_jit_entrypoints) else: from rpython.translator.c.dlltool import CLibraryBuilder - functions = [(self.entry_point, None)] + self.secondary_entrypoints + functions = [(self.entry_point, None)] + self.secondary_entrypoints + annotated_jit_entrypoints cbuilder = CLibraryBuilder(self.translator, self.entry_point, functions=functions, name='libtesting', From noreply at buildbot.pypy.org Fri Jan 16 15:31:25 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 16 Jan 2015 15:31:25 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Fix Object dtype str representation Message-ID: <20150116143125.A40EF1C0190@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75386:e7c050377daf Date: 2015-01-16 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/e7c050377daf/ Log: Fix Object dtype str representation diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -54,7 +54,7 @@ self.char = char self.w_box_type = w_box_type if byteorder is None: - if itemtype.get_element_size() == 1: + if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType): byteorder 
= NPY.IGNORE else: byteorder = NPY.NATIVE From noreply at buildbot.pypy.org Fri Jan 16 15:56:16 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 16 Jan 2015 15:56:16 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Pass space around Message-ID: <20150116145616.970591C027F@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75387:3126fa1b95b9 Date: 2015-01-16 15:54 +0100 http://bitbucket.org/pypy/pypy/changeset/3126fa1b95b9/ Log: Pass space around diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -254,16 +254,16 @@ def descr_repr(self, space): cache = get_appbridge_cache(space) if cache.w_array_repr is None: - return space.wrap(self.dump_data()) + return space.wrap(self.dump_data(space)) return space.call_function(cache.w_array_repr, self) def descr_str(self, space): cache = get_appbridge_cache(space) if cache.w_array_str is None: - return space.wrap(self.dump_data(prefix='', separator='', suffix='')) + return space.wrap(self.dump_data(space, prefix='', separator='', suffix='')) return space.call_function(cache.w_array_str, self) - def dump_data(self, prefix='array(', separator=',', suffix=')'): + def dump_data(self, space, prefix='array(', separator=',', suffix=')'): i, state = self.create_iter() first = True dtype = self.get_dtype() @@ -280,7 +280,7 @@ if self.is_scalar() and dtype.is_str(): s.append(dtype.itemtype.to_str(i.getitem(state))) else: - s.append(dtype.itemtype.str_format(i.getitem(state))) + s.append(dtype.itemtype.str_format(space, i.getitem(state))) state = i.next(state) if not self.is_scalar(): s.append(']') @@ -1189,7 +1189,7 @@ "improper dtype '%R'", dtype) self.implementation = W_NDimArray.from_shape_and_storage( space, [space.int_w(i) for i in space.listview(shape)], - rffi.str2charp(space.str_w(storage), track_allocation=False), + rffi.str2charp(space.str_w(storage), 
track_allocation=False), dtype, storage_bytes=space.len_w(storage), owning=True).implementation def descr___array_finalize__(self, space, w_obj): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1871,7 +1871,7 @@ def str_format(self, space, box): assert isinstance(box, boxes.W_VoidBox) arr = self.readarray(box.arr, box.ofs, 0, box.dtype) - return arr.dump_data(prefix='', suffix='') + return arr.dump_data(space, prefix='', suffix='') def to_builtin_type(self, space, item): ''' From the documentation of ndarray.item(): @@ -1980,7 +1980,7 @@ else: pieces.append(", ") val = tp.read(box.arr, box.ofs, ofs, subdtype) - pieces.append(tp.str_format(val)) + pieces.append(tp.str_format(space, val)) pieces.append(")") return "".join(pieces) From noreply at buildbot.pypy.org Fri Jan 16 16:29:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 16:29:01 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: fix Message-ID: <20150116152901.C21EF1C0305@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75388:6f3785abbca0 Date: 2015-01-16 15:41 +0100 http://bitbucket.org/pypy/pypy/changeset/6f3785abbca0/ Log: fix diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -406,6 +406,11 @@ return self.iseen_roots[value] = True + if isinstance(TYPE, lltype.GcOpaqueType): + self.consider_constant(lltype.typeOf(value.container), + value.container, gc) + return + if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)): typeid = self.get_type_id(TYPE) hdr = gc.gcheaderbuilder.new_header(value) From noreply at buildbot.pypy.org Fri Jan 16 16:29:03 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 16:29:03 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Add a custom ll operation to read the length 
of any ("simple enough") GC array. Message-ID: <20150116152903.393431C0305@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75389:95b1d6cff0ee Date: 2015-01-16 16:28 +0100 http://bitbucket.org/pypy/pypy/changeset/95b1d6cff0ee/ Log: Add a custom ll operation to read the length of any ("simple enough") GC array. diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -740,6 +740,10 @@ return lltype.cast_opaque_ptr(RESTYPE, obj) op_cast_opaque_ptr.need_result_type = True + def op_length_of_simple_gcarray_from_opaque(self, obj): + checkptr(obj) + return lltype.length_of_simple_gcarray_from_opaque(obj) + def op_cast_ptr_to_adr(self, ptr): checkptr(ptr) return llmemory.cast_ptr_to_adr(ptr) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -396,6 +396,7 @@ 'direct_arrayitems': LLOp(canfold=True), 'direct_ptradd': LLOp(canfold=True), 'cast_opaque_ptr': LLOp(sideeffects=False), + 'length_of_simple_gcarray_from_opaque': LLOp(sideeffects=False), # __________ address operations __________ diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1025,6 +1025,21 @@ return SomePtr(ll_ptrtype=typeOf(cast_p)) +def length_of_simple_gcarray_from_opaque(opaque_ptr): + CURTYPE = typeOf(opaque_ptr) + if not isinstance(CURTYPE, Ptr): + raise TypeError("can only cast pointers to other pointers") + if not isinstance(CURTYPE.TO, GcOpaqueType): + raise TypeError("expected a GcOpaqueType") + return opaque_ptr._obj.container.getlength() + + at analyzer_for(length_of_simple_gcarray_from_opaque) +def ann_length_of_simple_gcarray_from_opaque(s_p): + assert isinstance(s_p, SomePtr), "casting of non-pointer: 
%r" % s_p + assert isinstance(s_p.ll_ptrtype.TO, GcOpaqueType) + return SomeInteger(nonneg=True) + + def direct_fieldptr(structptr, fieldname): """Get a pointer to a field in the struct. The resulting pointer is actually of type Ptr(FixedSizeArray(FIELD, 1)). diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -604,7 +604,7 @@ # xxx Haaaack: returns len(d.indexes). Works independently of # the exact type pointed to by d, using a forced cast... # Must only be called by @jit.dont_look_inside functions. - return len(rffi.cast(DICTINDEX_BYTE, d.indexes)) + return lltype.length_of_simple_gcarray_from_opaque(d.indexes) def _overallocate_entries_len(baselen): # This over-allocates proportional to the list size, making room diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -445,6 +445,14 @@ return hop.genop('cast_opaque_ptr', [v_input], # v_type implicit in r_result resulttype = hop.r_result.lowleveltype) + at typer_for(lltype.length_of_simple_gcarray_from_opaque) +def rtype_length_of_simple_gcarray_from_opaque(hop): + assert isinstance(hop.args_r[0], rptr.PtrRepr) + v_opaque_ptr, = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('length_of_simple_gcarray_from_opaque', [v_opaque_ptr], + resulttype = hop.r_result.lowleveltype) + @typer_for(lltype.direct_fieldptr) def rtype_direct_fieldptr(hop): assert isinstance(hop.args_r[0], rptr.PtrRepr) diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -653,6 +653,11 @@ OP_CAST_ADR_TO_PTR = OP_CAST_POINTER OP_CAST_OPAQUE_PTR = OP_CAST_POINTER + def OP_LENGTH_OF_SIMPLE_GCARRAY_FROM_OPAQUE(self, op): + return ('%s = *(long *)(((char *)%s) + sizeof(struct 
pypy_header0));' + ' /* length_of_simple_gcarray_from_opaque */' + % (self.expr(op.result), self.expr(op.args[0]))) + def OP_CAST_INT_TO_PTR(self, op): TYPE = self.lltypemap(op.result) typename = self.db.gettype(TYPE) diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -246,3 +246,13 @@ assert res == 456 res = fc(77) assert res == 123 + +def test_gcarray_length(): + A = lltype.GcArray(lltype.Char) + def f(): + a = lltype.malloc(A, 117) + p = lltype.cast_opaque_ptr(GCREF, a) + return lltype.length_of_simple_gcarray_from_opaque(p) + fc = compile(f, []) + res = fc() + assert res == 117 From noreply at buildbot.pypy.org Fri Jan 16 16:32:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 16:32:45 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: This test happens to not pass any more with refcounting Message-ID: <20150116153245.681651C0241@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75390:6cdda2220edc Date: 2015-01-16 16:32 +0100 http://bitbucket.org/pypy/pypy/changeset/6cdda2220edc/ Log: This test happens to not pass any more with refcounting diff --git a/pypy/module/_multibytecodec/test/test_translation.py b/pypy/module/_multibytecodec/test/test_translation.py --- a/pypy/module/_multibytecodec/test/test_translation.py +++ b/pypy/module/_multibytecodec/test/test_translation.py @@ -1,8 +1,11 @@ from pypy.module._multibytecodec import c_codecs from rpython.translator.c.test import test_standalone +from rpython.config.translationoption import get_combined_translation_config class TestTranslation(test_standalone.StandaloneTests): + config = get_combined_translation_config(translating=True) + config.translation.gc = 'boehm' def test_translation(self): # From noreply at buildbot.pypy.org Fri Jan 16 16:36:17 2015 From: noreply at 
buildbot.pypy.org (Raemi) Date: Fri, 16 Jan 2015 16:36:17 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: make old allocations happen in the sharing segment, fix not tracing them and add a test Message-ID: <20150116153617.1E4991C0241@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1534:8c3e4f9f95a9 Date: 2015-01-16 11:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/8c3e4f9f95a9/ Log: make old allocations happen in the sharing segment, fix not tracing them and add a test diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -151,8 +151,9 @@ if (copy_from_segnum == -1) { /* this page is only accessible in the sharing segment so far (new - allocation). We can thus simply mark it accessible here and - not care about its contents so far. */ + allocation). We can thus simply mark it accessible here. */ + pagecopy(get_virtual_page(my_segnum, pagenum), + get_virtual_page(0, pagenum)); release_all_privatization_locks(); return; } @@ -287,9 +288,6 @@ break; if (first_cl->next == INEV_RUNNING) { -#if STM_TESTS - stm_abort_transaction(); -#endif /* need to reach safe point if an INEV transaction is waiting for us, otherwise deadlock */ break; @@ -473,6 +471,15 @@ { if (!_stm_validate()) stm_abort_transaction(); + +#if STM_TESTS + if (STM_PSEGMENT->transaction_state != TS_INEVITABLE + && STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) { + /* abort for tests... 
*/ + stm_abort_transaction(); + } +#endif + } @@ -1043,6 +1050,7 @@ if (i == 0 || (get_page_status_in(i, page) != PAGE_NO_ACCESS)) { /* shared or private, but never segfault */ char *dst = REAL_ADDRESS(get_segment_base(i), frag); + dprintf(("-> flush %p to seg %lu\n", frag, i)); memcpy(dst, src, frag_size); } } diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -58,8 +58,13 @@ if (addr == NULL) stm_fatalerror("not enough memory!"); - if (LIKELY(addr + size <= uninitialized_page_start)) + if (LIKELY(addr + size <= uninitialized_page_start)) { + dprintf(("allocate_outside_nursery_large(%lu): %p, page=%lu\n", + size, (char*)(addr - stm_object_pages), + (uintptr_t)(addr - stm_object_pages) / 4096UL)); + return (stm_char*)(addr - stm_object_pages); + } /* uncommon case: need to initialize some more pages */ @@ -95,9 +100,15 @@ stm_char *p = allocate_outside_nursery_large(size_rounded_up); object_t *o = (object_t *)p; - memset(get_virtual_address(STM_SEGMENT->segment_num, o), 0, size_rounded_up); + // sharing seg0 needs to be current: + assert(STM_SEGMENT->segment_num == 0); + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); o->stm_flags = GCFLAG_WRITE_BARRIER; + if (testing_prebuilt_objs == NULL) + testing_prebuilt_objs = list_create(); + LIST_APPEND(testing_prebuilt_objs, o); + dprintf(("allocate_old(%lu): %p, seg=%d, page=%lu\n", size_rounded_up, p, get_segment_of_linear_address(stm_object_pages + (uintptr_t)p), @@ -374,7 +385,7 @@ { /* this is called by _stm_largemalloc_sweep() */ object_t *obj = (object_t *)(data - stm_object_pages); - //dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj))); + dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj))); if (!mark_visited_test_and_clear(obj)) { /* This is actually needed in order to avoid random write-read conflicts with objects read and freed long in the past. 
diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -78,14 +78,16 @@ *pobj = pforwarded_array[1]; /* already moved */ return; } - else { - /* really has a shadow */ - nobj = find_existing_shadow(obj); - obj->stm_flags &= ~GCFLAG_HAS_SHADOW; - realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - size = stmcb_size_rounded_up((struct object_s *)realobj); - goto copy_large_object; - } + + /* really has a shadow */ + nobj = find_existing_shadow(obj); + obj->stm_flags &= ~GCFLAG_HAS_SHADOW; + realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size = stmcb_size_rounded_up((struct object_s *)realobj); + + dprintf(("has_shadow(%p): %p, sz:%lu\n", + obj, nobj, size)); + goto copy_large_object; } realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); @@ -454,6 +456,8 @@ tree_insert(STM_PSEGMENT->nursery_objects_shadows, (uintptr_t)obj, (uintptr_t)nobj); + + dprintf(("allocate_shadow(%p): %p\n", obj, nobj)); return nobj; } diff --git a/c8/stm/pages.c b/c8/stm/pages.c --- a/c8/stm/pages.c +++ b/c8/stm/pages.c @@ -63,7 +63,7 @@ static void page_mark_accessible(long segnum, uintptr_t pagenum) { - assert(get_page_status_in(segnum, pagenum) == PAGE_NO_ACCESS); + assert(segnum==0 || get_page_status_in(segnum, pagenum) == PAGE_NO_ACCESS); dprintf(("page_mark_accessible(%lu) in seg:%ld\n", pagenum, segnum)); dprintf(("RW(seg%ld, page%lu)\n", segnum, pagenum)); @@ -82,7 +82,7 @@ __attribute__((unused)) static void page_mark_inaccessible(long segnum, uintptr_t pagenum) { - assert(get_page_status_in(segnum, pagenum) == PAGE_ACCESSIBLE); + assert(segnum==0 || get_page_status_in(segnum, pagenum) == PAGE_ACCESSIBLE); dprintf(("page_mark_inaccessible(%lu) in seg:%ld\n", pagenum, segnum)); set_page_status_in(segnum, pagenum, PAGE_NO_ACCESS); diff --git a/c8/stm/pages.h b/c8/stm/pages.h --- a/c8/stm/pages.h +++ b/c8/stm/pages.h @@ -63,7 +63,7 @@ static inline bool get_page_status_in(long segnum, uintptr_t pagenum) { /* reading page 
status requires "read"-lock: */ - assert(STM_PSEGMENT->privatization_lock); + assert(STM_SEGMENT->segment_num==0 || STM_PSEGMENT->privatization_lock); OPT_ASSERT(segnum < 8 * sizeof(struct page_shared_s)); volatile struct page_shared_s *ps = (volatile struct page_shared_s *) diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -169,6 +169,8 @@ (_allocate_small_slowpath(size) - stm_object_pages); *fl = result->next; + dprintf(("allocate_outside_nursery_small(%lu): %p\n", + size, (char*)((char *)result - stm_object_pages))); return (stm_char*) ((char *)result - stm_object_pages); } @@ -178,7 +180,9 @@ stm_char *p = allocate_outside_nursery_small(size_rounded_up); object_t *o = (object_t *)p; - memset(get_virtual_address(STM_SEGMENT->segment_num, o), 0, size_rounded_up); + // sharing seg0 needs to be current: + assert(STM_SEGMENT->segment_num == 0); + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); o->stm_flags = GCFLAG_WRITE_BARRIER; dprintf(("allocate_old_small(%lu): %p, seg=%d, page=%lu\n", diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -497,8 +497,8 @@ self.tls = [_allocate_thread_local() for i in range(self.NB_THREADS)] self.current_thread = 0 # force-switch back to segment 0 so that when we do something - # outside of transactions before the test, it happens in seg0 - self.switch_to_segment(0) + # outside of transactions before the test, it happens in sharing seg0 + lib._stm_test_switch_segment(-1) def teardown_method(self, meth): lib.stmcb_expand_marker = ffi.NULL diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -281,3 +281,11 @@ p1 = self.pop_root() assert stm_get_char(p1) == 'o' assert stm_get_char(p2) == 't' + + def test_keepalive_prebuilt(self): + stm_allocate_old(64) + self.start_transaction() + assert lib._stm_total_allocated() == 64 + LMO # 
large malloc'd + stm_major_collect() + assert lib._stm_total_allocated() == 64 + LMO # large malloc'd + self.commit_transaction() diff --git a/c8/test/test_random.py b/c8/test/test_random.py --- a/c8/test/test_random.py +++ b/c8/test/test_random.py @@ -11,7 +11,7 @@ self.executed = [] def do(self, cmd): - color = ">> \033[%dm" % (31 + (self.thread_num + 5) % 6) + color = ">> \033[%dm" % (31 + (self.thread_num + 6) % 6) print >> sys.stderr, color + cmd + "\033[0m" self.executed.append(cmd) exec cmd in globals(), self.content @@ -579,7 +579,7 @@ op_assert_size, op_assert_modified, op_minor_collect, - #op_major_collect, + op_major_collect, ] for _ in range(2000): # make sure we are in a transaction: From noreply at buildbot.pypy.org Fri Jan 16 16:36:19 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 16 Jan 2015 16:36:19 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: progress Message-ID: <20150116153619.AF6761C0241@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1535:50c110de7f3a Date: 2015-01-16 13:53 +0100 http://bitbucket.org/pypy/stmgc/changeset/50c110de7f3a/ Log: progress diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -298,9 +298,6 @@ cl = first_cl; while ((next_cl = cl->next) != NULL) { if (next_cl == INEV_RUNNING) { -#if STM_TESTS - stm_abort_transaction(); -#endif /* only validate entries up to INEV */ break; } @@ -337,6 +334,8 @@ */ reset_modified_from_backup_copies(my_segnum); needs_abort = true; + + dprintf(("_stm_validate() failed for obj %p\n", undo->object)); break; } } @@ -419,6 +418,14 @@ stm_abort_transaction(); } +#if STM_TESTS + if (STM_PSEGMENT->transaction_state != TS_INEVITABLE + && STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) { + /* abort for tests... 
*/ + stm_abort_transaction(); + } +#endif + /* try to attach to commit log: */ old = STM_PSEGMENT->last_commit_log_entry; if (old->next == NULL) { @@ -479,7 +486,6 @@ stm_abort_transaction(); } #endif - } diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -216,7 +216,7 @@ { #ifndef NDEBUG long l; - for (l = 1; l < NB_SEGMENTS; l++) { + for (l = 0; l < NB_SEGMENTS; l++) { if (!get_priv_segment(l)->privatization_lock) return false; } @@ -228,8 +228,9 @@ static inline void acquire_all_privatization_locks() { + /* XXX: don't do for the sharing seg0 */ long l; - for (l = 1; l < NB_SEGMENTS; l++) { + for (l = 0; l < NB_SEGMENTS; l++) { acquire_privatization_lock(l); } } @@ -237,7 +238,7 @@ static inline void release_all_privatization_locks() { long l; - for (l = NB_SEGMENTS-1; l >= 1; l--) { + for (l = NB_SEGMENTS-1; l >= 0; l--) { release_privatization_lock(l); } } @@ -269,7 +270,7 @@ /* acquire locks in global order */ int i; - for (i = 1; i < NB_SEGMENTS; i++) { + for (i = 0; i < NB_SEGMENTS; i++) { if ((seg_set & (1 << i)) == 0) continue; @@ -283,7 +284,7 @@ OPT_ASSERT(seg_set < (1 << NB_SEGMENTS)); int i; - for (i = 1; i < NB_SEGMENTS; i++) { + for (i = 0; i < NB_SEGMENTS; i++) { if ((seg_set & (1 << i)) == 0) continue; diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -359,7 +359,6 @@ */ lst = pseg->objects_pointing_to_nursery; if (!list_is_empty(lst)) { - abort(); // check that there is a test LIST_FOREACH_R(lst, object_t* /*item*/, ({ struct object_s *realobj = (struct object_s *) diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -391,7 +391,17 @@ for (i = 0; i < NB_SEGMENTS; i++) { set_gs_register(get_segment_base(i)); + assert(!must_abort()); if (!_stm_validate()) { + assert(i != 0); /* sharing seg0 should never need an abort */ + + if (STM_PSEGMENT->transaction_state == TS_NONE) { + /* we found a segment that has stale read-marker data 
and thus + is in conflict with committed objs. Since it is not running + currently, it's fine to ignore it. */ + continue; + } + /* tell it to abort when continuing */ STM_PSEGMENT->pub.nursery_end = NSE_SIGABORT; assert(must_abort()); diff --git a/c8/stm/pages.h b/c8/stm/pages.h --- a/c8/stm/pages.h +++ b/c8/stm/pages.h @@ -63,7 +63,7 @@ static inline bool get_page_status_in(long segnum, uintptr_t pagenum) { /* reading page status requires "read"-lock: */ - assert(STM_SEGMENT->segment_num==0 || STM_PSEGMENT->privatization_lock); + assert(STM_PSEGMENT->privatization_lock); OPT_ASSERT(segnum < 8 * sizeof(struct page_shared_s)); volatile struct page_shared_s *ps = (volatile struct page_shared_s *) diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -289,3 +289,23 @@ stm_major_collect() assert lib._stm_total_allocated() == 64 + LMO # large malloc'd self.commit_transaction() + + def test_bug(self): + lp_ref_4 = stm_allocate_old_refs(50) + # + self.start_transaction() + stm_set_ref(lp_ref_4, 0, ffi.NULL, False) + # + self.switch(1) + self.start_transaction() + self.become_inevitable() + # + py.test.raises(Conflict, self.switch, 0) + # + self.switch(1) + + stm_set_ref(lp_ref_4, 0, ffi.NULL, False) + + self.commit_transaction() + self.start_transaction() + stm_major_collect() From noreply at buildbot.pypy.org Fri Jan 16 16:36:20 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 16 Jan 2015 16:36:20 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: (try to) not trace objs in segments they are not accessible in Message-ID: <20150116153620.DB4A01C0241@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1536:90490c3e14aa Date: 2015-01-16 14:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/90490c3e14aa/ Log: (try to) not trace objs in segments they are not accessible in diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ 
b/c8/stm/gcpage.c @@ -220,10 +220,15 @@ assert(list_is_empty(marked_objects_to_trace)); + /* trace into the object (the version from 'segment_base') */ + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(segment_base, obj); + stmcb_trace(realobj, &mark_record_trace); + + /* trace all references found in sharing seg0 (should always be + up-to-date and not cause segfaults) */ while (1) { - /* trace into the object (the version from 'segment_base') */ - struct object_s *realobj = - (struct object_s *)REAL_ADDRESS(segment_base, obj); + realobj = (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); stmcb_trace(realobj, &mark_record_trace); if (list_is_empty(marked_objects_to_trace)) From noreply at buildbot.pypy.org Fri Jan 16 16:36:22 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 16 Jan 2015 16:36:22 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix Message-ID: <20150116153622.5EC441C0241@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1537:593ef95f4496 Date: 2015-01-16 14:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/593ef95f4496/ Log: fix diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -848,6 +848,7 @@ #endif tl->last_abort__bytes_in_nursery = bytes_in_nursery; + list_clear(pseg->objects_pointing_to_nursery); #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") } diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -391,7 +391,6 @@ for (i = 0; i < NB_SEGMENTS; i++) { set_gs_register(get_segment_base(i)); - assert(!must_abort()); if (!_stm_validate()) { assert(i != 0); /* sharing seg0 should never need an abort */ diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -309,3 +309,22 @@ self.commit_transaction() self.start_transaction() stm_major_collect() + + def test_bug2(self): + lp_ref_4 = stm_allocate_old(16) + # + 
self.start_transaction() + stm_set_char(lp_ref_4, 'x') + # + self.switch(1) + self.start_transaction() + stm_set_char(lp_ref_4, 'y') + # + self.switch(0) + self.commit_transaction() + self.start_transaction() + stm_major_collect() + stm_major_collect() + stm_major_collect() + # + py.test.raises(Conflict, self.switch, 1) From noreply at buildbot.pypy.org Fri Jan 16 16:36:23 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 16 Jan 2015 16:36:23 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: uncomment some important code for resizing largemalloc when grabbing new pages for smallmalloc... Message-ID: <20150116153623.7974C1C030A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1538:6872a592d2bd Date: 2015-01-16 15:31 +0100 http://bitbucket.org/pypy/stmgc/changeset/6872a592d2bd/ Log: uncomment some important code for resizing largemalloc when grabbing new pages for smallmalloc... diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -9,7 +9,7 @@ static void setup_gcpage(void) { char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; - uintptr_t length = (NB_PAGES - END_NURSERY_PAGE) * 4096UL; + uintptr_t length = NB_SHARED_PAGES * 4096UL; _stm_largemalloc_init_arena(base, length); uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; diff --git a/c8/stm/largemalloc.c b/c8/stm/largemalloc.c --- a/c8/stm/largemalloc.c +++ b/c8/stm/largemalloc.c @@ -32,7 +32,7 @@ typedef struct malloc_chunk { size_t prev_size; /* - if the previous chunk is free: size of its data - otherwise, if this chunk is free: 1 - - otherwise, 0. */ + - otherwise, 0. 
both chunks used */ size_t size; /* size of the data in this chunk */ dlist_t d; /* if free: a doubly-linked list 'largebins' */ @@ -393,6 +393,7 @@ /* unlink the following chunk */ unlink_chunk(mscan); + #ifndef NDEBUG mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -62,10 +62,8 @@ uninitialized_page_stop -= decrease_by; first_small_uniform_loc = uninitialized_page_stop - stm_object_pages; - /* XXX: */ - /* char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; */ - /* if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - base)) */ - /* goto out_of_memory; */ + if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - uninitialized_page_start)) + goto out_of_memory; /* make writable in sharing seg */ setup_N_pages(uninitialized_page_stop, GCPAGE_NUM_PAGES); diff --git a/c8/test/test_largemalloc.py b/c8/test/test_largemalloc.py --- a/c8/test/test_largemalloc.py +++ b/c8/test/test_largemalloc.py @@ -49,6 +49,25 @@ # lib._stm_large_dump() + def test_random_sweep(self): + @ffi.callback("bool(char *)") + def keep(data): + print "keep?", data, data not in to_free + return data not in to_free + lib._stm_largemalloc_keep = keep + + OBJS = 6 + FREE = 2 + random.seed(12) + for _ in range(100): + allocd = {lib._stm_large_malloc(64) for _ in range(OBJS)} + while allocd: + to_free = set(random.sample(allocd, min(FREE, len(allocd)))) + print "allocd", allocd, "free", to_free + lib._stm_largemalloc_sweep() + allocd -= to_free + + def test_overflow_1(self): d = lib._stm_large_malloc(self.size - 32) assert ra(d) == self.rawmem + 16 From noreply at buildbot.pypy.org Fri Jan 16 16:57:00 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 16 Jan 2015 16:57:00 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix for old-smallmalloced objs Message-ID: 
<20150116155700.3710A1C0305@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1539:3ae30d429859 Date: 2015-01-16 16:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/3ae30d429859/ Log: fix for old-smallmalloced objs diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -183,7 +183,7 @@ memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); o->stm_flags = GCFLAG_WRITE_BARRIER; - dprintf(("allocate_old_small(%lu): %p, seg=%d, page=%lu\n", + dprintf(("_stm_allocate_old_small(%lu): %p, seg=%d, page=%lu\n", size_rounded_up, p, get_segment_of_linear_address(stm_object_pages + (uintptr_t)p), (uintptr_t)p / 4096UL)); @@ -201,7 +201,7 @@ return _stm_smallmalloc_keep((char*)(p - stm_object_pages)); } #endif - abort(); + return true; //return smallmalloc_keep_object_at(p); } @@ -248,7 +248,6 @@ } else if (!_smallmalloc_sweep_keep(p)) { /* the location should be freed now */ - //dprintf(("free small obj %p\n", (object_t*)(p - stm_object_pages))); #ifdef STM_TESTS /* fill location with 0xdd in all segs except seg0 */ int j; @@ -258,6 +257,7 @@ if (get_page_status_in(j, page) == PAGE_ACCESSIBLE) memset(get_virtual_address(j, obj), 0xdd, szword*8); #endif + //dprintf(("free small %p : %lu\n", (char*)(p - stm_object_pages), szword*8)); if (flprev == NULL) { flprev = (struct small_free_loc_s *)p; @@ -273,6 +273,7 @@ any_object_dying = true; } else { + //dprintf(("keep small %p : %lu\n", (char*)(p - stm_object_pages), szword*8)); any_object_remaining = true; } } @@ -308,7 +309,8 @@ small_page_lists[szword] = NULL; /* process the pages that the various segments are busy filling */ - for (i = 1; i < NB_SEGMENTS; i++) { + /* including sharing seg0 for old-malloced things */ + for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); struct small_free_loc_s **fl = &pseg->small_malloc_data.loc_free[szword]; diff --git 
a/c8/test/test_smallmalloc.py b/c8/test/test_smallmalloc.py --- a/c8/test/test_smallmalloc.py +++ b/c8/test/test_smallmalloc.py @@ -51,7 +51,8 @@ # allocate a page's worth of objs page0 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)] assert len(set(map(pageof, page0))) == 1, "all in the same page" - tid = lib._get_type_id(page0[0]) # 58 + tid = lib._get_type_id(page0[0]) + assert tid == 58, "current way to do it" # repeatedly free a subset until no objs are left in that page while len(page0) > 0: @@ -59,6 +60,8 @@ self.keep_me = set(random.sample(page0, len(page0) // 2)) self.has_been_asked_for = [] lib._stm_smallmalloc_sweep() + + print len(page0), len(self.has_been_asked_for) assert sorted(page0) == self.has_been_asked_for, "all objs were observed" # get list of objs that were not freed From noreply at buildbot.pypy.org Fri Jan 16 16:57:01 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 16 Jan 2015 16:57:01 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: first failing test for smallmalloc during major gc Message-ID: <20150116155701.6C6BE1C0305@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1540:0ad00b13789a Date: 2015-01-16 16:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/0ad00b13789a/ Log: first failing test for smallmalloc during major gc diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -328,3 +328,18 @@ stm_major_collect() # py.test.raises(Conflict, self.switch, 1) + + def test_small_major_collection(self): + self.start_transaction() + new = stm_allocate(16) + self.push_root(new) + stm_minor_collect() + assert lib._stm_total_allocated() == 16 + + new = self.pop_root() + assert not is_in_nursery(new) + stm_minor_collect() + assert lib._stm_total_allocated() == 16 + + stm_major_collect() + assert lib._stm_total_allocated() == 0 From noreply at buildbot.pypy.org Fri Jan 16 17:00:34 2015 From: noreply at 
buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 17:00:34 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Fix this test (unsure if it's worth it but well) Message-ID: <20150116160034.E25021C030B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75391:dfe68418c314 Date: 2015-01-16 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/dfe68418c314/ Log: Fix this test (unsure if it's worth it but well) diff --git a/rpython/translator/tool/staticsizereport.py b/rpython/translator/tool/staticsizereport.py --- a/rpython/translator/tool/staticsizereport.py +++ b/rpython/translator/tool/staticsizereport.py @@ -3,6 +3,7 @@ from rpython.tool.ansicolor import red, yellow, green from rpython.rtyper.lltypesystem.lltype import typeOf, _ptr, Ptr, ContainerType +from rpython.rtyper.lltypesystem.lltype import GcOpaqueType from rpython.rtyper.lltypesystem import llmemory from rpython.memory.lltypelayout import convert_offset_to_int @@ -54,6 +55,8 @@ if isinstance(typeOf(value), Ptr): container = value._obj if isinstance(typeOf(container), ContainerType): + if isinstance(typeOf(container), GcOpaqueType): + container = container.container node = database.getcontainernode(container) if node.nodekind != 'func': nodes.append(node) @@ -77,7 +80,10 @@ return 0 else: length = None - return convert_offset_to_int(llmemory.sizeof(TYPE, length)) + #print obj, ', length =', length + r = convert_offset_to_int(llmemory.sizeof(TYPE, length)) + #print '\tr =', r + return r def guess_size(database, node, recursive=None): diff --git a/rpython/translator/tool/test/test_staticsizereport.py b/rpython/translator/tool/test/test_staticsizereport.py --- a/rpython/translator/tool/test/test_staticsizereport.py +++ b/rpython/translator/tool/test/test_staticsizereport.py @@ -57,10 +57,17 @@ P = rffi.sizeof(rffi.VOIDP) B = 1 # bool assert guess_size(func.builder.db, dictvalnode, set()) > 100 - assert guess_size(func.builder.db, dictvalnode2, set()) == 2 * S + 1 * P + 
1 * S + 8 * (2*S + 1 * B) + assert guess_size(func.builder.db, dictvalnode2, set()) == ( + (4 * S + 2 * P) + # struct dicttable + (S + 8) + # indexes, length 8 + (S + S + S)) # entries, length 1 r_set = set() dictnode_size = guess_size(db, test_dictnode, r_set) - assert dictnode_size == 2 * S + 1 * P + 1 * S + (4096-256) * (1*S+1*P + (1 * S + 1*P + 5)) + (8192-4096+256) * (1*S+1*P) + assert dictnode_size == ( + (4 * S + 2 * P) + # struct dicttable + (S + 2 * 8192) + # indexes, length 8192, rffi.USHORT + (S + (S + S) * 3840) + # entries, length 3840 + (S + S + 5) * 3840) # 3840 strings with 5 chars each assert guess_size(func.builder.db, fixarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(lltype.Signed) assert guess_size(func.builder.db, dynarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 2 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(rffi.VOIDP) From noreply at buildbot.pypy.org Fri Jan 16 17:23:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 17:23:23 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Try to fix things on windows Message-ID: <20150116162323.C7DF91C0313@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75392:770d44c07c9a Date: 2015-01-16 17:23 +0100 http://bitbucket.org/pypy/pypy/changeset/770d44c07c9a/ Log: Try to fix things on windows diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -169,15 +169,18 @@ argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" - from rpython.rlib import rposix + if %(save_err)d: + from rpython.rlib import rposix def call_external_function(%(argnames)s): before = aroundstate.before if before: before() # NB. it is essential that no exception checking occurs here! 
- rposix._errno_before(%(save_err)d) + if %(save_err)d: + rposix._errno_before(%(save_err)d) res = funcptr(%(argnames)s) - rposix._errno_after(%(save_err)d) + if %(save_err)d: + rposix._errno_after(%(save_err)d) after = aroundstate.after if after: after() return res @@ -212,12 +215,15 @@ # to hide it from the JIT... argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" - from rpython.rlib import rposix + if %(save_err)d: + from rpython.rlib import rposix def call_external_function(%(argnames)s): - rposix._errno_before(%(save_err)d) + if %(save_err)d: + rposix._errno_before(%(save_err)d) res = funcptr(%(argnames)s) - rposix._errno_after(%(save_err)d) + if %(save_err)d: + rposix._errno_after(%(save_err)d) return res """ % locals()) miniglobals = {'funcptr': funcptr, From noreply at buildbot.pypy.org Fri Jan 16 17:25:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 17:25:51 +0100 (CET) Subject: [pypy-commit] pypy errno-again: still trying Message-ID: <20150116162551.12E401C0313@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75393:b39953890ab8 Date: 2015-01-16 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/b39953890ab8/ Log: still trying diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -156,7 +156,6 @@ assert save_err == RFFI_ERR_NONE return funcptr - if invoke_around_handlers: # The around-handlers are releasing the GIL in a threaded pypy. # We need tons of care to ensure that no GC operation and no @@ -169,17 +168,16 @@ argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" - if %(save_err)d: - from rpython.rlib import rposix - def call_external_function(%(argnames)s): before = aroundstate.before if before: before() # NB. it is essential that no exception checking occurs here! 
if %(save_err)d: + from rpython.rlib import rposix rposix._errno_before(%(save_err)d) res = funcptr(%(argnames)s) if %(save_err)d: + from rpython.rlib import rposix rposix._errno_after(%(save_err)d) after = aroundstate.after if after: after() @@ -215,14 +213,13 @@ # to hide it from the JIT... argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" - if %(save_err)d: - from rpython.rlib import rposix - def call_external_function(%(argnames)s): if %(save_err)d: + from rpython.rlib import rposix rposix._errno_before(%(save_err)d) res = funcptr(%(argnames)s) if %(save_err)d: + from rpython.rlib import rposix rposix._errno_after(%(save_err)d) return res """ % locals()) From noreply at buildbot.pypy.org Fri Jan 16 18:22:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 18:22:42 +0100 (CET) Subject: [pypy-commit] pypy errno-again: in-progress: windows Message-ID: <20150116172242.2DC121C0313@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75394:944e6eb53a35 Date: 2015-01-16 18:01 +0100 http://bitbucket.org/pypy/pypy/changeset/944e6eb53a35/ Log: in-progress: windows diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -441,7 +441,7 @@ lltype.nullptr(rwin32.LPDWORD.TO), lltype.nullptr(rwin32.LPDWORD.TO), left_ptr): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) length = intmask(read_ptr[0] + left_ptr[0]) if length > maxlength: # bad message, close connection @@ -460,7 +460,7 @@ read_ptr, rffi.NULL) if not result: rffi.free_charp(newbuf) - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) assert read_ptr[0] == left_ptr[0] return length, newbuf @@ -480,7 +480,7 @@ 
lltype.nullptr(rwin32.LPDWORD.TO), bytes_ptr, lltype.nullptr(rwin32.LPDWORD.TO)): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) bytes = bytes_ptr[0] finally: lltype.free(bytes_ptr, flavor='raw') @@ -506,7 +506,8 @@ lltype.nullptr(rwin32.LPDWORD.TO), bytes_ptr, lltype.nullptr(rwin32.LPDWORD.TO)): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, + rwin32.lastSavedWindowsError()) bytes = bytes_ptr[0] finally: lltype.free(bytes_ptr, flavor='raw') diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -31,7 +31,8 @@ rwin32.BOOL, releasegil=False) _ReleaseSemaphore = rwin32.winexternal( 'ReleaseSemaphore', [rwin32.HANDLE, rffi.LONG, rffi.LONGP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) else: from rpython.rlib import rposix @@ -296,7 +297,7 @@ def semlock_release(self, space): if not _ReleaseSemaphore(self.handle, 1, lltype.nullptr(rffi.LONGP.TO)): - err = rwin32.GetLastError() + err = rwin32.GetLastError_saved() if err == 0x0000012a: # ERROR_TOO_MANY_POSTS raise OperationError( space.w_ValueError, @@ -310,7 +311,7 @@ previous_ptr = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw') try: if not _ReleaseSemaphore(self.handle, 1, previous_ptr): - raise rwin32.lastWindowsError("ReleaseSemaphore") + raise rwin32.lastSavedWindowsError("ReleaseSemaphore") return previous_ptr[0] + 1 finally: lltype.free(previous_ptr, flavor='raw') diff --git a/pypy/module/_multiprocessing/interp_win32.py b/pypy/module/_multiprocessing/interp_win32.py --- a/pypy/module/_multiprocessing/interp_win32.py +++ b/pypy/module/_multiprocessing/interp_win32.py @@ -41,20 +41,24 @@ rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rffi.VOIDP], - rwin32.HANDLE) + 
rwin32.HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) _ConnectNamedPipe = rwin32.winexternal( - 'ConnectNamedPipe', [rwin32.HANDLE, rffi.VOIDP], rwin32.BOOL) + 'ConnectNamedPipe', [rwin32.HANDLE, rffi.VOIDP], rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _SetNamedPipeHandleState = rwin32.winexternal( 'SetNamedPipeHandleState', [ rwin32.HANDLE, rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _WaitNamedPipe = rwin32.winexternal( 'WaitNamedPipeA', [rwin32.LPCSTR, rwin32.DWORD], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _PeekNamedPipe = rwin32.winexternal( 'PeekNamedPipe', [ @@ -62,31 +66,36 @@ rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _CreateFile = rwin32.winexternal( 'CreateFileA', [ rwin32.LPCSTR, rwin32.DWORD, rwin32.DWORD, rffi.VOIDP, rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE], - rwin32.HANDLE) + rwin32.HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) _WriteFile = rwin32.winexternal( 'WriteFile', [ rwin32.HANDLE, rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rffi.VOIDP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _ReadFile = rwin32.winexternal( 'ReadFile', [ rwin32.HANDLE, rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rffi.VOIDP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _ExitProcess = rwin32.winexternal( - 'ExitProcess', [rffi.UINT], lltype.Void) + 'ExitProcess', [rffi.UINT], lltype.Void, + save_err=rffi.RFFI_SAVE_LASTERROR) _GetTickCount = rwin32.winexternal( 'GetTickCount', [], rwin32.DWORD) @@ -97,10 +106,10 @@ def CloseHandle(space, w_handle): handle = handle_w(space, w_handle) if not rwin32.CloseHandle(handle): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) def GetLastError(space): - return space.wrap(rwin32.GetLastError()) + return 
space.wrap(rwin32.GetLastError_saved()) # __________________________________________________________ # functions for the "win32" namespace @@ -118,7 +127,7 @@ outputsize, inputsize, timeout, rffi.NULL) if handle == rwin32.INVALID_HANDLE_VALUE: - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) return w_handle(space, handle) @@ -129,7 +138,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap("expected a NULL pointer")) if not _ConnectNamedPipe(handle, rffi.NULL): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) def SetNamedPipeHandleState(space, w_handle, w_pipemode, w_maxinstances, w_timeout): @@ -149,7 +158,7 @@ statep[2] = rffi.ptradd(state, 2) if not _SetNamedPipeHandleState(handle, statep[0], statep[1], statep[2]): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) finally: lltype.free(state, flavor='raw') lltype.free(statep, flavor='raw') @@ -158,7 +167,7 @@ def WaitNamedPipe(space, name, timeout): # Careful: zero means "default value specified by CreateNamedPipe()" if not _WaitNamedPipe(name, timeout): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) @unwrap_spec(filename=str, access=r_uint, share=r_uint, disposition=r_uint, flags=r_uint) @@ -174,7 +183,7 @@ disposition, flags, rwin32.NULL_HANDLE) if handle == rwin32.INVALID_HANDLE_VALUE: - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) return w_handle(space, handle) diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -62,7 +62,8 @@ _setCtrlHandlerRoutine = rffi.llexternal( 'pypy_timemodule_setCtrlHandler', [rwin32.HANDLE], 
rwin32.BOOL, - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_LASTERROR) class GlobalState: def __init__(self): @@ -79,8 +80,8 @@ except WindowsError, e: raise wrap_windowserror(space, e) if not _setCtrlHandlerRoutine(globalState.interrupt_event): - raise wrap_windowserror( - space, rwin32.lastWindowsError("SetConsoleCtrlHandler")) + raise wrap_windowserror(space, + rwin32.lastSavedWindowsError("SetConsoleCtrlHandler")) globalState = GlobalState() diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -145,7 +145,7 @@ # mode is unused on windows, but a consistant signature res = rwin32.LoadLibrary(name) if not res: - err = rwin32.GetLastError() + err = rwin32.GetLastError_saved() raise DLOpenError(rwin32.FormatError(err)) return res diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -201,11 +201,20 @@ SYSTEM_INFO_P = lltype.Ptr(SYSTEM_INFO) GetSystemInfo, _ = winexternal('GetSystemInfo', [SYSTEM_INFO_P], lltype.Void) - GetFileSize, _ = winexternal('GetFileSize', [HANDLE, LPDWORD], DWORD) + GetFileSize, _ = winexternal('GetFileSize', [HANDLE, LPDWORD], DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) GetCurrentProcess, _ = winexternal('GetCurrentProcess', [], HANDLE) - DuplicateHandle, _ = winexternal('DuplicateHandle', [HANDLE, HANDLE, HANDLE, LPHANDLE, DWORD, BOOL, DWORD], BOOL) - CreateFileMapping, _ = winexternal('CreateFileMappingA', [HANDLE, rwin32.LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCSTR], HANDLE) - MapViewOfFile, _ = winexternal('MapViewOfFile', [HANDLE, DWORD, DWORD, DWORD, SIZE_T], LPCSTR)##!!LPVOID) + DuplicateHandle, _ = winexternal('DuplicateHandle', + [HANDLE, HANDLE, HANDLE, LPHANDLE, DWORD, + BOOL, DWORD], BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + CreateFileMapping, _ = winexternal('CreateFileMappingA', + [HANDLE, rwin32.LPSECURITY_ATTRIBUTES, + DWORD, DWORD, DWORD, LPCSTR], 
HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) + MapViewOfFile, _ = winexternal('MapViewOfFile', [HANDLE, DWORD, DWORD, + DWORD, SIZE_T], LPCSTR, + save_err=rffi.RFFI_SAVE_LASTERROR) ##!!LPVOID _, UnmapViewOfFile_safe = winexternal('UnmapViewOfFile', [LPCSTR], BOOL) FlushViewOfFile, _ = winexternal('FlushViewOfFile', [LPCSTR, SIZE_T], BOOL) SetFilePointer, _ = winexternal('SetFilePointer', [HANDLE, LONG, PLONG, DWORD], DWORD) @@ -255,7 +264,7 @@ # so we need to check the last error also INVALID_FILE_SIZE = -1 if low == INVALID_FILE_SIZE: - err = rwin32.GetLastError() + err = rwin32.GetLastError_saved() if err: raise WindowsError(err, "mmap") return low, high @@ -328,10 +337,10 @@ self.unmap() self.setdata(NODATA, 0) if self.map_handle != INVALID_HANDLE: - rwin32.CloseHandle(self.map_handle) + rwin32.CloseHandle_no_err(self.map_handle) self.map_handle = INVALID_HANDLE if self.file_handle != INVALID_HANDLE: - rwin32.CloseHandle(self.file_handle) + rwin32.CloseHandle_no_err(self.file_handle) self.file_handle = INVALID_HANDLE elif _POSIX: self.closed = True @@ -536,7 +545,7 @@ elif _MS_WINDOWS: # disconnect the mapping self.unmap() - rwin32.CloseHandle(self.map_handle) + rwin32.CloseHandle_no_err(self.map_handle) # move to the desired EOF position if _64BIT: @@ -573,9 +582,9 @@ charp = rffi.cast(LPCSTR, data) self.setdata(charp, newsize) return - winerror = rwin32.lastWindowsError() + winerror = rwin32.lastSavedWindowsError() if self.map_handle: - rwin32.CloseHandle(self.map_handle) + rwin32.CloseHandle_no_err(self.map_handle) self.map_handle = INVALID_HANDLE raise winerror @@ -812,7 +821,7 @@ False, # inherited by child procs? 
DUPLICATE_SAME_ACCESS) # options if not res: - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() m.file_handle = handle_ref[0] finally: lltype.free(handle_ref, flavor='raw') @@ -855,9 +864,9 @@ charp = rffi.cast(LPCSTR, data) m.setdata(charp, map_size) return m - winerror = rwin32.lastWindowsError() + winerror = rwin32.lastSavedWindowsError() if m.map_handle: - rwin32.CloseHandle(m.map_handle) + rwin32.CloseHandle_no_err(m.map_handle) m.map_handle = INVALID_HANDLE raise winerror diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -371,6 +371,7 @@ loop_invariant=True) tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") if sys.platform == "win32": + from rpython.rlib import rwin32 tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") def _threadlocalref_seeme(field): diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1597,7 +1597,8 @@ rwin32.LPCSTR, rffi.INT, rffi.CWCHARP, rffi.INT], rffi.INT, - calling_conv='win') + calling_conv='win', + save_err=rffi.RFFI_SAVE_LASTERROR) WideCharToMultiByte = rffi.llexternal('WideCharToMultiByte', [rffi.UINT, rwin32.DWORD, @@ -1605,19 +1606,20 @@ rwin32.LPCSTR, rffi.INT, rwin32.LPCSTR, BOOLP], rffi.INT, - calling_conv='win') + calling_conv='win', + save_err=rffi.RFFI_SAVE_LASTERROR) def is_dbcs_lead_byte(c): # XXX don't know how to test this return False def _decode_mbcs_error(s, errorhandler): - if rwin32.GetLastError() == rwin32.ERROR_NO_UNICODE_TRANSLATION: + if rwin32.GetLastError_saved() == rwin32.ERROR_NO_UNICODE_TRANSLATION: msg = ("No mapping for the Unicode character exists in the target " "multi-byte code page.") errorhandler('strict', 'mbcs', msg, s, 0, 0) else: - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() def str_decode_mbcs(s, size, errors, final=False, errorhandler=None, force_ignore=True): @@ 
-1684,7 +1686,7 @@ dataptr, size, None, 0, None, used_default_p) if mbcssize == 0: - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() # If we used a default char, then we failed! if (used_default_p and rffi.cast(lltype.Bool, used_default_p[0])): @@ -1696,7 +1698,7 @@ if WideCharToMultiByte(CP_ACP, flags, dataptr, size, buf.raw, mbcssize, None, used_default_p) == 0: - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() if (used_default_p and rffi.cast(lltype.Bool, used_default_p[0])): errorhandler('strict', 'mbcs', "invalid character", diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -35,14 +35,16 @@ rwin32.LPCSTR, rwin32.LPCSTR, rwin32.DWORD, rwin32.DWORD], rwin32.BOOL, calling_conv='win', - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_LASTERROR) CryptGenRandom = rffi.llexternal( 'CryptGenRandom', [HCRYPTPROV, rwin32.DWORD, rffi.CArrayPtr(rwin32.BYTE)], rwin32.BOOL, calling_conv='win', - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_LASTERROR) def init_urandom(): """NOT_RPYTHON @@ -60,14 +62,14 @@ if not CryptAcquireContext( context, None, None, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT): - raise rwin32.lastWindowsError("CryptAcquireContext") + raise rwin32.lastSavedWindowsError("CryptAcquireContext") provider = context[0] # TODO(win64) This is limited to 2**31 with lltype.scoped_alloc(rffi.CArray(rwin32.BYTE), n, zero=True, # zero seed ) as buf: if not CryptGenRandom(provider, n, buf): - raise rwin32.lastWindowsError("CryptGenRandom") + raise rwin32.lastSavedWindowsError("CryptGenRandom") return rffi.charpsize2str(rffi.cast(rffi.CCHARP, buf), n) diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -131,19 +131,23 @@ from rpython.rlib import rthread rthread.tlfield_rpy_lasterror.setraw(rffi.cast(DWORD, err)) - # In tests, the first 
call to GetLastError_real() is always wrong, + # In tests, the first call to _GetLastError() is always wrong, # because error is hidden by operations in ll2ctypes. Call it now. - GetLastError_real() + _GetLastError() GetModuleHandle = winexternal('GetModuleHandleA', [rffi.CCHARP], HMODULE) - LoadLibrary = winexternal('LoadLibraryA', [rffi.CCHARP], HMODULE) + LoadLibrary = winexternal('LoadLibraryA', [rffi.CCHARP], HMODULE, + save_err=rffi.RFFI_SAVE_LASTERROR) GetProcAddress = winexternal('GetProcAddress', [HMODULE, rffi.CCHARP], rffi.VOIDP) FreeLibrary = winexternal('FreeLibrary', [HMODULE], BOOL, releasegil=False) LocalFree = winexternal('LocalFree', [HLOCAL], DWORD) - CloseHandle = winexternal('CloseHandle', [HANDLE], BOOL, releasegil=False) + CloseHandle = winexternal('CloseHandle', [HANDLE], BOOL, releasegil=False, + save_err=rffi.RFFI_SAVE_LASTERROR) + CloseHandle_no_err = winexternal('CloseHandle', [HANDLE], BOOL, + releasegil=False) FormatMessage = winexternal( 'FormatMessageA', @@ -260,7 +264,7 @@ def fake_FormatError(code): return 'Windows Error %d' % (code,) - def lastWindowsError(context="Windows Error"): + def lastSavedWindowsError(context="Windows Error"): code = GetLastError_saved() return WindowsError(code, context) @@ -285,7 +289,8 @@ _GetVersionEx = winexternal('GetVersionExA', [lltype.Ptr(OSVERSIONINFOEX)], - DWORD) + DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) @jit.dont_look_inside def GetVersionEx(): @@ -294,7 +299,7 @@ rffi.sizeof(OSVERSIONINFOEX)) try: if not _GetVersionEx(info): - raise lastWindowsError() + raise lastSavedWindowsError() return (rffi.cast(lltype.Signed, info.c_dwMajorVersion), rffi.cast(lltype.Signed, info.c_dwMinorVersion), rffi.cast(lltype.Signed, info.c_dwBuildNumber), @@ -309,7 +314,8 @@ lltype.free(info, flavor='raw') _WaitForSingleObject = winexternal( - 'WaitForSingleObject', [HANDLE, DWORD], DWORD) + 'WaitForSingleObject', [HANDLE, DWORD], DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) def WaitForSingleObject(handle, 
timeout): """Return values: @@ -317,12 +323,13 @@ - WAIT_TIMEOUT when the timeout elapsed""" res = _WaitForSingleObject(handle, timeout) if res == rffi.cast(DWORD, -1): - raise lastWindowsError("WaitForSingleObject") + raise lastSavedWindowsError("WaitForSingleObject") return res _WaitForMultipleObjects = winexternal( 'WaitForMultipleObjects', [ - DWORD, rffi.CArrayPtr(HANDLE), BOOL, DWORD], DWORD) + DWORD, rffi.CArrayPtr(HANDLE), BOOL, DWORD], DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) def WaitForMultipleObjects(handles, waitall=False, timeout=INFINITE): """Return values: @@ -336,24 +343,26 @@ handle_array[i] = handles[i] res = _WaitForMultipleObjects(nb, handle_array, waitall, timeout) if res == rffi.cast(DWORD, -1): - raise lastWindowsError("WaitForMultipleObjects") + raise lastSavedWindowsError("WaitForMultipleObjects") return res finally: lltype.free(handle_array, flavor='raw') _CreateEvent = winexternal( - 'CreateEventA', [rffi.VOIDP, BOOL, BOOL, LPCSTR], HANDLE) + 'CreateEventA', [rffi.VOIDP, BOOL, BOOL, LPCSTR], HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) def CreateEvent(*args): handle = _CreateEvent(*args) if handle == NULL_HANDLE: - raise lastWindowsError("CreateEvent") + raise lastSavedWindowsError("CreateEvent") return handle SetEvent = winexternal( 'SetEvent', [HANDLE], BOOL) ResetEvent = winexternal( 'ResetEvent', [HANDLE], BOOL) _OpenProcess = winexternal( - 'OpenProcess', [DWORD, BOOL, DWORD], HANDLE) + 'OpenProcess', [DWORD, BOOL, DWORD], HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) def OpenProcess(*args): ''' OpenProcess( dwDesiredAccess, bInheritHandle, dwProcessId) where dwDesiredAccess is a combination of the flags: @@ -379,12 +388,14 @@ ''' handle = _OpenProcess(*args) if handle == NULL_HANDLE: - raise lastWindowsError("OpenProcess") + raise lastSavedWindowsError("OpenProcess") return handle TerminateProcess = winexternal( - 'TerminateProcess', [HANDLE, rffi.UINT], BOOL) + 'TerminateProcess', [HANDLE, rffi.UINT], BOOL, + 
save_err=rffi.RFFI_SAVE_LASTERROR) GenerateConsoleCtrlEvent = winexternal( - 'GenerateConsoleCtrlEvent', [DWORD, DWORD], BOOL) + 'GenerateConsoleCtrlEvent', [DWORD, DWORD], BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _GetCurrentProcessId = winexternal( 'GetCurrentProcessId', [], DWORD) def GetCurrentProcessId(): @@ -400,14 +411,15 @@ def os_kill(pid, sig): if sig == CTRL_C_EVENT or sig == CTRL_BREAK_EVENT: if GenerateConsoleCtrlEvent(sig, pid) == 0: - raise lastWindowsError('os_kill failed generating event') + raise lastSavedWindowsError('os_kill failed generating event') return handle = OpenProcess(PROCESS_ALL_ACCESS, False, pid) if handle == NULL_HANDLE: - raise lastWindowsError('os_kill failed opening process') + raise lastSavedWindowsError('os_kill failed opening process') try: if TerminateProcess(handle, sig) == 0: - raise lastWindowsError('os_kill failed to terminate process') + raise lastSavedWindowsError( + 'os_kill failed to terminate process') finally: CloseHandle(handle) diff --git a/rpython/rlib/rwinreg.py b/rpython/rlib/rwinreg.py --- a/rpython/rlib/rwinreg.py +++ b/rpython/rlib/rwinreg.py @@ -146,17 +146,18 @@ _ExpandEnvironmentStringsW = external( 'ExpandEnvironmentStringsW', [rffi.CWCHARP, rffi.CWCHARP, rwin32.DWORD], - rwin32.DWORD) + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) def ExpandEnvironmentStrings(source): with rffi.scoped_unicode2wcharp(source) as src_buf: size = _ExpandEnvironmentStringsW(src_buf, lltype.nullptr(rffi.CWCHARP.TO), 0) if size == 0: - raise rwin32.lastWindowsError("ExpandEnvironmentStrings") + raise rwin32.lastSavedWindowsError("ExpandEnvironmentStrings") size = intmask(size) with rffi.scoped_alloc_unicodebuffer(size) as dest_buf: if _ExpandEnvironmentStringsW(src_buf, dest_buf.raw, size) == 0: - raise rwin32.lastWindowsError("ExpandEnvironmentStrings") + raise rwin32.lastSavedWindowsError("ExpandEnvironmentStrings") return dest_buf.str(size - 1) # remove trailing \0 diff --git a/rpython/rlib/streamio.py 
b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -188,7 +188,8 @@ if sys.platform == "win32": - from rpython.rlib.rwin32 import BOOL, HANDLE, get_osfhandle, GetLastError + from rpython.rlib.rwin32 import BOOL, HANDLE, get_osfhandle + from rpython.rlib.rwin32 import GetLastError_saved from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.lltypesystem import rffi @@ -196,7 +197,8 @@ _setmode = rffi.llexternal('_setmode', [rffi.INT, rffi.INT], rffi.INT, compilation_info=_eci) SetEndOfFile = rffi.llexternal('SetEndOfFile', [HANDLE], BOOL, - compilation_info=_eci) + compilation_info=_eci, + save_err=rffi.RFFI_SAVE_LASTERROR) def _setfd_binary(fd): # Allow this to succeed on invalid fd's @@ -211,7 +213,7 @@ # Truncate. Note that this may grow the file! handle = get_osfhandle(fd) if not SetEndOfFile(handle): - raise OSError(GetLastError(), + raise OSError(GetLastError_saved(), "Could not truncate file") finally: # we restore the file pointer position in any case diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -43,10 +43,10 @@ return rffi.llexternal(name, ARGS, RESULT, compilation_info=eci, sandboxsafe=True, **kwargs) -def math_llexternal(name, ARGS, RESULT): +def math_llexternal(name, ARGS, RESULT, **kwargs): return rffi.llexternal(math_prefix + name, ARGS, RESULT, compilation_info=math_eci, - sandboxsafe=True) + sandboxsafe=True, **kwargs) math_fabs = llexternal('fabs', [rffi.DOUBLE], rffi.DOUBLE) math_log = llexternal('log', [rffi.DOUBLE], rffi.DOUBLE) diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1339,7 +1339,8 @@ rwin32.LPHANDLE, rffi.VOIDP, rwin32.DWORD], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) 
_open_osfhandle = self.llexternal('_open_osfhandle', [rffi.INTPTR_T, rffi.INT], rffi.INT) @@ -1352,7 +1353,7 @@ if ok: error = 0 else: - error = rwin32.GetLastError() + error = rwin32.GetLastError_saved() hread = rffi.cast(rffi.INTPTR_T, pread[0]) hwrite = rffi.cast(rffi.INTPTR_T, pwrite[0]) lltype.free(pwrite, flavor='raw') @@ -1564,7 +1565,7 @@ @func_renamer('unlink_llimpl_%s' % traits.str.__name__) def unlink_llimpl(path): if not win32traits.DeleteFile(path): - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() return extdef([traits.str0], s_None, llimpl=unlink_llimpl, export_name=traits.ll_os_name('unlink')) @@ -1601,7 +1602,7 @@ @func_renamer('mkdir_llimpl_%s' % traits.str.__name__) def os_mkdir_llimpl(path, mode): if not win32traits.CreateDirectory(path, None): - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() else: def os_mkdir_llimpl(pathname, mode): res = os_mkdir(pathname, mode) @@ -1677,7 +1678,7 @@ @func_renamer('rename_llimpl_%s' % traits.str.__name__) def rename_llimpl(oldpath, newpath): if not win32traits.MoveFile(oldpath, newpath): - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() return extdef([traits.str0, traits.str0], s_None, llimpl=rename_llimpl, export_name=traits.ll_os_name('rename')) diff --git a/rpython/rtyper/module/ll_os_environ.py b/rpython/rtyper/module/ll_os_environ.py --- a/rpython/rtyper/module/ll_os_environ.py +++ b/rpython/rtyper/module/ll_os_environ.py @@ -124,7 +124,8 @@ _wgetenv = rffi.llexternal('_wgetenv', [rffi.CWCHARP], rffi.CWCHARP, compilation_info=eci, releasegil=False) _wputenv = rffi.llexternal('_wputenv', [rffi.CWCHARP], rffi.INT, - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_LASTERROR) class EnvKeepalive: pass @@ -144,7 +145,7 @@ traits = UnicodeTraits() get_environ, getenv, putenv = get__wenviron, _wgetenv, _wputenv byname, eq = envkeepalive.bywname, u'=' - from rpython.rlib.rwin32 import lastWindowsError as last_error + 
from rpython.rlib.rwin32 import lastSavedWindowsError as last_error def envitems_llimpl(): environ = get_environ() diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -523,11 +523,11 @@ try: l_path = traits.str2charp(path) res = win32traits.GetFileAttributesEx(l_path, win32traits.GetFileExInfoStandard, data) - errcode = rwin32.GetLastError() + errcode = rwin32.GetLastError_saved() if res == 0: if errcode == win32traits.ERROR_SHARING_VIOLATION: res = attributes_from_dir(l_path, data) - errcode = rwin32.GetLastError() + errcode = rwin32.GetLastError_saved() traits.free_charp(l_path) if res == 0: raise WindowsError(errcode, "os_stat failed") @@ -549,7 +549,7 @@ 0, 0, 0, 0, 0, 0, 0, 0, 0)) elif filetype == win32traits.FILE_TYPE_UNKNOWN: - error = rwin32.GetLastError() + error = rwin32.GetLastError_saved() if error != 0: raise WindowsError(error, "os_fstat failed") # else: unknown but valid file @@ -560,7 +560,8 @@ try: res = win32traits.GetFileInformationByHandle(handle, info) if res == 0: - raise WindowsError(rwin32.GetLastError(), "os_fstat failed") + raise WindowsError(rwin32.GetLastError_saved(), + "os_fstat failed") return by_handle_info_to_stat(info) finally: lltype.free(info, flavor='raw') diff --git a/rpython/rtyper/module/ll_win32file.py b/rpython/rtyper/module/ll_win32file.py --- a/rpython/rtyper/module/ll_win32file.py +++ b/rpython/rtyper/module/ll_win32file.py @@ -113,10 +113,12 @@ FindFirstFile = external('FindFirstFile' + suffix, [traits.CCHARP, LPWIN32_FIND_DATA], - rwin32.HANDLE) + rwin32.HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) FindNextFile = external('FindNextFile' + suffix, [rwin32.HANDLE, LPWIN32_FIND_DATA], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) FindClose = external('FindClose', [rwin32.HANDLE], rwin32.BOOL) @@ -124,28 +126,33 @@ GetFileAttributes = external( 'GetFileAttributes' + suffix, 
[traits.CCHARP], - rwin32.DWORD) + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) SetFileAttributes = external( 'SetFileAttributes' + suffix, [traits.CCHARP, rwin32.DWORD], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) GetFileAttributesEx = external( 'GetFileAttributesEx' + suffix, [traits.CCHARP, GET_FILEEX_INFO_LEVELS, lltype.Ptr(WIN32_FILE_ATTRIBUTE_DATA)], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) GetFileInformationByHandle = external( 'GetFileInformationByHandle', [rwin32.HANDLE, lltype.Ptr(BY_HANDLE_FILE_INFORMATION)], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) GetFileType = external( 'GetFileType', [rwin32.HANDLE], - rwin32.DWORD) + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) LPSTRP = rffi.CArrayPtr(traits.CCHARP) @@ -153,45 +160,52 @@ 'GetFullPathName' + suffix, [traits.CCHARP, rwin32.DWORD, traits.CCHARP, LPSTRP], - rwin32.DWORD) + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) GetCurrentDirectory = external( 'GetCurrentDirectory' + suffix, [rwin32.DWORD, traits.CCHARP], - rwin32.DWORD) + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) SetCurrentDirectory = external( 'SetCurrentDirectory' + suffix, [traits.CCHARP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) CreateDirectory = external( 'CreateDirectory' + suffix, [traits.CCHARP, rffi.VOIDP], rwin32.BOOL, - XXX) # save_err=rffi.RFFI_SAVE_LASTERROR + save_err=rffi.RFFI_SAVE_LASTERROR) SetEnvironmentVariable = external( 'SetEnvironmentVariable' + suffix, [traits.CCHARP, traits.CCHARP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) CreateFile = external( 'CreateFile' + apisuffix, [traits.CCHARP, rwin32.DWORD, rwin32.DWORD, rwin32.LPSECURITY_ATTRIBUTES, rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE], - rwin32.HANDLE) + rwin32.HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) DeleteFile = external( 'DeleteFile' + suffix, [traits.CCHARP], - rwin32.BOOL) + rwin32.BOOL, + 
save_err=rffi.RFFI_SAVE_LASTERROR) MoveFile = external( 'MoveFile' + suffix, [traits.CCHARP, traits.CCHARP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) return Win32Traits @@ -227,7 +241,7 @@ result = [] hFindFile = win32traits.FindFirstFile(mask, filedata) if hFindFile == rwin32.INVALID_HANDLE_VALUE: - error = rwin32.GetLastError() + error = rwin32.GetLastError_saved() if error == win32traits.ERROR_FILE_NOT_FOUND: return result else: @@ -241,7 +255,7 @@ break # FindNextFile sets error to ERROR_NO_MORE_FILES if # it got to the end of the directory - error = rwin32.GetLastError() + error = rwin32.GetLastError_saved() win32traits.FindClose(hFindFile) if error == win32traits.ERROR_NO_MORE_FILES: return result @@ -279,14 +293,14 @@ the per-drive current directory, which are of the form =: """ if not win32traits.SetCurrentDirectory(path): - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() MAX_PATH = rwin32.MAX_PATH assert MAX_PATH > 0 with traits.scoped_alloc_buffer(MAX_PATH) as path: res = win32traits.GetCurrentDirectory(MAX_PATH + 1, path.raw) if not res: - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() res = rffi.cast(lltype.Signed, res) assert res > 0 if res <= MAX_PATH + 1: @@ -295,14 +309,14 @@ with traits.scoped_alloc_buffer(res) as path: res = win32traits.GetCurrentDirectory(res, path.raw) if not res: - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() res = rffi.cast(lltype.Signed, res) assert res > 0 new_path = path.str(res) if isUNC(new_path): return if not win32traits.SetEnvironmentVariable(magic_envvar(new_path), new_path): - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() return chdir_llimpl @@ -317,13 +331,13 @@ def chmod_llimpl(path, mode): attr = win32traits.GetFileAttributes(path) if attr == win32traits.INVALID_FILE_ATTRIBUTES: - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() if mode & 0200: # _S_IWRITE attr &= 
~win32traits.FILE_ATTRIBUTE_READONLY else: attr |= win32traits.FILE_ATTRIBUTE_READONLY if not win32traits.SetFileAttributes(path, attr): - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() return chmod_llimpl @@ -343,7 +357,7 @@ path, rffi.cast(rwin32.DWORD, nBufferLength), lpBuffer, lltype.nullptr(win32traits.LPSTRP.TO)) if res == 0: - raise rwin32.lastWindowsError("_getfullpathname failed") + raise rwin32.lastSavedWindowsError("_getfullpathname failed") result = traits.charp2str(lpBuffer) return result finally: @@ -360,7 +374,8 @@ 'GetSystemTime', [lltype.Ptr(rwin32.SYSTEMTIME)], lltype.Void, - calling_conv='win') + calling_conv='win', + save_err=rffi.RFFI_SAVE_LASTERROR) SystemTimeToFileTime = rffi.llexternal( 'SystemTimeToFileTime', @@ -376,7 +391,8 @@ lltype.Ptr(rwin32.FILETIME), lltype.Ptr(rwin32.FILETIME)], rwin32.BOOL, - calling_conv = 'win') + calling_conv = 'win', + save_err=rffi.RFFI_SAVE_LASTERROR) @specialize.argtype(1) def os_utime_llimpl(path, tp): @@ -386,7 +402,7 @@ win32traits.FILE_FLAG_BACKUP_SEMANTICS, rwin32.NULL_HANDLE) if hFile == rwin32.INVALID_HANDLE_VALUE: - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() ctime = lltype.nullptr(rwin32.FILETIME) atime = lltype.malloc(rwin32.FILETIME, flavor='raw') mtime = lltype.malloc(rwin32.FILETIME, flavor='raw') @@ -397,7 +413,7 @@ GetSystemTime(now) if (not SystemTimeToFileTime(now, atime) or not SystemTimeToFileTime(now, mtime)): - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() finally: lltype.free(now, flavor='raw') else: @@ -405,7 +421,7 @@ time_t_to_FILE_TIME(actime, atime) time_t_to_FILE_TIME(modtime, mtime) if not SetFileTime(hFile, ctime, atime, mtime): - raise rwin32.lastWindowsError() + raise rwin32.lastSavedWindowsError() finally: rwin32.CloseHandle(hFile) lltype.free(atime, flavor='raw') From noreply at buildbot.pypy.org Fri Jan 16 18:27:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 18:27:31 +0100 
(CET) Subject: [pypy-commit] pypy errno-again: fix Message-ID: <20150116172731.7AFD11C0313@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75395:5889e82d01c7 Date: 2015-01-16 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/5889e82d01c7/ Log: fix diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -653,7 +653,7 @@ [rffi.INT, rffi.INT, rffi.INT, lltype.Ptr(WSAPROTOCOL_INFO), rwin32.DWORD, rwin32.DWORD], - socketfd_type, save_err=save_err) + socketfd_type, save_err=SAVE_ERR) if WIN32: WSAData = cConfig.WSAData From noreply at buildbot.pypy.org Fri Jan 16 18:32:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 18:32:13 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Fix Message-ID: <20150116173213.4A4CE1C0313@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75396:2cc55683107d Date: 2015-01-16 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/2cc55683107d/ Log: Fix diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1031,7 +1031,16 @@ raise TypeError("can only cast pointers to other pointers") if not isinstance(CURTYPE.TO, GcOpaqueType): raise TypeError("expected a GcOpaqueType") - return opaque_ptr._obj.container.getlength() + try: + c = opaque_ptr._obj.container + except AttributeError: + # if 'opaque_ptr' is already some _llgcopaque, hack its length + # by casting it to a random GcArray type and hoping + from rpython.rtyper.lltypesystem import rffi + p = rffi.cast(Ptr(GcArray(Signed)), opaque_ptr) + return len(p) + else: + return c.getlength() @analyzer_for(length_of_simple_gcarray_from_opaque) def ann_length_of_simple_gcarray_from_opaque(s_p): From noreply at buildbot.pypy.org Fri Jan 16 18:33:42 2015 From: noreply at 
buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 18:33:42 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix Message-ID: <20150116173342.4F01F1C039C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75397:2bf6480ad8fb Date: 2015-01-16 18:33 +0100 http://bitbucket.org/pypy/pypy/changeset/2bf6480ad8fb/ Log: fix diff --git a/rpython/rlib/rwinreg.py b/rpython/rlib/rwinreg.py --- a/rpython/rlib/rwinreg.py +++ b/rpython/rlib/rwinreg.py @@ -40,9 +40,9 @@ constants.update(cConfig) globals().update(cConfig) -def external(name, args, result): +def external(name, args, result, **kwds): return rffi.llexternal(name, args, result, compilation_info=eci, - calling_conv='win') + calling_conv='win', **kwds) HKEY = rwin32.HANDLE PHKEY = rffi.CArrayPtr(HKEY) From noreply at buildbot.pypy.org Fri Jan 16 18:47:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 18:47:08 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix tests Message-ID: <20150116174708.A27E01C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75398:e6cc54e8bb43 Date: 2015-01-16 18:43 +0100 http://bitbucket.org/pypy/pypy/changeset/e6cc54e8bb43/ Log: fix tests diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3037,8 +3037,7 @@ eci = ExternalCompilationInfo( separate_module_sources=[''' #include - RPY_EXPORTED - long __stdcall test_call_release_gil_save_lasterror( + RPY_EXPORTED long test_call_release_gil_save_lasterror( long a, long b, long c, long d, long e, long f, long g) { SetLastError(42); return (a + 10*b + 100*c + 1000*d + @@ -3094,8 +3093,7 @@ separate_module_sources=[r''' #include #include - RPY_EXPORTED - long __stdcall test_call_release_gil_readsaved_lasterror( + RPY_EXPORTED long test_call_release_gil_readsaved_lasterror( long a, long b, long c, long d, long e, long f, long 
g) { long r = GetLastError(); printf("GetLastError() result: %ld\n", r); From noreply at buildbot.pypy.org Fri Jan 16 18:47:09 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 18:47:09 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Fix Message-ID: <20150116174709.E75AA1C027F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75399:d273b19b4bd9 Date: 2015-01-16 18:46 +0100 http://bitbucket.org/pypy/pypy/changeset/d273b19b4bd9/ Log: Fix diff --git a/pypy/module/_cffi_backend/cerrno.py b/pypy/module/_cffi_backend/cerrno.py --- a/pypy/module/_cffi_backend/cerrno.py +++ b/pypy/module/_cffi_backend/cerrno.py @@ -23,10 +23,8 @@ @unwrap_spec(code=int) def getwinerror(space, code=-1): - XXX - from rpython.rlib.rwin32 import FormatError + from rpython.rlib.rwin32 import GetLastError_saved, FormatError if code == -1: - ec = get_errno_container(space) - code = ec._cffi_saved_LastError + code = GetLastError_saved() message = FormatError(code) return space.newtuple([space.wrap(code), space.wrap(message)]) From noreply at buildbot.pypy.org Fri Jan 16 19:07:27 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 19:07:27 +0100 (CET) Subject: [pypy-commit] pypy errno-again: GetLastError fix Message-ID: <20150116180727.25EC51C0305@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75400:e06dabcafed5 Date: 2015-01-16 18:59 +0100 http://bitbucket.org/pypy/pypy/changeset/e06dabcafed5/ Log: GetLastError fix diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -18,7 +18,6 @@ from rpython.rlib.rarithmetic import intmask, r_uint from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker -from pypy.module._rawffi import lasterror TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ @@ -496,14 +495,10 @@ try: if 
self.resshape is not None: result = self.resshape.allocate(space, 1, autofree=True) - lasterror.restore_last_error(space) self.ptr.call(args_ll, result.ll_buffer) - lasterror.save_last_error(space) return space.wrap(result) else: - lasterror.restore_last_error(space) self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) - lasterror.save_last_error(space) return space.w_None except StackCheckError, e: raise OperationError(space.w_ValueError, space.wrap(e.message)) @@ -619,11 +614,13 @@ rposix.set_saved_errno(space.int_w(w_errno)) if sys.platform == 'win32': + # see also + # https://bitbucket.org/pypy/pypy/issue/1944/ctypes-on-windows-getlasterror def get_last_error(space): - return space.wrap(lasterror.fetch_last_error(space)) + return space.wrap(rwin32.GetLastError_saved()) @unwrap_spec(error=int) def set_last_error(space, error): - lasterror.store_last_error(space, error) + rwin32.SetLastError_saved(error) else: # always have at least a dummy version of these functions # (https://bugs.pypy.org/issue1242) diff --git a/pypy/module/_rawffi/lasterror.py b/pypy/module/_rawffi/lasterror.py deleted file mode 100644 --- a/pypy/module/_rawffi/lasterror.py +++ /dev/null @@ -1,40 +0,0 @@ -# For Windows only. 
-# https://bitbucket.org/pypy/pypy/issue/1944/ctypes-on-windows-getlasterror - -import os - -_MS_WINDOWS = os.name == "nt" - - -if _MS_WINDOWS: - from rpython.rlib import rwin32 - from pypy.interpreter.executioncontext import ExecutionContext - - - ExecutionContext._rawffi_last_error = 0 - - def fetch_last_error(space): - ec = space.getexecutioncontext() - return ec._rawffi_last_error - - def store_last_error(space, last_error): - ec = space.getexecutioncontext() - ec._rawffi_last_error = last_error - - def restore_last_error(space): - ec = space.getexecutioncontext() - lasterror = ec._rawffi_last_error - rwin32.SetLastError(lasterror) - - def save_last_error(space): - lasterror = rwin32.GetLastError() - ec = space.getexecutioncontext() - ec._rawffi_last_error = lasterror - -else: - - def restore_last_error(space): - pass - - def save_last_error(space): - pass From noreply at buildbot.pypy.org Fri Jan 16 19:07:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 19:07:29 +0100 (CET) Subject: [pypy-commit] pypy errno-again: more win fixes Message-ID: <20150116180729.3EE971C030A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75401:465c2459d8ca Date: 2015-01-16 19:06 +0100 http://bitbucket.org/pypy/pypy/changeset/465c2459d8ca/ Log: more win fixes diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -406,7 +406,7 @@ size, written_ptr, rffi.NULL) if (result == 0 and - rwin32.GetLastError() == ERROR_NO_SYSTEM_RESOURCES): + rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES): raise oefmt(space.w_ValueError, "Cannot send %d bytes over connection", size) finally: @@ -430,7 +430,7 @@ if result: return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) - err = rwin32.GetLastError() + err = rwin32.GetLastError_saved() if err == 
ERROR_BROKEN_PIPE: raise OperationError(space.w_EOFError, space.w_None) elif err != ERROR_MORE_DATA: diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -26,8 +26,9 @@ _CreateSemaphore = rwin32.winexternal( 'CreateSemaphoreA', [rffi.VOIDP, rffi.LONG, rffi.LONG, rwin32.LPCSTR], - rwin32.HANDLE) - _CloseHandle = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], + rwin32.HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) + _CloseHandle_no_errno = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], rwin32.BOOL, releasegil=False) _ReleaseSemaphore = rwin32.winexternal( 'ReleaseSemaphore', [rwin32.HANDLE, rffi.LONG, rffi.LONGP], @@ -230,15 +231,13 @@ rwin32.SetLastError(0) handle = _CreateSemaphore(rffi.NULL, val, max, rffi.NULL) # On Windows we should fail on ERROR_ALREADY_EXISTS - err = rwin32.GetLastError() + err = rwin32.GetLastError_saved() if err != 0: raise WindowsError(err, "CreateSemaphore") return handle def delete_semaphore(handle): - if not _CloseHandle(handle): - err = rwin32.GetLastError() - raise WindowsError(err, "CloseHandle") + _CloseHandle_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: From noreply at buildbot.pypy.org Fri Jan 16 19:15:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 19:15:29 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix Message-ID: <20150116181529.DC4A81C0241@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75402:dc9b0cfc2a37 Date: 2015-01-16 19:15 +0100 http://bitbucket.org/pypy/pypy/changeset/dc9b0cfc2a37/ Log: fix diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_rawffi/alt/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -14,7 +14,6 @@ from rpython.rlib.objectmodel import 
we_are_translated from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror -from pypy.module._rawffi import lasterror import os if os.name == 'nt': @@ -202,23 +201,11 @@ self.func = func self.argchain = argchain - def before(self): - lasterror.restore_last_error(self.space) - - def after(self): - lasterror.save_last_error(self.space) - def get_longlong(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.LONGLONG) - self.after() - return x + return self.func.call(self.argchain, rffi.LONGLONG) def get_ulonglong(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.ULONGLONG) - self.after() - return x + return self.func.call(self.argchain, rffi.ULONGLONG) def get_signed(self, w_ffitype): # if the declared return type of the function is smaller than LONG, @@ -229,7 +216,6 @@ # to space.wrap in order to get a nice applevel . # restype = w_ffitype.get_ffitype() - self.before() call = self.func.call if restype is libffi.types.slong: x = call(self.argchain, rffi.LONG) @@ -241,19 +227,14 @@ x = rffi.cast(rffi.LONG, call(self.argchain, rffi.SIGNEDCHAR)) else: raise self.error(w_ffitype) - self.after() return x def get_unsigned(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.ULONG) - self.after() - return x + return self.func.call(self.argchain, rffi.ULONG) def get_unsigned_which_fits_into_a_signed(self, w_ffitype): # the same comment as get_signed apply restype = w_ffitype.get_ffitype() - self.before() call = self.func.call if restype is libffi.types.uint: assert not libffi.IS_32_BIT @@ -266,57 +247,35 @@ x = rffi.cast(rffi.LONG, call(self.argchain, rffi.UCHAR)) else: raise self.error(w_ffitype) - self.after() return x def get_pointer(self, w_ffitype): - self.before() ptrres = self.func.call(self.argchain, rffi.VOIDP) - self.after() return rffi.cast(rffi.ULONG, ptrres) def get_char(self, 
w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.UCHAR) - self.after() - return x + return self.func.call(self.argchain, rffi.UCHAR) def get_unichar(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.WCHAR_T) - self.after() - return x + return self.func.call(self.argchain, rffi.WCHAR_T) def get_float(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.DOUBLE) - self.after() - return x + return self.func.call(self.argchain, rffi.DOUBLE) def get_singlefloat(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.FLOAT) - self.after() - return x + return self.func.call(self.argchain, rffi.FLOAT) def get_struct(self, w_ffitype, w_structdescr): - self.before() addr = self.func.call(self.argchain, rffi.LONG, is_struct=True) - self.after() return w_structdescr.fromaddress(self.space, addr) def get_struct_rawffi(self, w_ffitype, w_structdescr): - self.before() uintval = self.func.call(self.argchain, rffi.ULONG, is_struct=True) - self.after() return w_structdescr.fromaddress(self.space, uintval) def get_void(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, lltype.Void) - self.after() - return x + return self.func.call(self.argchain, lltype.Void) def unpack_argtypes(space, w_argtypes, w_restype): From noreply at buildbot.pypy.org Fri Jan 16 19:30:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 19:30:56 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix Message-ID: <20150116183056.A83A21C0305@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75403:eadbe60a1222 Date: 2015-01-16 19:30 +0100 http://bitbucket.org/pypy/pypy/changeset/eadbe60a1222/ Log: fix diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -27,7 +27,7 @@ _CreateSemaphore = 
rwin32.winexternal( 'CreateSemaphoreA', [rffi.VOIDP, rffi.LONG, rffi.LONG, rwin32.LPCSTR], rwin32.HANDLE, - save_err=rffi.RFFI_SAVE_LASTERROR) + save_err=rffi.RFFI_FULL_LASTERROR) _CloseHandle_no_errno = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], rwin32.BOOL, releasegil=False) _ReleaseSemaphore = rwin32.winexternal( @@ -228,7 +228,7 @@ if sys.platform == 'win32': def create_semaphore(space, name, val, max): - rwin32.SetLastError(0) + rwin32.SetLastError_saved(0) handle = _CreateSemaphore(rffi.NULL, val, max, rffi.NULL) # On Windows we should fail on ERROR_ALREADY_EXISTS err = rwin32.GetLastError_saved() From noreply at buildbot.pypy.org Fri Jan 16 20:42:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 20:42:42 +0100 (CET) Subject: [pypy-commit] pypy errno-again: in-progress Message-ID: <20150116194242.75CAF1C0190@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75404:666a00c88bbb Date: 2015-01-16 20:42 +0100 http://bitbucket.org/pypy/pypy/changeset/666a00c88bbb/ Log: in-progress diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3091,12 +3091,11 @@ py.test.skip("Windows test only") eci = ExternalCompilationInfo( separate_module_sources=[r''' - #include - #include + #include RPY_EXPORTED long test_call_release_gil_readsaved_lasterror( long a, long b, long c, long d, long e, long f, long g) { long r = GetLastError(); - printf("GetLastError() result: %ld\n", r); + //printf("GetLastError() result: %ld\n", r); r += 100 * (a + 10*b + 100*c + 1000*d + 10000*e + 100000*f + 1000000*g); return r; @@ -3132,6 +3131,9 @@ # assert result == 24 + 345678900 + def test_call_release_gil_err_all(self): + xxx + def test_guard_not_invalidated(self): cpu = self.cpu i0 = BoxInt() diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- 
a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -20,6 +20,7 @@ CALL_ALIGN = 16 // WORD stdcall_or_cdecl = sys.platform == "win32" +handle_lasterror = sys.platform == "win32" def align_stack_words(words): return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1) @@ -31,6 +32,8 @@ # arguments, we need to decrease esp temporarily stack_max = PASS_ON_MY_FRAME + result_value_saved_early = False + def __init__(self, assembler, fnloc, arglocs, resloc=eax, restype=INT, ressize=WORD): AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs, @@ -150,6 +153,23 @@ self.mc.ADD(ebp, imm(1)) # ebp any more def write_real_errno(self, save_err): + tlofsreg = None + mc = self.mc + + if handle_lasterror and (save_err & rffi.RFFI_READSAVED_LASTERROR): + # must call SetLastError(). There are no registers to save + # because we are on 32-bit in this case: no register contains + # the arguments to the main function we want to call afterwards. + from rpython.rlib.rwin32 import _SetLastError + SetLastError_addr = self.asm.cpu.cast_ptr_to_int(_SetLastError) + assert isinstance(self, CallBuilder32) # Windows 32-bit only + # + rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu) + tlofsreg = edi # saved across the call to SetLastError + mc.MOV_rs(edi.value, THREADLOCAL_OFS - self.current_esp) + mc.PUSH_m((edi.value, rpy_lasterror)) + mc.CALL(imm(SetLastError_addr)) + if save_err & rffi.RFFI_READSAVED_ERRNO: # Just before a call, read 'rpy_errno' and write it into the # real 'errno'. Most registers are free here, including the @@ -157,35 +177,54 @@ # pass the arguments on x86-64. 
rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - mc = self.mc - mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) + if tlofsreg is None: + tlofsreg = eax + mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) if IS_X86_32: tmpreg = edx else: tmpreg = r11 # edx is used for 3rd argument - mc.MOV_rm(tmpreg.value, (eax.value, p_errno)) - mc.MOV32_rm(eax.value, (eax.value, rpy_errno)) + mc.MOV_rm(tmpreg.value, (tlofsreg.value, p_errno)) + mc.MOV32_rm(eax.value, (tlofsreg.value, rpy_errno)) mc.MOV32_mr((tmpreg.value, 0), eax.value) elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: # Same, but write zero. p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - mc = self.mc - mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) - mc.MOV_rm(eax.value, (eax.value, p_errno)) + if tlofsreg is None: + tlofsreg = eax + mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) + mc.MOV_rm(eax.value, (tlofsreg.value, p_errno)) mc.MOV32_mi((eax.value, 0), 0) def read_real_errno(self, save_err): + esi_is_threadlocal_ofs = False + mc = self.mc + if save_err & rffi.RFFI_SAVE_ERRNO: # Just after a call, read the real 'errno' and save a copy of # it inside our thread-local 'rpy_errno'. Most registers are # free here, including the callee-saved ones, except 'ebx'. 
rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - mc = self.mc mc.MOV_rs(esi.value, THREADLOCAL_OFS) mc.MOV_rm(edi.value, (esi.value, p_errno)) mc.MOV32_rm(edi.value, (edi.value, 0)) mc.MOV32_mr((esi.value, rpy_errno), edi.value) + esi_is_threadlocal_ofs = True + + if handle_lasterror and (save_err & rffi.RFFI_SAVE_LASTERROR): + from rpython.rlib.rwin32 import _GetLastError + GetLastError_addr = self.asm.cpu.cast_ptr_to_int(_GetLastError) + assert isinstance(self, CallBuilder32) # Windows 32-bit only + # + rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu) + self.save_result_value(save_edx=True) + self.result_value_saved_early = True + mc.CALL(imm(GetLastError_addr)) + # + if not esi_is_threadlocal_ofs: + mc.MOV_rs(esi.value, THREADLOCAL_OFS) + mc.MOV32_mr((esi.value, rpy_lasterror), eax.value) def move_real_result_and_call_reacqgil_addr(self, fastgil): from rpython.jit.backend.x86 import rx86 @@ -205,8 +244,9 @@ if IS_X86_32: assert css >= 16 if self.restype == 'L': # long long result: eax/edx - mc.MOV_sr(12, edx.value) - restore_edx = True + if not self.result_value_saved_early: + mc.MOV_sr(12, edx.value) + restore_edx = True css_value = edx old_value = ecx elif IS_X86_64: @@ -255,14 +295,16 @@ je_location = mc.get_relative_pos() # # Yes, we need to call the reacqgil() function - self.save_result_value_reacq() + if not self.result_value_saved_early: + self.save_result_value(save_edx=False) if self.asm._is_asmgcc(): if IS_X86_32: mc.MOV_sr(4, old_value.value) mc.MOV_sr(0, css_value.value) # on X86_64, they are already in the right registers mc.CALL(imm(self.asm.reacqgil_addr)) - self.restore_result_value_reacq() + if not self.result_value_saved_early: + self.restore_result_value(save_edx=False) # # patch the JE above offset = mc.get_relative_pos() - je_location @@ -272,6 +314,9 @@ if restore_edx: mc.MOV_rs(edx.value, 12) # restore this # + if self.result_value_saved_early: + 
self.restore_result_value(save_edx=True) + # if not we_are_translated(): # for testing: now we can accesss mc.SUB(ebp, imm(1)) # ebp again # @@ -284,11 +329,11 @@ #else: # for shadowstack, done for us by _reload_frame_if_necessary() - def save_result_value_reacq(self): + def save_result_value(self, save_edx): """Overridden in CallBuilder32 and CallBuilder64""" raise NotImplementedError - def restore_result_value_reacq(self): + def restore_result_value(self, save_edx): """Overridden in CallBuilder32 and CallBuilder64""" raise NotImplementedError @@ -378,7 +423,7 @@ else: CallBuilderX86.load_result(self) - def save_result_value_reacq(self): + def save_result_value(self, save_edx): # Temporarily save the result value into [ESP+8]. We use "+8" # in order to leave the two initial words free, in case it's needed. # Also note that in this 32-bit case, a long long return value is @@ -390,7 +435,8 @@ # a float or a long long return if self.restype == 'L': self.mc.MOV_sr(8, eax.value) # long long - #self.mc.MOV_sr(12, edx.value) -- already done by the caller + if save_edx: + self.mc.MOV_sr(12, edx.value) else: self.mc.FSTPL_s(8) # float return else: @@ -401,15 +447,16 @@ assert self.ressize <= WORD self.mc.MOV_sr(8, eax.value) - def restore_result_value_reacq(self): - # Opposite of save_result_value_reacq() + def restore_result_value(self, save_edx): + # Opposite of save_result_value() if self.ressize == 0: # void return return if self.resloc.is_float(): # a float or a long long return if self.restype == 'L': self.mc.MOV_rs(eax.value, 8) # long long - #self.mc.MOV_rs(edx.value, 12) -- will be done by the caller + if save_edx: + self.mc.MOV_rs(edx.value, 12) else: self.mc.FLDL_s(8) # float return else: @@ -534,7 +581,7 @@ else: CallBuilderX86.load_result(self) - def save_result_value_reacq(self): + def save_result_value(self, save_edx): # Temporarily save the result value into [ESP]. 
if self.ressize == 0: # void return return @@ -551,8 +598,8 @@ assert self.restype == INT self.mc.MOV_sr(0, eax.value) - def restore_result_value_reacq(self): - # Opposite of save_result_value_reacq() + def restore_result_value(self, save_edx): + # Opposite of save_result_value() if self.ressize == 0: # void return return # diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -536,6 +536,7 @@ PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) + PUSH_m = insn(rex_nw, '\xFF', orbyte(6<<3), mem_reg_plus_const(1)) PUSH_i8 = insn('\x6A', immediate(1, 'b')) PUSH_i32 = insn('\x68', immediate(1, 'i')) def PUSH_i(mc, immed): From noreply at buildbot.pypy.org Fri Jan 16 21:33:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 21:33:46 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Fix on Windows. Improve tests. Message-ID: <20150116203346.8934A1C0305@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75405:6582a6559821 Date: 2015-01-16 21:33 +0100 http://bitbucket.org/pypy/pypy/changeset/6582a6559821/ Log: Fix on Windows. Improve tests. 
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2930,17 +2930,21 @@ eci = ExternalCompilationInfo( separate_module_sources=[''' #include - RPY_EXPORTED long test_call_release_gil_save_errno( - long a, long b, long c, long d, long e, long f, long g) { + static long f1(long a, long b, long c, long d, + long e, long f, long g) { errno = 42; return (a + 10*b + 100*c + 1000*d + 10000*e + 100000*f + 1000000*g); } + RPY_EXPORTED + long test_call_release_gil_save_errno(void) { + return (long)&f1; + } ''']) fn_name = 'test_call_release_gil_save_errno' - func1_ptr = rffi.llexternal(fn_name, [lltype.Signed]*7, lltype.Signed, - compilation_info=eci, _nowrapper=True) - func1_adr = rffi.cast(lltype.Signed, func1_ptr) + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, types.slong) # @@ -2983,19 +2987,23 @@ separate_module_sources=[r''' #include #include - RPY_EXPORTED long test_call_release_gil_readsaved_errno( - long a, long b, long c, long d, long e, long f, long g) { + static long f1(long a, long b, long c, long d, + long e, long f, long g) { long r = errno; printf("read saved errno: %ld\n", r); r += 100 * (a + 10*b + 100*c + 1000*d + 10000*e + 100000*f + 1000000*g); return r; } + RPY_EXPORTED + long test_call_release_gil_readsaved_errno(void) { + return (long)&f1; + } ''']) fn_name = 'test_call_release_gil_readsaved_errno' - func1_ptr = rffi.llexternal(fn_name, [lltype.Signed]*7, lltype.Signed, - compilation_info=eci, _nowrapper=True) - func1_adr = rffi.cast(lltype.Signed, func1_ptr) + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, types.slong) # @@ 
-3037,17 +3045,21 @@ eci = ExternalCompilationInfo( separate_module_sources=[''' #include - RPY_EXPORTED long test_call_release_gil_save_lasterror( - long a, long b, long c, long d, long e, long f, long g) { + static long f1(long a, long b, long c, long d, + long e, long f, long g) { SetLastError(42); return (a + 10*b + 100*c + 1000*d + 10000*e + 100000*f + 1000000*g); } + RPY_EXPORTED + long test_call_release_gil_save_lasterror(void) { + return (long)&f1; + } ''']) fn_name = 'test_call_release_gil_save_lasterror' - func1_ptr = rffi.llexternal(fn_name, [lltype.Signed]*7, lltype.Signed, - compilation_info=eci, _nowrapper=True) - func1_adr = rffi.cast(lltype.Signed, func1_ptr) + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, types.slong) # @@ -3092,19 +3104,24 @@ eci = ExternalCompilationInfo( separate_module_sources=[r''' #include - RPY_EXPORTED long test_call_release_gil_readsaved_lasterror( - long a, long b, long c, long d, long e, long f, long g) { + static long f1(long a, long b, long c, long d, + long e, long f, long g) { long r = GetLastError(); - //printf("GetLastError() result: %ld\n", r); + printf("GetLastError() result: %ld\n", r); + printf("%ld %ld %ld %ld %ld %ld %ld\n", a,b,c,d,e,f,g); r += 100 * (a + 10*b + 100*c + 1000*d + 10000*e + 100000*f + 1000000*g); return r; } + RPY_EXPORTED + long test_call_release_gil_readsaved_lasterror(void) { + return (long)&f1; + } ''']) fn_name = 'test_call_release_gil_readsaved_lasterror' - func1_ptr = rffi.llexternal(fn_name, [lltype.Signed]*7, lltype.Signed, - compilation_info=eci, _nowrapper=True) - func1_adr = rffi.cast(lltype.Signed, func1_ptr) + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, types.slong) # @@ -3132,8 +3149,82 @@ 
assert result == 24 + 345678900 def test_call_release_gil_err_all(self): - xxx - + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") + if sys.platform != 'win32': + eci = ExternalCompilationInfo( + separate_module_sources=[r''' + #include + static long f1(long a, long b, long c, long d, + long e, long f, long g) { + long r = errno; + errno = 42; + r += 100 * (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); + return r; + } + RPY_EXPORTED + long test_call_release_gil_err_all(void) { + return (long)&f1; + } + ''']) + else: + eci = ExternalCompilationInfo( + separate_module_sources=[r''' + #include + #include + static long f1(long a, long b, long c, long d, + long e, long f, long g) { + long r = errno + 10 * GetLastError(); + errno = 42; + SetLastError(43); + r += 100 * (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); + return r; + } + RPY_EXPORTED + long test_call_release_gil_err_all(void) { + return (long)&f1; + } + ''']) + fn_name = 'test_call_release_gil_err_all' + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) + # + for saveerr in [rffi.RFFI_ERR_ALL]: + faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] + i1 = BoxInt() + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop(inputargs, ops, looptoken) + # + llerrno.set_debug_saved_errno(self.cpu, 8) + 
llerrno.set_debug_saved_lasterror(self.cpu, 9) + deadframe = self.cpu.execute_token(looptoken, 1, 2, 3, 4, 5, 6, 7) + result = self.cpu.get_int_value(deadframe, 0) + assert llerrno.get_debug_saved_errno(self.cpu) == 42 + if sys.platform != 'win32': + assert result == 765432108 + else: + assert llerrno.get_debug_saved_lasterror(self.cpu) == 43 + assert result == 765432198 + def test_guard_not_invalidated(self): cpu = self.cpu i0 = BoxInt() diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -169,6 +169,7 @@ mc.MOV_rs(edi.value, THREADLOCAL_OFS - self.current_esp) mc.PUSH_m((edi.value, rpy_lasterror)) mc.CALL(imm(SetLastError_addr)) + mc.ADD_ri(esp.value, WORD) if save_err & rffi.RFFI_READSAVED_ERRNO: # Just before a call, read 'rpy_errno' and write it into the From noreply at buildbot.pypy.org Fri Jan 16 23:04:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Jan 2015 23:04:40 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Untested: ARM support Message-ID: <20150116220440.838791C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75406:ff91d89bf257 Date: 2015-01-16 23:04 +0100 http://bitbucket.org/pypy/pypy/changeset/ff91d89bf257/ Log: Untested: ARM support diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -11,6 +11,8 @@ from rpython.jit.backend.arm.helper.assembler import saved_registers from rpython.jit.backend.arm.helper.regalloc import check_imm_arg from rpython.jit.backend.arm.codebuilder import OverwritingBuilder +from rpython.jit.backend.llsupport import llerrno +from rpython.rtyper.lltypesystem import rffi class ARMCallbuilder(AbstractCallBuilder): @@ -172,6 +174,41 @@ self.mc.LSL_ri(resloc.value, resloc.value, 16) 
self.mc.ASR_ri(resloc.value, resloc.value, 16) + def write_real_errno(self, save_err): + if save_err & rffi.RFFI_READSAVED_ERRNO: + # Just before a call, read 'rpy_errno' and write it into the + # real 'errno'. The r0-r3 registers contain arguments to the + # future call; the r5-r7 registers contain various stuff. + # We still have r8-r12. + rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.LDR_ri(r.r9.value, r.sp.value, + self.asm.saved_threadlocal_addr + self.current_sp) + self.mc.LDR_ri(r.ip.value, r.r9.value, p_errno) + self.mc.LDR_ri(r.r9.value, r.r9.value, rpy_errno) + self.mc.STR_ri(r.r9.value, r.ip.value) + elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: + # Same, but write zero. + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.LDR_ri(r.r9.value, r.sp.value, + self.asm.saved_threadlocal_addr + self.current_sp) + self.mc.LDR_ri(r.ip.value, r.r9.value, p_errno) + self.mc.MOV_ri(r.r9.value, 0) + self.mc.STR_ri(r.r9.value, r.ip.value) + + def read_real_errno(self, save_err): + if save_err & rffi.RFFI_SAVE_ERRNO: + # Just after a call, read the real 'errno' and save a copy of + # it inside our thread-local 'rpy_errno'. Registers r8-r12 + # are unused here, and registers r2-r3 never contain anything + # after the call. + rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.LDR_ri(r.r3.value, r.sp.value, + self.asm.saved_threadlocal_addr) + self.mc.LDR_ri(r.ip.value, r.r3.value, p_errno) + self.mc.LDR_ri(r.ip.value, r.ip.value, 0) + self.mc.STR_ri(r.ip.value, r.r3.value, rpy_errno) class SoftFloatCallBuilder(ARMCallbuilder): diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -403,7 +403,9 @@ # args = [resloc, size, sign, args...] 
from rpython.jit.backend.llsupport.descr import CallDescr - cb = callbuilder.get_callbuilder(self.cpu, self, arglocs[3], arglocs[4:], arglocs[0]) + func_index = 3 + is_call_release_gil + cb = callbuilder.get_callbuilder(self.cpu, self, arglocs[func_index], + arglocs[func_index+1:], arglocs[0]) descr = op.getdescr() assert isinstance(descr, CallDescr) @@ -418,7 +420,9 @@ cb.ressign = signloc.value if is_call_release_gil: - cb.emit_call_release_gil() + saveerrloc = arglocs[3] + assert saveerrloc.is_imm() + cb.emit_call_release_gil(saveerrloc.value) else: cb.emit() return fcond @@ -1073,7 +1077,7 @@ def emit_guard_call_release_gil(self, op, guard_op, arglocs, regalloc, fcond): numargs = op.numargs() - callargs = arglocs[:numargs + 3] # extract the arguments to the call + callargs = arglocs[:numargs + 4] # extract the arguments to the call guardargs = arglocs[len(callargs):] # extrat the arguments for the guard self._store_force_index(guard_op) self._emit_call(op, callargs, is_call_release_gil=True) @@ -1286,9 +1290,13 @@ return fcond def emit_opx_threadlocalref_get(self, op, arglocs, regalloc, fcond): - ofs0, res = arglocs - assert ofs0.is_imm() + ofs_loc, size_loc, sign_loc, res_loc = arglocs + assert ofs_loc.is_imm() + assert size_loc.is_imm() + assert sign_loc.is_imm() ofs = self.saved_threadlocal_addr - self.load_reg(self.mc, res, r.sp, ofs) - self.load_reg(self.mc, res, res, ofs0.value) + self.load_reg(self.mc, res_loc, r.sp, ofs) + scale = get_scale(size_loc.value) + signed = (sign_loc.value != 0) + self._load_from_mem(res_loc, res_loc, ofs_loc, scale, signed, fcond) return fcond diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -573,11 +573,12 @@ # ... 
return self._prepare_call(op) - def _prepare_call(self, op, force_store=[], save_all_regs=False): + def _prepare_call(self, op, force_store=[], save_all_regs=False, + first_arg_index=1): args = [None] * (op.numargs() + 3) calldescr = op.getdescr() assert isinstance(calldescr, CallDescr) - assert len(calldescr.arg_classes) == op.numargs() - 1 + assert len(calldescr.arg_classes) == op.numargs() - first_arg_index for i in range(op.numargs()): args[i + 3] = self.loc(op.getarg(i)) @@ -626,10 +627,12 @@ return [loc0, res] def _prepare_threadlocalref_get(self, op, fcond): - ofs0 = imm(op.getarg(1).getint()) - xxxxxxxxxxxxxxxx check the size and signedness of op.getdescr() - res = self.force_allocate_reg(op.result) - return [ofs0, res] + ofs_loc = imm(op.getarg(1).getint()) + calldescr = op.getdescr() + size_loc = imm(calldescr.get_result_size()) + sign_loc = imm(calldescr.is_result_signed()) + res_loc = self.force_allocate_reg(op.result) + return [ofs_loc, size_loc, sign_loc, res_loc] def _prepare_guard(self, op, args=None): if args is None: @@ -1236,7 +1239,10 @@ def prepare_guard_call_may_force(self, op, guard_op, fcond): args = self._prepare_call(op, save_all_regs=True) return self._prepare_guard(guard_op, args) - prepare_guard_call_release_gil = prepare_guard_call_may_force + + def prepare_guard_call_release_gil(self, op, guard_op, fcond): + args = self._prepare_call(op, save_all_regs=True, first_arg_index=2) + return self._prepare_guard(guard_op, args) def prepare_guard_call_assembler(self, op, guard_op, fcond): locs = self.locs_for_call_assembler(op, guard_op) From noreply at buildbot.pypy.org Sat Jan 17 09:20:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 09:20:42 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Remove most of the quadratic complexity of Message-ID: <20150117082042.F14A91C01E7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75407:3f2c4305cbc5 Date: 2015-01-17 09:20 +0100 
http://bitbucket.org/pypy/pypy/changeset/3f2c4305cbc5/ Log: Remove most of the quadratic complexity of collections.OrderedDict.popitem(last=False). diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -34,7 +34,8 @@ # {byte, short, int, long} *indexes; # dictentry *entries; # lookup_function_no; # one of the four possible functions for different -# # size dicts +# # size dicts; the rest of the word is a counter for how +# # many 'entries' at the start are known to be deleted # (Function DICTKEY, DICTKEY -> bool) *fnkeyeq; # (Function DICTKEY -> int) *fnkeyhash; # } @@ -44,7 +45,7 @@ @jit.look_inside_iff(lambda d, key, hash, flag: jit.isvirtual(d)) @jit.oopspec('ordereddict.lookup(d, key, hash, flag)') def ll_call_lookup_function(d, key, hash, flag): - fun = d.lookup_function_no + fun = d.lookup_function_no & FUNC_MASK if fun == FUNC_BYTE: return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: @@ -409,6 +410,8 @@ IS_64BIT = sys.maxint != 2 ** 31 - 1 +FUNC_SHIFT = 2 +FUNC_MASK = 0x03 # two bits if IS_64BIT: FUNC_BYTE, FUNC_SHORT, FUNC_INT, FUNC_LONG = range(4) else: @@ -442,36 +445,42 @@ d.lookup_function_no = FUNC_LONG def ll_clear_indexes(d, n): - if n <= 256: + fun = d.lookup_function_no & FUNC_MASK + d.lookup_function_no = fun + if fun == FUNC_BYTE: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_BYTE, d.indexes)) - elif n <= 65536: + elif fun == FUNC_SHORT: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_SHORT, d.indexes)) - elif IS_64BIT and n <= 2 ** 32: + elif IS_64BIT and fun == FUNC_INT: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_INT, d.indexes)) + elif fun == FUNC_LONG: + rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes)) else: - rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes)) + assert False @jit.dont_look_inside def 
ll_call_insert_clean_function(d, hash, i): - if d.lookup_function_no == FUNC_BYTE: + fun = d.lookup_function_no & FUNC_MASK + if fun == FUNC_BYTE: ll_dict_store_clean(d, hash, i, TYPE_BYTE) - elif d.lookup_function_no == FUNC_SHORT: + elif fun == FUNC_SHORT: ll_dict_store_clean(d, hash, i, TYPE_SHORT) - elif IS_64BIT and d.lookup_function_no == FUNC_INT: + elif IS_64BIT and fun == FUNC_INT: ll_dict_store_clean(d, hash, i, TYPE_INT) - elif d.lookup_function_no == FUNC_LONG: + elif fun == FUNC_LONG: ll_dict_store_clean(d, hash, i, TYPE_LONG) else: assert False def ll_call_delete_by_entry_index(d, hash, i): - if d.lookup_function_no == FUNC_BYTE: + fun = d.lookup_function_no & FUNC_MASK + if fun == FUNC_BYTE: ll_dict_delete_by_entry_index(d, hash, i, TYPE_BYTE) - elif d.lookup_function_no == FUNC_SHORT: + elif fun == FUNC_SHORT: ll_dict_delete_by_entry_index(d, hash, i, TYPE_SHORT) - elif IS_64BIT and d.lookup_function_no == FUNC_INT: + elif IS_64BIT and fun == FUNC_INT: ll_dict_delete_by_entry_index(d, hash, i, TYPE_INT) - elif d.lookup_function_no == FUNC_LONG: + elif fun == FUNC_LONG: ll_dict_delete_by_entry_index(d, hash, i, TYPE_LONG) else: assert False @@ -645,7 +654,7 @@ # full, so we know that 'd.num_live_items' should be at most 2/3 * 256 # (or 65536 or etc.) so after the ll_dict_remove_deleted_items() below # at least 1/3rd items in 'd.entries' are free. 
- fun = d.lookup_function_no + fun = d.lookup_function_no & FUNC_MASK toobig = False if fun == FUNC_BYTE: assert d.num_live_items < ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES) @@ -783,7 +792,9 @@ else: ll_malloc_indexes_and_choose_lookup(d, new_size) d.resize_counter = new_size * 2 - d.num_live_items * 3 - assert d.resize_counter > 0 + ll_assert(d.resize_counter > 0, "reindex: resize_counter <= 0") + ll_assert((d.lookup_function_no >> FUNC_SHIFT) == 0, + "reindex: lookup_fun >> SHIFT") # entries = d.entries i = 0 @@ -999,7 +1010,8 @@ def ll_dictiter(ITERPTR, d): iter = lltype.malloc(ITERPTR.TO) iter.dict = d - iter.index = 0 + # initialize the index with usually 0, but occasionally a larger value + iter.index = d.lookup_function_no >> FUNC_SHIFT return iter @jit.look_inside_iff(lambda iter: jit.isvirtual(iter) @@ -1018,6 +1030,17 @@ if entries.valid(index): iter.index = nextindex return index + else: + # In case of repeated iteration over the start of + # a dict where the items get removed, like + # collections.OrderedDict.popitem(last=False), + # the hack below will increase the value stored in + # the high bits of lookup_function_no and so the + # next iteration will start at a higher value. + # We should carefully reset these high bits to zero + # as soon as we do something like ll_dict_reindex(). 
+ if index == (dict.lookup_function_no >> FUNC_SHIFT): + dict.lookup_function_no += (1 << FUNC_SHIFT) index = nextindex # clear the reference to the dict and prevent restarts iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -160,6 +160,22 @@ assert ll_elem.item1 == 1 py.test.raises(KeyError, rordereddict.ll_dict_popitem, TUP, ll_d) + def test_popitem_first(self): + DICT = self._get_str_dict() + ll_d = rordereddict.ll_newdict(DICT) + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2) + rordereddict.ll_dict_setitem(ll_d, llstr("m"), 3) + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + for expected in ["k", "j", "m"]: + ll_iter = rordereddict.ll_dictiter(ITER, ll_d) + num = rordereddict._ll_dictnext(ll_iter) + ll_key = ll_d.entries[num].key + assert hlstr(ll_key) == expected + rordereddict.ll_dict_delitem(ll_d, ll_key) + ll_iter = rordereddict.ll_dictiter(ITER, ll_d) + py.test.raises(StopIteration, rordereddict._ll_dictnext, ll_iter) + def test_direct_enter_and_del(self): def eq(a, b): return a == b From noreply at buildbot.pypy.org Sat Jan 17 09:28:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 09:28:19 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix Message-ID: <20150117082819.67A2B1C01E7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75408:34e06d532620 Date: 2015-01-17 09:28 +0100 http://bitbucket.org/pypy/pypy/changeset/34e06d532620/ Log: fix diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1077,7 +1077,7 @@ def emit_guard_call_release_gil(self, op, guard_op, arglocs, regalloc, fcond): numargs = 
op.numargs() - callargs = arglocs[:numargs + 4] # extract the arguments to the call + callargs = arglocs[:numargs + 3] # extract the arguments to the call guardargs = arglocs[len(callargs):] # extrat the arguments for the guard self._store_force_index(guard_op) self._emit_call(op, callargs, is_call_release_gil=True) From noreply at buildbot.pypy.org Sat Jan 17 10:55:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 10:55:22 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Add some 'save_err' to ropenssl, as seemingly needed by pypy/module/_ssl. Message-ID: <20150117095522.A0CA31C02BD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75409:2c55fbd16f12 Date: 2015-01-17 10:53 +0100 http://bitbucket.org/pypy/pypy/changeset/2c55fbd16f12/ Log: Add some 'save_err' to ropenssl, as seemingly needed by pypy/module/_ssl. diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -640,7 +640,7 @@ rffi.VOIDP, rwin32.DWORD, rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rffi.VOIDP, rffi.VOIDP], - rffi.INT) + rffi.INT, save_err=SAVE_ERR) tcp_keepalive = cConfig.tcp_keepalive WSAPROTOCOL_INFO = cConfig.WSAPROTOCOL_INFO diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -5,7 +5,7 @@ from rpython.translator.platform import platform from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib._rsocket_rffi import MAX_FD_SIZE +from rpython.rlib._rsocket_rffi import MAX_FD_SIZE, SAVE_ERR if sys.platform == 'win32' and platform.name != 'mingw32': @@ -211,7 +211,9 @@ ssl_external('SSL_CTX_get_verify_mode', [SSL_CTX], rffi.INT) ssl_external('SSL_CTX_set_default_verify_paths', [SSL_CTX], rffi.INT) ssl_external('SSL_CTX_set_cipher_list', [SSL_CTX, rffi.CCHARP], rffi.INT) 
-ssl_external('SSL_CTX_load_verify_locations', [SSL_CTX, rffi.CCHARP, rffi.CCHARP], rffi.INT) +ssl_external('SSL_CTX_load_verify_locations', + [SSL_CTX, rffi.CCHARP, rffi.CCHARP], rffi.INT, + save_err=SAVE_ERR) ssl_external('SSL_CTX_check_private_key', [SSL_CTX], rffi.INT) ssl_external('SSL_CTX_set_session_id_context', [SSL_CTX, rffi.CCHARP, rffi.UINT], rffi.INT) SSL_CTX_STATS_NAMES = """ @@ -231,8 +233,8 @@ ssl_external('SSL_set_connect_state', [SSL], lltype.Void) ssl_external('SSL_set_accept_state', [SSL], lltype.Void) ssl_external('SSL_connect', [SSL], rffi.INT) -ssl_external('SSL_do_handshake', [SSL], rffi.INT) -ssl_external('SSL_shutdown', [SSL], rffi.INT) +ssl_external('SSL_do_handshake', [SSL], rffi.INT, save_err=SAVE_ERR) +ssl_external('SSL_shutdown', [SSL], rffi.INT, save_err=SAVE_ERR) ssl_external('SSL_get_error', [SSL, rffi.INT], rffi.INT) ssl_external('SSL_get_shutdown', [SSL], rffi.INT) ssl_external('SSL_set_read_ahead', [SSL, rffi.INT], lltype.Void) @@ -246,7 +248,7 @@ ssl_external('X509_NAME_get_entry', [X509_NAME, rffi.INT], X509_NAME_ENTRY) ssl_external('X509_NAME_ENTRY_get_object', [X509_NAME_ENTRY], ASN1_OBJECT) ssl_external('X509_NAME_ENTRY_get_data', [X509_NAME_ENTRY], ASN1_STRING) -ssl_external('i2d_X509', [X509, rffi.CCHARPP], rffi.INT) +ssl_external('i2d_X509', [X509, rffi.CCHARPP], rffi.INT, save_err=SAVE_ERR) ssl_external('X509_free', [X509], lltype.Void, releasegil=False) ssl_external('X509_get_notBefore', [X509], ASN1_TIME, macro=True) ssl_external('X509_get_notAfter', [X509], ASN1_TIME, macro=True) @@ -258,10 +260,12 @@ ssl_external('OBJ_obj2txt', - [rffi.CCHARP, rffi.INT, ASN1_OBJECT, rffi.INT], rffi.INT) + [rffi.CCHARP, rffi.INT, ASN1_OBJECT, rffi.INT], rffi.INT, + save_err=SAVE_ERR) ssl_external('ASN1_STRING_data', [ASN1_STRING], rffi.CCHARP) ssl_external('ASN1_STRING_length', [ASN1_STRING], rffi.INT) -ssl_external('ASN1_STRING_to_UTF8', [rffi.CCHARPP, ASN1_STRING], rffi.INT) +ssl_external('ASN1_STRING_to_UTF8', [rffi.CCHARPP, 
ASN1_STRING], rffi.INT, + save_err=SAVE_ERR) ssl_external('ASN1_TIME_print', [BIO, ASN1_TIME], rffi.INT) ssl_external('i2a_ASN1_INTEGER', [BIO, ASN1_INTEGER], rffi.INT) ssl_external('ASN1_item_d2i', @@ -293,9 +297,12 @@ ssl_external('CRYPTO_free', [rffi.VOIDP], lltype.Void) libssl_OPENSSL_free = libssl_CRYPTO_free -ssl_external('SSL_write', [SSL, rffi.CCHARP, rffi.INT], rffi.INT) -ssl_external('SSL_pending', [SSL], rffi.INT) -ssl_external('SSL_read', [SSL, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('SSL_write', [SSL, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=SAVE_ERR) +ssl_external('SSL_pending', [SSL], rffi.INT, + save_err=SAVE_ERR) +ssl_external('SSL_read', [SSL, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=SAVE_ERR) BIO_METHOD = rffi.COpaquePtr('BIO_METHOD') ssl_external('BIO_s_mem', [], BIO_METHOD) @@ -305,7 +312,8 @@ ssl_external('BIO_free', [BIO], rffi.INT) ssl_external('BIO_reset', [BIO], rffi.INT, macro=True) ssl_external('BIO_read_filename', [BIO, rffi.CCHARP], rffi.INT, macro=True) -ssl_external('BIO_gets', [BIO, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('BIO_gets', [BIO, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=SAVE_ERR) ssl_external('PEM_read_bio_X509_AUX', [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) From noreply at buildbot.pypy.org Sat Jan 17 12:07:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 12:07:57 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: test and fix Message-ID: <20150117110757.E3AFE1C0B4D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75410:6069cfccdda9 Date: 2015-01-17 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/6069cfccdda9/ Log: test and fix diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -747,7 +747,12 @@ if ENTRIES.must_clear_value: entry.value = 
lltype.nullptr(ENTRY.value.TO) - if index == d.num_ever_used_items - 1: + if d.num_live_items == 0: + # Dict is now empty. Reset these fields. + d.num_ever_used_items = 0 + d.lookup_function_no &= FUNC_MASK + + elif index == d.num_ever_used_items - 1: # The last element of the ordereddict has been deleted. Instead of # simply marking the item as dead, we can safely reuse it. Since it's # also possible that there are more dead items immediately behind the diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -176,6 +176,22 @@ ll_iter = rordereddict.ll_dictiter(ITER, ll_d) py.test.raises(StopIteration, rordereddict._ll_dictnext, ll_iter) + def test_popitem_first_bug(self): + DICT = self._get_str_dict() + ll_d = rordereddict.ll_newdict(DICT) + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1) + rordereddict.ll_dict_delitem(ll_d, llstr("k")) + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + ll_iter = rordereddict.ll_dictiter(ITER, ll_d) + num = rordereddict._ll_dictnext(ll_iter) + ll_key = ll_d.entries[num].key + assert hlstr(ll_key) == "j" + assert ll_d.lookup_function_no == 4 # 1 free item found at the start + rordereddict.ll_dict_delitem(ll_d, llstr("j")) + assert ll_d.num_ever_used_items == 0 + assert ll_d.lookup_function_no == 0 # reset + def test_direct_enter_and_del(self): def eq(a, b): return a == b From noreply at buildbot.pypy.org Sat Jan 17 14:37:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 14:37:19 +0100 (CET) Subject: [pypy-commit] pypy default: Issue #1963 fix Message-ID: <20150117133719.B66E81C0117@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75411:abdf46d950ba Date: 2015-01-17 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/abdf46d950ba/ Log: Issue #1963 fix diff --git 
a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -254,11 +254,15 @@ return rep def visit_Name(self, name): - # Turn loading None into a constant lookup. Eventaully, we can do this - # for True and False, too. + # Turn loading None into a constant lookup. We cannot do this + # for True and False, because rebinding them is allowed (2.7). if name.id == "None": - assert name.ctx == ast.Load - return ast.Const(self.space.w_None, name.lineno, name.col_offset) + # The compiler refuses to parse "None = ...", but "del None" + # is allowed (if pointless). Check anyway: custom asts that + # correspond to "None = ..." can be made by hand. + if name.ctx == ast.Load: + return ast.Const(self.space.w_None, name.lineno, + name.col_offset) return name def visit_Tuple(self, tup): diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -654,6 +654,18 @@ assert ex.match(space, space.w_SyntaxError) assert 'hello_world' in space.str_w(space.str(ex.get_w_value(space))) + def test_del_None(self): + snippet = '''if 1: + try: + del None + except NameError: + pass + ''' + code = self.compiler.compile(snippet, '', 'exec', 0) + space = self.space + w_d = space.newdict() + space.exec_(code, w_d, w_d) + class TestPythonAstCompiler_25_grammar(BaseTestCompiler): def setup_method(self, method): From noreply at buildbot.pypy.org Sat Jan 17 15:35:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 15:35:25 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Fix archaism Message-ID: <20150117143525.5E49E1C0354@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5487:379b7d2c6dd7 Date: 2015-01-17 15:35 +0100 http://bitbucket.org/pypy/extradoc/changeset/379b7d2c6dd7/ Log: Fix archaism diff --git 
a/sprintinfo/leysin-winter-2015/announcement.txt b/sprintinfo/leysin-winter-2015/announcement.txt --- a/sprintinfo/leysin-winter-2015/announcement.txt +++ b/sprintinfo/leysin-winter-2015/announcement.txt @@ -46,7 +46,7 @@ Leysin, Switzerland, "same place as before". Let me refresh your memory: both the sprint venue and the lodging will be in a very spacious pair of chalets built specifically for bed & breakfast: -http://www.ermina.ch/. The place has a good ADSL Internet connexion +http://www.ermina.ch/. The place has a good ADSL Internet connection with wireless installed. You can of course arrange your own lodging anywhere (as long as you are in Leysin, you cannot be more than a 15 minutes walk away from the sprint venue), but I definitely recommend From noreply at buildbot.pypy.org Sat Jan 17 16:14:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 16:14:37 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Fix the order of _GetLastError(). See comments Message-ID: <20150117151437.991781C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75412:b2951c6490bc Date: 2015-01-17 16:14 +0100 http://bitbucket.org/pypy/pypy/changeset/b2951c6490bc/ Log: Fix the order of _GetLastError(). 
See comments diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -111,23 +111,30 @@ @specialize.call_location() def _errno_before(save_err): - if WIN32 and (save_err & rffi.RFFI_READSAVED_LASTERROR): - from rpython.rlib import rthread, rwin32 - rwin32._SetLastError(rthread.tlfield_rpy_lasterror.getraw()) if save_err & rffi.RFFI_READSAVED_ERRNO: from rpython.rlib import rthread _set_errno(rthread.tlfield_rpy_errno.getraw()) elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: _set_errno(rffi.cast(rffi.INT, 0)) + if WIN32 and (save_err & rffi.RFFI_READSAVED_LASTERROR): + from rpython.rlib import rthread, rwin32 + err = rthread.tlfield_rpy_lasterror.getraw() + # careful, getraw() overwrites GetLastError. + # We must assign it with _SetLastError() as the last + # operation, i.e. after the errno handling. + rwin32._SetLastError(err) @specialize.call_location() def _errno_after(save_err): + if WIN32 and (save_err & rffi.RFFI_SAVE_LASTERROR): + from rpython.rlib import rthread, rwin32 + err = rwin32._GetLastError() + # careful, setraw() overwrites GetLastError. + # We must read it first, before the errno handling. 
+ rthread.tlfield_rpy_lasterror.setraw(err) if save_err & rffi.RFFI_SAVE_ERRNO: from rpython.rlib import rthread rthread.tlfield_rpy_errno.setraw(_get_errno()) - if WIN32 and (save_err & rffi.RFFI_SAVE_LASTERROR): - from rpython.rlib import rthread, rwin32 - rthread.tlfield_rpy_lasterror.setraw(rwin32._GetLastError()) if os.name == 'nt': From noreply at buildbot.pypy.org Sat Jan 17 18:03:16 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 18:03:16 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Trying to overallocate dicts a little bit more eagerly Message-ID: <20150117170316.1DA9B1C041B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75413:4a6947ecc8fe Date: 2015-01-17 18:03 +0100 http://bitbucket.org/pypy/pypy/changeset/4a6947ecc8fe/ Log: Trying to overallocate dicts a little bit more eagerly diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -617,18 +617,13 @@ def _overallocate_entries_len(baselen): # This over-allocates proportional to the list size, making room - # for additional growth. The over-allocation is mild, but is - # enough to give linear-time amortized behavior over a long - # sequence of appends() in the presence of a poorly-performing - # system malloc(). - # The growth pattern is: 0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ... - newsize = baselen + 1 - if newsize < 9: - some = 3 - else: - some = 6 - some += newsize >> 3 - return newsize + some + # for additional growth. This over-allocates slightly more eagerly + # than with regular lists. The idea is that there are many more + # lists than dicts around in PyPy, and dicts of 5 to 8 items are + # not that rare (so a single jump from 0 to 8 is a good idea). + # The growth pattern is: 0, 8, 17, 27, 38, 50, 64, 80, 98, ... 
+ newsize = baselen + (baselen >> 3) + return newsize + 8 @jit.look_inside_iff(lambda d: jit.isvirtual(d)) def ll_dict_grow(d): @@ -947,7 +942,9 @@ # # Irregular operations. -DICT_INITSIZE = 8 +# Start the hashtable size at 16 rather than 8, as with rdict.py, because +# it is only an array of bytes +DICT_INITSIZE = 16 @specialize.memo() diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -115,11 +115,18 @@ rordereddict.ll_dict_setitem(ll_d, llstr("b"), 2) rordereddict.ll_dict_setitem(ll_d, llstr("c"), 3) rordereddict.ll_dict_setitem(ll_d, llstr("d"), 4) - assert len(get_indexes(ll_d)) == 8 rordereddict.ll_dict_setitem(ll_d, llstr("e"), 5) rordereddict.ll_dict_setitem(ll_d, llstr("f"), 6) - assert len(get_indexes(ll_d)) == 32 - for item in ['a', 'b', 'c', 'd', 'e', 'f']: + rordereddict.ll_dict_setitem(ll_d, llstr("g"), 7) + rordereddict.ll_dict_setitem(ll_d, llstr("h"), 8) + rordereddict.ll_dict_setitem(ll_d, llstr("i"), 9) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 10) + assert len(get_indexes(ll_d)) == 16 + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 11) + rordereddict.ll_dict_setitem(ll_d, llstr("l"), 12) + rordereddict.ll_dict_setitem(ll_d, llstr("m"), 13) + assert len(get_indexes(ll_d)) == 64 + for item in 'abcdefghijklm': assert rordereddict.ll_dict_getitem(ll_d, llstr(item)) == ord(item) - ord('a') + 1 def test_dict_grow_cleanup(self): From noreply at buildbot.pypy.org Sat Jan 17 19:00:19 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 17 Jan 2015 19:00:19 +0100 (CET) Subject: [pypy-commit] pypy default: add windows link Message-ID: <20150117180019.138341C0117@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75414:cc624edc31ce Date: 2015-01-17 20:00 +0200 http://bitbucket.org/pypy/pypy/changeset/cc624edc31ce/ Log: add windows link diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst 
--- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -47,6 +47,11 @@ Install build-time dependencies ------------------------------- +(**Note**: for some hints on how to translate the Python interpreter under +Windows, see the `windows document`_) + +.. _`windows document`: windows.html + To build PyPy on Unix using the C translation backend, you need at least a C compiler and ``make`` installed. Further, some optional modules have additional From noreply at buildbot.pypy.org Sat Jan 17 20:25:10 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Jan 2015 20:25:10 +0100 (CET) Subject: [pypy-commit] pypy default: Skip a test which doesn't make sense any more since adc6ab4ae74d Message-ID: <20150117192510.8E9AB1C0103@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75415:a0ef99cf3818 Date: 2015-01-17 20:25 +0100 http://bitbucket.org/pypy/pypy/changeset/a0ef99cf3818/ Log: Skip a test which doesn't make sense any more since adc6ab4ae74d diff --git a/rpython/jit/backend/arm/test/test_regalloc_mov.py b/rpython/jit/backend/arm/test/test_regalloc_mov.py --- a/rpython/jit/backend/arm/test/test_regalloc_mov.py +++ b/rpython/jit/backend/arm/test/test_regalloc_mov.py @@ -503,7 +503,6 @@ def test_unsupported(self): py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), imm(1))') py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), imm_float(1))') - py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), r(1))') py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), vfp(1))') py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), stack(1))') py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), stack_float(1))') From noreply at buildbot.pypy.org Sat Jan 17 20:50:14 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 17 Jan 2015 20:50:14 +0100 (CET) Subject: [pypy-commit] pypy default: improve windows doc Message-ID: 
<20150117195014.667091C00F7@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75416:c13a109707ab Date: 2015-01-17 21:49 +0200 http://bitbucket.org/pypy/pypy/changeset/c13a109707ab/ Log: improve windows doc diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -78,6 +78,7 @@ Then you need to execute:: + \vc\vcvars.bat editbin /largeaddressaware translator.exe where ``translator.exe`` is the pypy.exe or cpython.exe you will use to @@ -96,7 +97,7 @@ Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------------------------------- Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local_2.4.zip @@ -110,7 +111,13 @@ set INCLUDE=\include;\tcltk\include;%INCLUDE% set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. If you choose this method, you do not need +to download/build anything else. + +Nonabrided method (building from scratch) +----------------------------------------- + +If you want to, you can rebuild everything from scratch by continuing. 
The Boehm garbage collector From noreply at buildbot.pypy.org Sun Jan 18 10:45:16 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 10:45:16 +0100 (CET) Subject: [pypy-commit] pypy all_ordered_dicts: Close branch ready for merge Message-ID: <20150118094516.8E4311C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: all_ordered_dicts Changeset: r75417:9644f7a2c66a Date: 2015-01-18 10:36 +0100 http://bitbucket.org/pypy/pypy/changeset/9644f7a2c66a/ Log: Close branch ready for merge From noreply at buildbot.pypy.org Sun Jan 18 10:45:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 10:45:19 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge all_ordered_dicts Message-ID: <20150118094519.1BC611C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75418:c7425aab5b56 Date: 2015-01-18 10:45 +0100 http://bitbucket.org/pypy/pypy/changeset/c7425aab5b56/ Log: hg merge all_ordered_dicts This makes ordered dicts the default dictionary implementation in RPython and in PyPy. It polishes the basic idea of rordereddict.py and then fixes various things, up to simplifying collections.OrderedDict. (Work started with ltratt.) Note: Python programs can rely on the guaranteed dict order in PyPy now, but for compatibility with other Python implementations they should still use collections.OrderedDict where that really matters. Also, support for reversed() was *not* added to the 'dict' class; use OrderedDict. Benchmark results: in the noise. A few benchmarks see good speed improvements but the average is very close to parity. 
diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -17,6 +17,10 @@ except ImportError: assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} +try: + from __pypy__ import reversed_dict +except ImportError: + reversed_dict = lambda d: reversed(d.keys()) try: from thread import get_ident as _get_ident @@ -29,142 +33,35 @@ ################################################################################ class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as regular dictionaries. + '''Dictionary that remembers insertion order. - # The internal self.__map dict maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + In PyPy all dicts are ordered anyway. This is mostly useful as a + placeholder to mean "this dict must be ordered even on CPython". - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. The signature is the same as - regular dictionaries, but keyword arguments are not recommended because - their insertion order is arbitrary. 
- - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link at the end of the linked list, - # and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - return dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which gets - # removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link_prev, link_next, _ = self.__map.pop(key) - link_prev[1] = link_next # update link_prev[NEXT] - link_next[0] = link_prev # update link_next[PREV] - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - # Traverse the linked list in order. - root = self.__root - curr = root[1] # start at the first node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[1] # move to next node + Known difference: iterating over an OrderedDict which is being + concurrently modified raises RuntimeError in PyPy. In CPython + instead we get some behavior that appears reasonable in some + cases but is nonsensical in other cases. This is officially + forbidden by the CPython docs, so we forbid it explicitly for now. + ''' def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - # Traverse the linked list in reverse order. 
- root = self.__root - curr = root[0] # start at the last node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[0] # move to previous node - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - dict.clear(self) - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) pairs in od' - for k in self: - yield (k, self[k]) - - update = MutableMapping.update - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding - value. If key is not found, d is returned if given, otherwise KeyError - is raised. - - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default + return reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' - if not self: - raise KeyError('dictionary is empty') - key = next(reversed(self) if last else iter(self)) - value = self.pop(key) - return key, value + if last: + return dict.popitem(self) + else: + it = dict.__iter__(self) + try: + k = it.next() + except StopIteration: + raise KeyError('dictionary is empty') + return (k, self.pop(k)) def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' @@ -183,8 +80,6 @@ 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) @@ -193,17 +88,6 @@ 'od.copy() -> a shallow copy of od' return self.__class__(self) - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. - If not specified, the value defaults to None. - - ''' - self = cls() - for key in iterable: - self[key] = value - return self - def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. diff --git a/lib-python/2.7/test/test_collections.py b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -578,7 +578,12 @@ def __repr__(self): return "MySet(%s)" % repr(list(self)) s = MySet([5,43,2,1]) - self.assertEqual(s.pop(), 1) + # changed from CPython 2.7: it was "s.pop() == 1" but I see + # nothing that guarantees a particular order here. In the + # 'all_ordered_dicts' branch of PyPy (or with OrderedDict + # instead of sets), it consistently returns 5, but this test + # should not rely on this or any other order. 
+ self.assert_(s.pop() in [5,43,2,1]) def test_issue8750(self): empty = WithSet() @@ -1010,8 +1015,9 @@ c=3, e=5).items()), pairs) # mixed input # make sure no positional args conflict with possible kwdargs - self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, - ['self']) + if '__init__' in OrderedDict.__dict__: # absent in PyPy + self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, + ['self']) # Make sure that direct calls to __init__ do not clear previous contents d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)]) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -78,6 +78,7 @@ 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', + 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -30,3 +30,17 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) + +def reversed_dict(space, w_obj): + """Enumerate the keys in a dictionary object in reversed order. + + This is a __pypy__ function instead of being simply done by calling + reversed(), for CPython compatibility: dictionaries are only ordered + on PyPy. You should use the collections.OrderedDict class for cases + where ordering is important. That class implements __reversed__ by + calling __pypy__.reversed_dict(). 
+ """ + from pypy.objspace.std.dictmultiobject import W_DictMultiObject + if not isinstance(w_obj, W_DictMultiObject): + raise OperationError(space.w_TypeError, space.w_None) + return w_obj.nondescr_reversed_dict(space) diff --git a/pypy/module/_multibytecodec/test/test_translation.py b/pypy/module/_multibytecodec/test/test_translation.py --- a/pypy/module/_multibytecodec/test/test_translation.py +++ b/pypy/module/_multibytecodec/test/test_translation.py @@ -1,8 +1,11 @@ from pypy.module._multibytecodec import c_codecs from rpython.translator.c.test import test_standalone +from rpython.config.translationoption import get_combined_translation_config class TestTranslation(test_standalone.StandaloneTests): + config = get_combined_translation_config(translating=True) + config.translation.gc = 'boehm' def test_translation(self): # diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -43,9 +43,9 @@ # can't change ;) assert loop.match_by_id("getitem", """ ... - i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) + i26 = call(ConstClass(ll_call_lookup_function), p18, p6, i25, 0, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -68,25 +68,29 @@ guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) 
- p15 = new_array_clear(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + p15 = new_array_clear(8, descr=) {{{ - setfield_gc(p13, 16, descr=) - setfield_gc(p13, 0, descr=) + setfield_gc(p13, 0, descr=) + setfield_gc(p13, p15, descr=) + setfield_gc(p13, ConstPtr(0), descr=) + }}} + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 1, descr=) + {{{ + setfield_gc(p13, 0, descr=) + setfield_gc(p13, 0, descr=) + setfield_gc(p13, 16, descr=) }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) - i26 = int_and(i23, #) - i27 = int_is_true(i26) + i27 = int_lt(i23, 0) guard_false(i27, descr=...) p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -151,15 +151,13 @@ assert loop.match_by_id('loadattr1', ''' guard_not_invalidated(descr=...) - i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) + i19 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) - i21 = int_and(i19, _) - i22 = int_is_true(i21) + i22 = int_lt(i19, 0) guard_true(i22, descr=...) - i26 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) 
+ i26 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) - i28 = int_and(i26, _) - i29 = int_is_true(i28) + i29 = int_lt(i26, 0) guard_true(i29, descr=...) ''') assert loop.match_by_id('loadattr2', "") # completely folded away diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -258,6 +258,17 @@ """D.itervalues() -> an iterator over the values of D""" return W_DictMultiIterValuesObject(space, self.itervalues()) + def nondescr_reversed_dict(self, space): + """Not exposed directly to app-level, but via __pypy__.reversed_dict(). + """ + if self.strategy.has_iterreversed: + it = self.strategy.iterreversed(self) + return W_DictMultiIterKeysObject(space, it) + else: + # fall-back + w_keys = self.w_keys() + return space.call_method(w_keys, '__reversed__') + def descr_viewitems(self, space): """D.viewitems() -> a set-like object providing a view on D's items""" return W_DictViewItemsObject(space, self) @@ -503,6 +514,9 @@ def getiteritems(self, w_dict): raise NotImplementedError + has_iterreversed = False + # no 'getiterreversed': no default implementation available + def rev_update1_dict_dict(self, w_dict, w_updatedict): iteritems = self.iteritems(w_dict) while True: @@ -623,6 +637,9 @@ def getiteritems(self, w_dict): return iter([]) + def getiterreversed(self, w_dict): + return iter([]) + # Iterator Implementation base classes @@ -747,6 +764,17 @@ else: return None, None + class IterClassReversed(BaseKeyIterator): + def __init__(self, space, strategy, impl): + self.iterator = strategy.getiterreversed(impl) + BaseIteratorImplementation.__init__(self, space, strategy, impl) + + def next_key_entry(self): + for key in self.iterator: + return wrapkey(self.space, key) + else: + return None + def iterkeys(self, w_dict): return IterClassKeys(self.space, self, w_dict) @@ -756,6 +784,12 @@ def iteritems(self, 
w_dict): return IterClassItems(self.space, self, w_dict) + if hasattr(dictimpl, 'getiterreversed'): + def iterreversed(self, w_dict): + return IterClassReversed(self.space, self, w_dict) + dictimpl.iterreversed = iterreversed + dictimpl.has_iterreversed = True + @jit.look_inside_iff(lambda self, w_dict, w_updatedict: w_dict_unrolling_heuristic(w_dict)) def rev_update1_dict_dict(self, w_dict, w_updatedict): @@ -932,6 +966,9 @@ def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() + def getiterreversed(self, w_dict): + return objectmodel.reversed_dict(self.unerase(w_dict.dstorage)) + def prepare_update(self, w_dict, num_extra): objectmodel.prepare_dict_update(self.unerase(w_dict.dstorage), num_extra) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -254,6 +254,21 @@ values.append(k) assert values == d.values() + def test_reversed_dict(self): + import __pypy__ + for d in [{}, {1: 2, 3: 4, 5: 6}, {"a": 5, "b": 2, "c": 6}]: + assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1] + raises(TypeError, __pypy__.reversed_dict, 42) + + def test_reversed_dict_runtimeerror(self): + import __pypy__ + d = {1: 2, 3: 4, 5: 6} + it = __pypy__.reversed_dict(d) + key = it.next() + assert key in [1, 3, 5] + del d[key] + raises(RuntimeError, it.next) + def test_keys(self): d = {1: 2, 3: 4} kys = d.keys() diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -700,6 +700,15 @@ del a.x raises(AttributeError, "a.x") + def test_reversed_dict(self): + import __pypy__ + class X(object): + pass + x = X(); x.a = 10; x.b = 20; x.c = 30 + d = x.__dict__ + assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1] + + class AppTestWithMapDictAndCounters(object): spaceconfig = 
{"objspace.std.withmapdict": True, "objspace.std.withmethodcachecounter": True} diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -392,6 +392,8 @@ assert isinstance(dct2, SomeOrderedDict), "OrderedDict.update(dict) not allowed" dct1.dictdef.union(dct2.dictdef) +SomeDict = SomeOrderedDict # all dicts are ordered! + class SomeIterator(SomeObject): "Stands for an iterator returning objects from a given container." diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1877,7 +1877,7 @@ return None a = self.RPythonAnnotator() s = a.build_types(f, [int]) - assert s.knowntype == dict + assert s.knowntype == annmodel.SomeOrderedDict.knowntype def test_const_list_and_none(self): def g(l=None): diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -2,11 +2,12 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.optimizeopt.util import args_dict -from rpython.jit.metainterp.history import Const +from rpython.jit.metainterp.history import Const, ConstInt from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization,\ MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.optimizeopt.intutils import IntBound from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.objectmodel import we_are_translated @@ -325,6 +326,29 @@ self.emit_operation(op) def _optimize_CALL_DICT_LOOKUP(self, op): + # Cache consecutive lookup() calls on the same dict 
and key, + # depending on the 'flag_store' argument passed: + # FLAG_LOOKUP: always cache and use the cached result. + # FLAG_STORE: don't cache (it might return -1, which would be + # incorrect for future lookups); but if found in + # the cache and the cached value was already checked + # non-negative, then we can reuse it. + # FLAG_DELETE: never cache, never use the cached result (because + # if there is a cached result, the FLAG_DELETE call + # is needed for its side-effect of removing it). + # In theory we could cache a -1 for the case where + # the delete is immediately followed by a lookup, + # but too obscure. + # + from rpython.rtyper.lltypesystem.rordereddict import FLAG_LOOKUP + from rpython.rtyper.lltypesystem.rordereddict import FLAG_STORE + flag_value = self.getvalue(op.getarg(4)) + if not flag_value.is_constant(): + return False + flag = flag_value.get_constant_int() + if flag != FLAG_LOOKUP and flag != FLAG_STORE: + return False + # descrs = op.getdescr().get_extra_info().extradescrs assert descrs # translation hint descr1 = descrs[0] @@ -333,13 +357,20 @@ except KeyError: d = self.cached_dict_reads[descr1] = args_dict() self.corresponding_array_descrs[descrs[1]] = descr1 - args = self.optimizer.make_args_key(op) + # + key = [self.optimizer.get_box_replacement(op.getarg(1)), # dict + self.optimizer.get_box_replacement(op.getarg(2))] # key + # other args can be ignored here (hash, store_flag) try: - res_v = d[args] + res_v = d[key] except KeyError: - d[args] = self.getvalue(op.result) + if flag == FLAG_LOOKUP: + d[key] = self.getvalue(op.result) return False else: + if flag != FLAG_LOOKUP: + if not res_v.getintbound().known_ge(IntBound(0, 0)): + return False self.make_equal_to(op.result, res_v) self.last_emitted_operation = REMOVED return True diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ 
b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -148,6 +148,12 @@ if self.getlevel() < LEVEL_NONNULL: self.setlevel(LEVEL_NONNULL) + def get_constant_int(self): + assert self.is_constant() + box = self.box + assert isinstance(box, ConstInt) + return box.getint() + def is_virtual(self): # Don't check this with 'isinstance(_, VirtualValue)'! # Even if it is a VirtualValue, the 'box' can be non-None, diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -181,15 +181,21 @@ n = d[y] return d[Wrapper(str(n + 1))] + # XXX unsure I see the point of this test: the repeated + # dict lookup is *not* elided so far, and the test happens to + # check this... with rdict.py, it's a write followed by a read, + # where the dict cache is thrown away after the first lookup + # (correctly: we don't want the two lookups to return the exact + # same result!). With rordereddict.py, FLAG_STORE lookups are + # not cached anyway. res = self.meta_interp(f, [100], listops=True) assert res == f(50) self.check_resops({'new_array_clear': 2, 'getfield_gc': 2, - 'guard_true': 2, 'jump': 1, + 'guard_true': 4, 'jump': 1, 'new_with_vtable': 2, 'getinteriorfield_gc': 2, - 'setfield_gc': 8, 'int_gt': 2, 'int_sub': 2, - 'call': 10, 'int_and': 2, - 'guard_no_exception': 8, 'new': 2, - 'guard_false': 2, 'int_is_true': 2}) + 'setfield_gc': 14, 'int_gt': 2, 'int_sub': 2, + 'call': 10, 'int_ge': 2, + 'guard_no_exception': 8, 'new': 2}) def test_unrolling_of_dict_iter(self): driver = JitDriver(greens = [], reds = ['n']) @@ -223,7 +229,7 @@ return s self.meta_interp(f, [10]) - # XXX should be one getinteriorfield_gc + # XXX should be one getinteriorfield_gc. At least it's one call. 
self.check_simple_loop(call=1, getinteriorfield_gc=2, guard_no_exception=1) @@ -244,7 +250,7 @@ return s self.meta_interp(f, [10]) - # XXX should be one getinteriorfield_gc + # XXX should be one getinteriorfield_gc. At least it's one call. self.check_simple_loop(call=1, getinteriorfield_gc=2, guard_no_exception=1) @@ -259,7 +265,7 @@ driver.jit_merge_point() index = indexes[n & 1] s += d[index] - d['aa'] += 1 # this will invalidate the index + d['aa'] = 13 # this will invalidate the index s += d[index] n -= 1 return s @@ -355,7 +361,7 @@ if n in mdict: raise Exception self.meta_interp(f, [10]) - self.check_simple_loop(call_may_force=0, call=3) + self.check_simple_loop(call_may_force=0, call=4) def test_dict_virtual(self): myjitdriver = JitDriver(greens = [], reds = 'auto') diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -406,6 +406,11 @@ return self.iseen_roots[value] = True + if isinstance(TYPE, lltype.GcOpaqueType): + self.consider_constant(lltype.typeOf(value.container), + value.container, gc) + return + if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)): typeid = self.get_type_id(TYPE) hdr = gc.gcheaderbuilder.new_header(value) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -753,6 +753,17 @@ dict._prepare_dict_update(n_elements) # ^^ call an extra method that doesn't exist before translation + at specialize.call_location() +def reversed_dict(d): + """Equivalent to reversed(ordered_dict), but works also for + regular dicts.""" + # note that there is also __pypy__.reversed_dict(), which we could + # try to use here if we're not translated and running on top of pypy, + # but that seems a bit pointless + if not we_are_translated(): + d = d.keys() + return reversed(d) + # ____________________________________________________________ diff --git 
a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -341,6 +341,21 @@ res = self.interpret(g, [3]) assert res == 42 # "did not crash" + def test_reversed_dict(self): + d1 = {2:3, 4:5, 6:7} + def g(): + n1 = 0 + for key in d1: + n1 = n1 * 10 + key + n2 = 0 + for key in reversed_dict(d1): + n2 = n2 * 10 + key + return n1 * 10000 + n2 + got = str(g()) + assert len(got) == 7 and got[3] == '0' and got[:3] == got[6:3:-1] + got = str(self.interpret(g, [])) + assert len(got) == 7 and got[3] == '0' and got[:3] == got[6:3:-1] + def test_compute_hash(self): class Foo(object): pass diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -740,6 +740,10 @@ return lltype.cast_opaque_ptr(RESTYPE, obj) op_cast_opaque_ptr.need_result_type = True + def op_length_of_simple_gcarray_from_opaque(self, obj): + checkptr(obj) + return lltype.length_of_simple_gcarray_from_opaque(obj) + def op_cast_ptr_to_adr(self, ptr): checkptr(ptr) return llmemory.cast_ptr_to_adr(ptr) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -396,6 +396,7 @@ 'direct_arrayitems': LLOp(canfold=True), 'direct_ptradd': LLOp(canfold=True), 'cast_opaque_ptr': LLOp(sideeffects=False), + 'length_of_simple_gcarray_from_opaque': LLOp(sideeffects=False), # __________ address operations __________ diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1025,6 +1025,30 @@ return SomePtr(ll_ptrtype=typeOf(cast_p)) +def length_of_simple_gcarray_from_opaque(opaque_ptr): + CURTYPE = typeOf(opaque_ptr) + if not isinstance(CURTYPE, Ptr): + raise TypeError("can 
only cast pointers to other pointers") + if not isinstance(CURTYPE.TO, GcOpaqueType): + raise TypeError("expected a GcOpaqueType") + try: + c = opaque_ptr._obj.container + except AttributeError: + # if 'opaque_ptr' is already some _llgcopaque, hack its length + # by casting it to a random GcArray type and hoping + from rpython.rtyper.lltypesystem import rffi + p = rffi.cast(Ptr(GcArray(Signed)), opaque_ptr) + return len(p) + else: + return c.getlength() + + at analyzer_for(length_of_simple_gcarray_from_opaque) +def ann_length_of_simple_gcarray_from_opaque(s_p): + assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p + assert isinstance(s_p.ll_ptrtype.TO, GcOpaqueType) + return SomeInteger(nonneg=True) + + def direct_fieldptr(structptr, fieldname): """Get a pointer to a field in the struct. The resulting pointer is actually of type Ptr(FixedSizeArray(FIELD, 1)). diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -34,16 +34,18 @@ # {byte, short, int, long} *indexes; # dictentry *entries; # lookup_function_no; # one of the four possible functions for different -# # size dicts +# # size dicts; the rest of the word is a counter for how +# # many 'entries' at the start are known to be deleted # (Function DICTKEY, DICTKEY -> bool) *fnkeyeq; # (Function DICTKEY -> int) *fnkeyhash; # } # # + at jit.look_inside_iff(lambda d, key, hash, flag: jit.isvirtual(d)) + at jit.oopspec('ordereddict.lookup(d, key, hash, flag)') def ll_call_lookup_function(d, key, hash, flag): - DICT = lltype.typeOf(d).TO - fun = d.lookup_function_no + fun = d.lookup_function_no & FUNC_MASK if fun == FUNC_BYTE: return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: @@ -408,6 +410,8 @@ IS_64BIT = sys.maxint != 2 ** 31 - 1 +FUNC_SHIFT = 2 +FUNC_MASK = 0x03 # two bits if IS_64BIT: FUNC_BYTE, FUNC_SHORT, FUNC_INT, 
FUNC_LONG = range(4) else: @@ -441,28 +445,46 @@ d.lookup_function_no = FUNC_LONG def ll_clear_indexes(d, n): - if n <= 256: + fun = d.lookup_function_no & FUNC_MASK + d.lookup_function_no = fun + if fun == FUNC_BYTE: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_BYTE, d.indexes)) - elif n <= 65536: + elif fun == FUNC_SHORT: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_SHORT, d.indexes)) - elif IS_64BIT and n <= 2 ** 32: + elif IS_64BIT and fun == FUNC_INT: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_INT, d.indexes)) + elif fun == FUNC_LONG: + rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes)) else: - rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes)) + assert False + at jit.dont_look_inside def ll_call_insert_clean_function(d, hash, i): - DICT = lltype.typeOf(d).TO - if d.lookup_function_no == FUNC_BYTE: + fun = d.lookup_function_no & FUNC_MASK + if fun == FUNC_BYTE: ll_dict_store_clean(d, hash, i, TYPE_BYTE) - elif d.lookup_function_no == FUNC_SHORT: + elif fun == FUNC_SHORT: ll_dict_store_clean(d, hash, i, TYPE_SHORT) - elif IS_64BIT and d.lookup_function_no == FUNC_INT: + elif IS_64BIT and fun == FUNC_INT: ll_dict_store_clean(d, hash, i, TYPE_INT) - elif d.lookup_function_no == FUNC_LONG: + elif fun == FUNC_LONG: ll_dict_store_clean(d, hash, i, TYPE_LONG) else: assert False +def ll_call_delete_by_entry_index(d, hash, i): + fun = d.lookup_function_no & FUNC_MASK + if fun == FUNC_BYTE: + ll_dict_delete_by_entry_index(d, hash, i, TYPE_BYTE) + elif fun == FUNC_SHORT: + ll_dict_delete_by_entry_index(d, hash, i, TYPE_SHORT) + elif IS_64BIT and fun == FUNC_INT: + ll_dict_delete_by_entry_index(d, hash, i, TYPE_INT) + elif fun == FUNC_LONG: + ll_dict_delete_by_entry_index(d, hash, i, TYPE_LONG) + else: + assert False + def ll_valid_from_flag(entries, i): return entries[i].f_valid @@ -513,7 +535,7 @@ def ll_dict_getitem(d, key): index = d.lookup_function(d, key, d.keyhash(key), FLAG_LOOKUP) - if index != -1: + if 
index >= 0: return d.entries[index].value else: raise KeyError @@ -572,6 +594,7 @@ _ll_dict_rescue._dont_inline_ = True def _ll_dict_insertclean(d, key, value, hash): + # never translated ENTRY = lltype.typeOf(d.entries).TO.OF ll_call_insert_clean_function(d, hash, d.num_ever_used_items) entry = d.entries[d.num_ever_used_items] @@ -590,25 +613,24 @@ # xxx Haaaack: returns len(d.indexes). Works independently of # the exact type pointed to by d, using a forced cast... # Must only be called by @jit.dont_look_inside functions. - return len(rffi.cast(DICTINDEX_BYTE, d.indexes)) + return lltype.length_of_simple_gcarray_from_opaque(d.indexes) def _overallocate_entries_len(baselen): # This over-allocates proportional to the list size, making room - # for additional growth. The over-allocation is mild, but is - # enough to give linear-time amortized behavior over a long - # sequence of appends() in the presence of a poorly-performing - # system malloc(). - # The growth pattern is: 0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ... - newsize = baselen + 1 - if newsize < 9: - some = 3 - else: - some = 6 - some += newsize >> 3 - return newsize + some + # for additional growth. This over-allocates slightly more eagerly + # than with regular lists. The idea is that there are many more + # lists than dicts around in PyPy, and dicts of 5 to 8 items are + # not that rare (so a single jump from 0 to 8 is a good idea). + # The growth pattern is: 0, 8, 17, 27, 38, 50, 64, 80, 98, ... + newsize = baselen + (baselen >> 3) + return newsize + 8 - at jit.dont_look_inside + at jit.look_inside_iff(lambda d: jit.isvirtual(d)) def ll_dict_grow(d): + # note: this @jit.look_inside_iff is here to inline the three lines + # at the end of this function. It's important because dicts start + # with a length-zero 'd.entries' which must be grown as soon as we + # insert an element. if d.num_live_items < d.num_ever_used_items // 2: # At least 50% of the allocated entries are dead, so perform a # compaction. 
If ll_dict_remove_deleted_items detects that over @@ -619,11 +641,29 @@ new_allocated = _overallocate_entries_len(len(d.entries)) - # Detect an obscure case where the indexes numeric type is too - # small to store all the entry indexes - if (max(128, _ll_len_of_d_indexes(d)) - new_allocated - < MIN_INDEXES_MINUS_ENTRIES): + # Detect a relatively rare case where the indexes numeric type is too + # small to store all the entry indexes: there would be 'new_allocated' + # entries, which may in corner cases be larger than 253 even though we + # have single bytes in 'd.indexes' (and the same for the larger + # boundaries). The 'd.indexes' hashtable is never more than 2/3rd + # full, so we know that 'd.num_live_items' should be at most 2/3 * 256 + # (or 65536 or etc.) so after the ll_dict_remove_deleted_items() below + # at least 1/3rd items in 'd.entries' are free. + fun = d.lookup_function_no & FUNC_MASK + toobig = False + if fun == FUNC_BYTE: + assert d.num_live_items < ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES) + toobig = new_allocated > ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES) + elif fun == FUNC_SHORT: + assert d.num_live_items < ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES) + toobig = new_allocated > ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES) + elif IS_64BIT and fun == FUNC_INT: + assert d.num_live_items < ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES) + toobig = new_allocated > ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES) + # + if toobig: ll_dict_remove_deleted_items(d) + assert d.num_live_items == d.num_ever_used_items return True newitems = lltype.malloc(lltype.typeOf(d).TO.entries.TO, new_allocated) @@ -631,6 +671,7 @@ d.entries = newitems return False + at jit.dont_look_inside def ll_dict_remove_deleted_items(d): if d.num_live_items < len(d.entries) // 4: # At least 75% of the allocated entries are dead, so shrink the memory @@ -684,7 +725,7 @@ def ll_dict_delitem(d, key): index = d.lookup_function(d, key, d.keyhash(key), FLAG_DELETE) - if index == -1: + if index < 0: raise KeyError 
_ll_dict_del(d, index) @@ -701,7 +742,12 @@ if ENTRIES.must_clear_value: entry.value = lltype.nullptr(ENTRY.value.TO) - if index == d.num_ever_used_items - 1: + if d.num_live_items == 0: + # Dict is now empty. Reset these fields. + d.num_ever_used_items = 0 + d.lookup_function_no &= FUNC_MASK + + elif index == d.num_ever_used_items - 1: # The last element of the ordereddict has been deleted. Instead of # simply marking the item as dead, we can safely reuse it. Since it's # also possible that there are more dead items immediately behind the @@ -746,7 +792,9 @@ else: ll_malloc_indexes_and_choose_lookup(d, new_size) d.resize_counter = new_size * 2 - d.num_live_items * 3 - assert d.resize_counter > 0 + ll_assert(d.resize_counter > 0, "reindex: resize_counter <= 0") + ll_assert((d.lookup_function_no >> FUNC_SHIFT) == 0, + "reindex: lookup_fun >> SHIFT") # entries = d.entries i = 0 @@ -769,23 +817,11 @@ FLAG_LOOKUP = 0 FLAG_STORE = 1 FLAG_DELETE = 2 -FLAG_DELETE_TRY_HARD = 3 @specialize.memo() def _ll_ptr_to_array_of(T): return lltype.Ptr(lltype.GcArray(T)) -def ll_kill_something(d, T): - INDEXES = _ll_ptr_to_array_of(T) - i = 0 - indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) - while True: - index = rffi.cast(lltype.Signed, indexes[i]) - if index >= VALID_OFFSET: - indexes[i] = rffi.cast(T, DELETED) - return index - i += 1 - @jit.look_inside_iff(lambda d, key, hash, store_flag, T: jit.isvirtual(d) and jit.isconstant(key)) @jit.oopspec('ordereddict.lookup(d, key, hash, store_flag, T)') @@ -827,8 +863,6 @@ # pristine entry -- lookup failed if store_flag == FLAG_STORE: indexes[i] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET) - elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD: - return ll_kill_something(d, T) return -1 # In the loop, a deleted entry (everused and not valid) is by far @@ -845,8 +879,6 @@ deletedslot = intmask(i) indexes[deletedslot] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET) - elif d.paranoia and store_flag == 
FLAG_DELETE_TRY_HARD: - return ll_kill_something(d, T) return -1 elif index >= VALID_OFFSET: checkingkey = entries[index - VALID_OFFSET].key @@ -881,17 +913,38 @@ mask = len(indexes) - 1 i = r_uint(hash & mask) perturb = r_uint(hash) - while rffi.cast(lltype.Signed, indexes[i]) != 0: + while rffi.cast(lltype.Signed, indexes[i]) != FREE: i = (i << 2) + i + perturb + 1 i = i & mask perturb >>= PERTURB_SHIFT indexes[i] = rffi.cast(T, index + VALID_OFFSET) +def ll_dict_delete_by_entry_index(d, hash, locate_index, T): + # Another simplified version of ll_dict_lookup() which locates a + # hashtable entry with the given 'index' stored in it, and deletes it. + # This *should* be safe against evil user-level __eq__/__hash__ + # functions because the 'hash' argument here should be the one stored + # into the directory, which is correct. + INDEXES = _ll_ptr_to_array_of(T) + indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) + mask = len(indexes) - 1 + i = r_uint(hash & mask) + perturb = r_uint(hash) + locate_value = locate_index + VALID_OFFSET + while rffi.cast(lltype.Signed, indexes[i]) != locate_value: + assert rffi.cast(lltype.Signed, indexes[i]) != FREE + i = (i << 2) + i + perturb + 1 + i = i & mask + perturb >>= PERTURB_SHIFT + indexes[i] = rffi.cast(T, DELETED) + # ____________________________________________________________ # # Irregular operations. 
-DICT_INITSIZE = 8 +# Start the hashtable size at 16 rather than 8, as with rdict.py, because +# it is only an array of bytes +DICT_INITSIZE = 16 @specialize.memo() @@ -948,14 +1001,19 @@ self.r_dict = r_dict self.variant = variant self.lowleveltype = get_ll_dictiter(r_dict.lowleveltype) - self.ll_dictiter = ll_dictiter - self._ll_dictnext = _ll_dictnext + if variant == 'reversed': + self.ll_dictiter = ll_dictiter_reversed + self._ll_dictnext = _ll_dictnext_reversed + else: + self.ll_dictiter = ll_dictiter + self._ll_dictnext = _ll_dictnext def ll_dictiter(ITERPTR, d): iter = lltype.malloc(ITERPTR.TO) iter.dict = d - iter.index = 0 + # initialize the index with usually 0, but occasionally a larger value + iter.index = d.lookup_function_no >> FUNC_SHIFT return iter @jit.look_inside_iff(lambda iter: jit.isvirtual(iter) @@ -974,17 +1032,48 @@ if entries.valid(index): iter.index = nextindex return index + else: + # In case of repeated iteration over the start of + # a dict where the items get removed, like + # collections.OrderedDict.popitem(last=False), + # the hack below will increase the value stored in + # the high bits of lookup_function_no and so the + # next iteration will start at a higher value. + # We should carefully reset these high bits to zero + # as soon as we do something like ll_dict_reindex(). 
+ if index == (dict.lookup_function_no >> FUNC_SHIFT): + dict.lookup_function_no += (1 << FUNC_SHIFT) index = nextindex # clear the reference to the dict and prevent restarts iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) raise StopIteration +def ll_dictiter_reversed(ITERPTR, d): + iter = lltype.malloc(ITERPTR.TO) + iter.dict = d + iter.index = d.num_ever_used_items + return iter + +def _ll_dictnext_reversed(iter): + dict = iter.dict + if dict: + entries = dict.entries + index = iter.index - 1 + while index >= 0: + if entries.valid(index): + iter.index = index + return index + index = index - 1 + # clear the reference to the dict and prevent restarts + iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) + raise StopIteration + # _____________________________________________________________ # methods def ll_dict_get(dict, key, default): index = dict.lookup_function(dict, key, dict.keyhash(key), FLAG_LOOKUP) - if index == -1: + if index < 0: return default else: return dict.entries[index].value @@ -992,7 +1081,7 @@ def ll_dict_setdefault(dict, key, default): hash = dict.keyhash(key) index = dict.lookup_function(dict, key, hash, FLAG_STORE) - if index == -1: + if index < 0: _ll_dict_setitem_lookup_done(dict, key, default, hash, -1) return default else: @@ -1119,7 +1208,7 @@ def ll_dict_contains(d, key): i = d.lookup_function(d, key, d.keyhash(key), FLAG_LOOKUP) - return i != -1 + return i >= 0 def _ll_getnextitem(dic): if dic.num_live_items == 0: @@ -1127,22 +1216,19 @@ entries = dic.entries + # find the last entry. It's unclear if the loop below is still + # needed nowadays, because 'num_ever_used_items - 1' should always + # point to the last active item (we decrease it as needed in + # _ll_dict_del). Better safe than sorry. 
while True: i = dic.num_ever_used_items - 1 if entries.valid(i): break dic.num_ever_used_items -= 1 - key = entries[i].key - index = dic.lookup_function(dic, key, entries.hash(i), - FLAG_DELETE_TRY_HARD) - # if the lookup function returned me a random strange thing, - # don't care about deleting the item - if index == dic.num_ever_used_items - 1: - dic.num_ever_used_items -= 1 - else: - assert index != -1 - return index + # we must remove the precise entry in the hashtable that points to 'i' + ll_call_delete_by_entry_index(dic, entries.hash(i), i) + return i def ll_dict_popitem(ELEM, dic): i = _ll_getnextitem(dic) @@ -1155,7 +1241,7 @@ def ll_dict_pop(dic, key): index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE) - if index == -1: + if index < 0: raise KeyError value = dic.entries[index].value _ll_dict_del(dic, index) @@ -1163,7 +1249,7 @@ def ll_dict_pop_default(dic, key, dfl): index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE) - if index == -1: + if index < 0: return dfl value = dic.entries[index].value _ll_dict_del(dic, index) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -445,6 +445,14 @@ return hop.genop('cast_opaque_ptr', [v_input], # v_type implicit in r_result resulttype = hop.r_result.lowleveltype) + at typer_for(lltype.length_of_simple_gcarray_from_opaque) +def rtype_length_of_simple_gcarray_from_opaque(hop): + assert isinstance(hop.args_r[0], rptr.PtrRepr) + v_opaque_ptr, = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('length_of_simple_gcarray_from_opaque', [v_opaque_ptr], + resulttype = hop.r_result.lowleveltype) + @typer_for(lltype.direct_fieldptr) def rtype_direct_fieldptr(hop): assert isinstance(hop.args_r[0], rptr.PtrRepr) diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -98,12 +98,12 @@ c_key = 
hop.inputconst(lltype.Void, 'key') v_key = hop.genop('getinteriorfield', [v_entries, v_index, c_key], resulttype=KEY) - if variant != 'keys': + if variant != 'keys' and variant != 'reversed': VALUE = ENTRIES.TO.OF.value c_value = hop.inputconst(lltype.Void, 'value') v_value = hop.genop('getinteriorfield', [v_entries,v_index,c_value], resulttype=VALUE) - if variant == 'keys': + if variant == 'keys' or variant == 'reversed': return self.r_dict.recast_key(hop.llops, v_key) elif variant == 'values': return self.r_dict.recast_value(hop.llops, v_value) diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -1005,6 +1005,7 @@ def test_dict_resize(self): + py.test.skip("test written for non-ordered dicts, update or kill") # XXX we no longer automatically resize on 'del'. We need to # hack a bit in this test to trigger a resize by continuing to # fill the dict's table while keeping the actual size very low @@ -1025,7 +1026,7 @@ res = self.interpret(func, [1]) assert len(res.entries) == rdict.DICT_INITSIZE - def test_opt_nullkeymarker(self): + def test_opt_dummykeymarker(self): def f(): d = {"hello": None} d["world"] = None @@ -1033,10 +1034,9 @@ res = self.interpret(f, []) assert res.item0 == True DICT = lltype.typeOf(res.item1).TO - assert not hasattr(DICT.entries.TO.OF, 'f_everused')# non-None string keys - assert not hasattr(DICT.entries.TO.OF, 'f_valid') # strings have a dummy + assert not hasattr(DICT.entries.TO.OF, 'f_valid') # strs have a dummy - def test_opt_nullvaluemarker(self): + def test_opt_dummyvaluemarker(self): def f(n): d = {-5: "abcd"} d[123] = "def" @@ -1044,29 +1044,8 @@ res = self.interpret(f, [-5]) assert res.item0 == 4 DICT = lltype.typeOf(res.item1).TO - assert not hasattr(DICT.entries.TO.OF, 'f_everused')# non-None str values assert not hasattr(DICT.entries.TO.OF, 'f_valid') # strs have a dummy - def test_opt_nonullmarker(self): - class A: 
- pass - def f(n): - if n > 5: - a = A() - else: - a = None - d = {a: -5441} - d[A()] = n+9872 - return d[a], d - res = self.interpret(f, [-5]) - assert res.item0 == -5441 - DICT = lltype.typeOf(res.item1).TO - assert hasattr(DICT.entries.TO.OF, 'f_everused') # can-be-None A instances - assert not hasattr(DICT.entries.TO.OF, 'f_valid')# with a dummy A instance - - res = self.interpret(f, [6]) - assert res.item0 == -5441 - def test_opt_nonnegint_dummy(self): def f(n): d = {n: 12} @@ -1077,7 +1056,6 @@ assert res.item0 == 1 assert res.item1 == 24 DICT = lltype.typeOf(res.item2).TO - assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero assert not hasattr(DICT.entries.TO.OF, 'f_valid')# nonneg int: dummy -1 def test_opt_no_dummy(self): @@ -1090,7 +1068,6 @@ assert res.item0 == 1 assert res.item1 == -24 DICT = lltype.typeOf(res.item2).TO - assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero assert hasattr(DICT.entries.TO.OF, 'f_valid') # no dummy available def test_opt_boolean_has_no_dummy(self): @@ -1103,7 +1080,6 @@ assert res.item0 == 1 assert res.item1 is True DICT = lltype.typeOf(res.item2).TO - assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero assert hasattr(DICT.entries.TO.OF, 'f_valid') # no dummy available def test_opt_multiple_identical_dicts(self): @@ -1142,6 +1118,7 @@ assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key', 'value'] def test_deleted_entry_reusage_with_colliding_hashes(self): + py.test.skip("test written for non-ordered dicts, update or kill") def lowlevelhash(value): p = rstr.mallocstr(len(value)) for i in range(len(value)): diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -115,11 +115,18 @@ rordereddict.ll_dict_setitem(ll_d, llstr("b"), 2) rordereddict.ll_dict_setitem(ll_d, llstr("c"), 3) rordereddict.ll_dict_setitem(ll_d, 
llstr("d"), 4) - assert len(get_indexes(ll_d)) == 8 rordereddict.ll_dict_setitem(ll_d, llstr("e"), 5) rordereddict.ll_dict_setitem(ll_d, llstr("f"), 6) - assert len(get_indexes(ll_d)) == 32 - for item in ['a', 'b', 'c', 'd', 'e', 'f']: + rordereddict.ll_dict_setitem(ll_d, llstr("g"), 7) + rordereddict.ll_dict_setitem(ll_d, llstr("h"), 8) + rordereddict.ll_dict_setitem(ll_d, llstr("i"), 9) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 10) + assert len(get_indexes(ll_d)) == 16 + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 11) + rordereddict.ll_dict_setitem(ll_d, llstr("l"), 12) + rordereddict.ll_dict_setitem(ll_d, llstr("m"), 13) + assert len(get_indexes(ll_d)) == 64 + for item in 'abcdefghijklm': assert rordereddict.ll_dict_getitem(ll_d, llstr(item)) == ord(item) - ord('a') + 1 def test_dict_grow_cleanup(self): @@ -160,6 +167,38 @@ assert ll_elem.item1 == 1 py.test.raises(KeyError, rordereddict.ll_dict_popitem, TUP, ll_d) + def test_popitem_first(self): + DICT = self._get_str_dict() + ll_d = rordereddict.ll_newdict(DICT) + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2) + rordereddict.ll_dict_setitem(ll_d, llstr("m"), 3) + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + for expected in ["k", "j", "m"]: + ll_iter = rordereddict.ll_dictiter(ITER, ll_d) + num = rordereddict._ll_dictnext(ll_iter) + ll_key = ll_d.entries[num].key + assert hlstr(ll_key) == expected + rordereddict.ll_dict_delitem(ll_d, ll_key) + ll_iter = rordereddict.ll_dictiter(ITER, ll_d) + py.test.raises(StopIteration, rordereddict._ll_dictnext, ll_iter) + + def test_popitem_first_bug(self): + DICT = self._get_str_dict() + ll_d = rordereddict.ll_newdict(DICT) + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1) + rordereddict.ll_dict_delitem(ll_d, llstr("k")) + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + ll_iter = rordereddict.ll_dictiter(ITER, ll_d) + num = 
rordereddict._ll_dictnext(ll_iter) + ll_key = ll_d.entries[num].key + assert hlstr(ll_key) == "j" + assert ll_d.lookup_function_no == 4 # 1 free item found at the start + rordereddict.ll_dict_delitem(ll_d, llstr("j")) + assert ll_d.num_ever_used_items == 0 + assert ll_d.lookup_function_no == 0 # reset + def test_direct_enter_and_del(self): def eq(a, b): return a == b diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -653,6 +653,11 @@ OP_CAST_ADR_TO_PTR = OP_CAST_POINTER OP_CAST_OPAQUE_PTR = OP_CAST_POINTER + def OP_LENGTH_OF_SIMPLE_GCARRAY_FROM_OPAQUE(self, op): + return ('%s = *(long *)(((char *)%s) + sizeof(struct pypy_header0));' + ' /* length_of_simple_gcarray_from_opaque */' + % (self.expr(op.result), self.expr(op.args[0]))) + def OP_CAST_INT_TO_PTR(self, op): TYPE = self.lltypemap(op.result) typename = self.db.gettype(TYPE) diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -246,3 +246,13 @@ assert res == 456 res = fc(77) assert res == 123 + +def test_gcarray_length(): + A = lltype.GcArray(lltype.Char) + def f(): + a = lltype.malloc(A, 117) + p = lltype.cast_opaque_ptr(GCREF, a) + return lltype.length_of_simple_gcarray_from_opaque(p) + fc = compile(f, []) + res = fc() + assert res == 117 diff --git a/rpython/translator/tool/staticsizereport.py b/rpython/translator/tool/staticsizereport.py --- a/rpython/translator/tool/staticsizereport.py +++ b/rpython/translator/tool/staticsizereport.py @@ -3,6 +3,7 @@ from rpython.tool.ansicolor import red, yellow, green from rpython.rtyper.lltypesystem.lltype import typeOf, _ptr, Ptr, ContainerType +from rpython.rtyper.lltypesystem.lltype import GcOpaqueType from rpython.rtyper.lltypesystem import llmemory from rpython.memory.lltypelayout import 
convert_offset_to_int @@ -54,6 +55,8 @@ if isinstance(typeOf(value), Ptr): container = value._obj if isinstance(typeOf(container), ContainerType): + if isinstance(typeOf(container), GcOpaqueType): + container = container.container node = database.getcontainernode(container) if node.nodekind != 'func': nodes.append(node) @@ -77,7 +80,10 @@ return 0 else: length = None - return convert_offset_to_int(llmemory.sizeof(TYPE, length)) + #print obj, ', length =', length + r = convert_offset_to_int(llmemory.sizeof(TYPE, length)) + #print '\tr =', r + return r def guess_size(database, node, recursive=None): diff --git a/rpython/translator/tool/test/test_staticsizereport.py b/rpython/translator/tool/test/test_staticsizereport.py --- a/rpython/translator/tool/test/test_staticsizereport.py +++ b/rpython/translator/tool/test/test_staticsizereport.py @@ -57,10 +57,17 @@ P = rffi.sizeof(rffi.VOIDP) B = 1 # bool assert guess_size(func.builder.db, dictvalnode, set()) > 100 - assert guess_size(func.builder.db, dictvalnode2, set()) == 2 * S + 1 * P + 1 * S + 8 * (2*S + 1 * B) + assert guess_size(func.builder.db, dictvalnode2, set()) == ( + (4 * S + 2 * P) + # struct dicttable + (S + 8) + # indexes, length 8 + (S + S + S)) # entries, length 1 r_set = set() dictnode_size = guess_size(db, test_dictnode, r_set) - assert dictnode_size == 2 * S + 1 * P + 1 * S + (4096-256) * (1*S+1*P + (1 * S + 1*P + 5)) + (8192-4096+256) * (1*S+1*P) + assert dictnode_size == ( + (4 * S + 2 * P) + # struct dicttable + (S + 2 * 8192) + # indexes, length 8192, rffi.USHORT + (S + (S + S) * 3840) + # entries, length 3840 + (S + S + 5) * 3840) # 3840 strings with 5 chars each assert guess_size(func.builder.db, fixarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(lltype.Signed) assert guess_size(func.builder.db, dynarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 2 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(rffi.VOIDP) From noreply at buildbot.pypy.org Sun Jan 18 11:11:19 
2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 11:11:19 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for termios on 32-bit Message-ID: <20150118101119.DE8A11C0117@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75419:c52fc1774518 Date: 2015-01-18 11:11 +0100 http://bitbucket.org/pypy/pypy/changeset/c52fc1774518/ Log: Fix for termios on 32-bit diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -3,6 +3,7 @@ # returns list of mostly-strings of length one, but with few ints # inside, so we make sure it works +import sys from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -97,6 +98,10 @@ for name in CONSTANT_NAMES: value = c_config[name] if value is not None: + if value < -sys.maxsize-1 or value >= 2 * (sys.maxsize+1): + raise AssertionError("termios: %r has value %r, too large" % ( + name, value)) + value = intmask(value) # wrap unsigned long numbers to signed longs globals()[name] = value all_constants[name] = value From noreply at buildbot.pypy.org Sun Jan 18 12:34:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 12:34:36 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Possibly clarify some things and avoid a few redundant loads; also, Message-ID: <20150118113436.922351C03EF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75420:0055b7f7e471 Date: 2015-01-18 12:34 +0100 http://bitbucket.org/pypy/pypy/changeset/0055b7f7e471/ Log: Possibly clarify some things and avoid a few redundant loads; also, I can't understand if SetLastError is supposed to pop its argument off the stack or not (as used here) so write a generic case. 
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -32,6 +32,8 @@ # arguments, we need to decrease esp temporarily stack_max = PASS_ON_MY_FRAME + tlofs_reg = None + saved_stack_position_reg = None result_value_saved_early = False def __init__(self, assembler, fnloc, arglocs, @@ -152,8 +154,35 @@ if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more + def get_tlofs_reg(self): + """Load the THREADLOCAL_OFS from the stack into a callee-saved + register. Further calls just return the same register, by assuming + it is indeed saved.""" + assert self.is_call_release_gil + if self.tlofs_reg is None: + # pick a register saved across calls + if IS_X86_32: + self.tlofs_reg = esi + else: + self.tlofs_reg = r12 + self.mc.MOV_rs(self.tlofs_reg.value, + THREADLOCAL_OFS - self.current_esp) + return self.tlofs_reg + + def save_stack_position(self): + """Load the current 'esp' value into a callee-saved register. + Further calls just return the same register, by assuming it is + indeed saved.""" + assert IS_X86_32 + assert stdcall_or_cdecl and self.is_call_release_gil + if self.saved_stack_position_reg is None: + # pick a register saved across calls + self.saved_stack_position_reg = edi + self.mc.MOV(self.saved_stack_position_reg, esp) + def write_real_errno(self, save_err): - tlofsreg = None + """This occurs just before emit_raw_call(). 
+ """ mc = self.mc if handle_lasterror and (save_err & rffi.RFFI_READSAVED_LASTERROR): @@ -165,11 +194,13 @@ assert isinstance(self, CallBuilder32) # Windows 32-bit only # rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu) - tlofsreg = edi # saved across the call to SetLastError - mc.MOV_rs(edi.value, THREADLOCAL_OFS - self.current_esp) - mc.PUSH_m((edi.value, rpy_lasterror)) + tlofsreg = self.get_tlofs_reg() # => esi, callee-saved + self.save_stack_position() # => edi, callee-saved + mc.PUSH_m((tlofsreg.value, rpy_lasterror)) mc.CALL(imm(SetLastError_addr)) - mc.ADD_ri(esp.value, WORD) + # restore the stack position without assuming a particular + # calling convention of _SetLastError() + self.mc.MOV(esp, self.saved_stack_position_reg) if save_err & rffi.RFFI_READSAVED_ERRNO: # Just before a call, read 'rpy_errno' and write it into the @@ -178,9 +209,7 @@ # pass the arguments on x86-64. rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - if tlofsreg is None: - tlofsreg = eax - mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) + tlofsreg = self.get_tlofs_reg() # => esi or r12, callee-saved if IS_X86_32: tmpreg = edx else: @@ -191,27 +220,27 @@ elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: # Same, but write zero. p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - if tlofsreg is None: - tlofsreg = eax - mc.MOV_rs(eax.value, THREADLOCAL_OFS - self.current_esp) + tlofsreg = self.get_tlofs_reg() # => esi or r12, callee-saved mc.MOV_rm(eax.value, (tlofsreg.value, p_errno)) mc.MOV32_mi((eax.value, 0), 0) def read_real_errno(self, save_err): - esi_is_threadlocal_ofs = False + """This occurs after emit_raw_call() and after restore_stack_pointer(). + """ mc = self.mc if save_err & rffi.RFFI_SAVE_ERRNO: # Just after a call, read the real 'errno' and save a copy of # it inside our thread-local 'rpy_errno'. Most registers are # free here, including the callee-saved ones, except 'ebx'. 
+ # The tlofs register might have been loaded earlier and is + # callee-saved, so it does not need to be reloaded. rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - mc.MOV_rs(esi.value, THREADLOCAL_OFS) - mc.MOV_rm(edi.value, (esi.value, p_errno)) + tlofsreg = self.get_tlofs_reg() # => esi or r12 (possibly reused) + mc.MOV_rm(edi.value, (tlofsreg.value, p_errno)) mc.MOV32_rm(edi.value, (edi.value, 0)) - mc.MOV32_mr((esi.value, rpy_errno), edi.value) - esi_is_threadlocal_ofs = True + mc.MOV32_mr((tlofsreg.value, rpy_errno), edi.value) if handle_lasterror and (save_err & rffi.RFFI_SAVE_LASTERROR): from rpython.rlib.rwin32 import _GetLastError @@ -219,13 +248,12 @@ assert isinstance(self, CallBuilder32) # Windows 32-bit only # rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu) - self.save_result_value(save_edx=True) + self.save_result_value(save_edx=True) # save eax/edx/xmm0 self.result_value_saved_early = True mc.CALL(imm(GetLastError_addr)) # - if not esi_is_threadlocal_ofs: - mc.MOV_rs(esi.value, THREADLOCAL_OFS) - mc.MOV32_mr((esi.value, rpy_lasterror), eax.value) + tlofsreg = self.get_tlofs_reg() # => esi (possibly reused) + mc.MOV32_mr((tlofsreg.value, rpy_lasterror), eax.value) def move_real_result_and_call_reacqgil_addr(self, fastgil): from rpython.jit.backend.x86 import rx86 @@ -383,15 +411,10 @@ # Dynamically accept both stdcall and cdecl functions. # We could try to detect from pyjitpl which calling # convention this particular function takes, which would - # avoid these two extra MOVs... but later. Pick any - # caller-saved register here except ebx (used for shadowstack). - if IS_X86_32: - free_caller_save_reg = edi - else: - free_caller_save_reg = r14 - self.mc.MOV(free_caller_save_reg, esp) + # avoid these two extra MOVs... but later. 
+ self.save_stack_position() # => edi (possibly reused) self.mc.CALL(self.fnloc) - self.mc.MOV(esp, free_caller_save_reg) + self.mc.MOV(esp, self.saved_stack_position_reg) else: self.mc.CALL(self.fnloc) if self.callconv != FFI_DEFAULT_ABI: From noreply at buildbot.pypy.org Sun Jan 18 14:50:02 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 18 Jan 2015 14:50:02 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Implement int_force_ge_zero() llop. Message-ID: <20150118135002.022851C027F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r75421:13b46ae9604f Date: 2015-01-08 22:36 +0100 http://bitbucket.org/pypy/pypy/changeset/13b46ae9604f/ Log: Implement int_force_ge_zero() llop. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1306,6 +1306,12 @@ self.w('{t2.V} = icmp slt {b.TV}, {c.V}'.format(**locals())) self.w('{result.V} = and i1 {t1.V}, {t2.V}'.format(**locals())) + def op_int_force_ge_zero(self, result, var): + isneg = self._tmp() + self.w('{isneg.V} = icmp slt {var.TV}, 0'.format(**locals())) + self.w('{result.V} = select i1 {isneg.V}, {var.T} 0, {var.TV}' + .format(**locals())) + def op_ptr_iszero(self, result, var): self.w('{result.V} = icmp eq {var.TV}, null'.format(**locals())) From noreply at buildbot.pypy.org Sun Jan 18 14:50:13 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 18 Jan 2015 14:50:13 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <20150118135013.D80CF1C027F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r75422:67a005ec73ed Date: 2015-01-08 22:43 +0100 http://bitbucket.org/pypy/pypy/changeset/67a005ec73ed/ Log: hg merge default diff too long, truncating to 2000 out of 35007 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 
+28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -37,4 +37,4 @@ to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: -http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + http://doc.pypy.org/en/latest/install.html diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -58,7 +58,7 @@ executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], + 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], diff --git a/lib-python/2.7/sqlite3/test/dbapi.py b/lib-python/2.7/sqlite3/test/dbapi.py --- a/lib-python/2.7/sqlite3/test/dbapi.py +++ b/lib-python/2.7/sqlite3/test/dbapi.py @@ -478,6 +478,29 @@ except TypeError: pass + def CheckCurDescription(self): + self.cu.execute("select * from test") + + actual = self.cu.description + expected = [ + ('id', None, None, None, None, None, None), + ('name', None, None, None, None, None, None), + ('income', None, None, None, None, None, None), + ] + self.assertEqual(expected, actual) + + def CheckCurDescriptionVoidStatement(self): + self.cu.execute("insert into test(name) values (?)", ("foo",)) + self.assertIsNone(self.cu.description) + + def CheckCurDescriptionWithoutStatement(self): + cu = self.cx.cursor() + try: + self.assertIsNone(cu.description) + finally: + cu.close() + + @unittest.skipUnless(threading, 'This test requires threading.') class ThreadTests(unittest.TestCase): def setUp(self): diff --git a/lib-python/2.7/subprocess.py 
b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -655,6 +655,21 @@ """Create new Popen instance.""" _cleanup() + # --- PyPy hack, see _pypy_install_libs_after_virtualenv() --- + # match arguments passed by different versions of virtualenv + if args[1:] in ( + ['-c', 'import sys; print(sys.prefix)'], # 1.6 10ba3f3c + ['-c', "\nimport sys\nprefix = sys.prefix\n" # 1.7 0e9342ce + "if sys.version_info[0] == 3:\n" + " prefix = prefix.encode('utf8')\n" + "if hasattr(sys.stdout, 'detach'):\n" + " sys.stdout = sys.stdout.detach()\n" + "elif hasattr(sys.stdout, 'buffer'):\n" + " sys.stdout = sys.stdout.buffer\nsys.stdout.write(prefix)\n"], + ['-c', 'import sys;out=sys.stdout;getattr(out, "buffer"' + ', out).write(sys.prefix.encode("utf-8"))']): # 1.7.2 a9454bce + _pypy_install_libs_after_virtualenv(args[0]) + if not isinstance(bufsize, (int, long)): raise TypeError("bufsize must be an integer") @@ -1560,6 +1575,27 @@ self.send_signal(signal.SIGKILL) +def _pypy_install_libs_after_virtualenv(target_executable): + # https://bitbucket.org/pypy/pypy/issue/1922/future-proofing-virtualenv + # + # PyPy 2.4.1 turned --shared on by default. This means the pypy binary + # depends on the 'libpypy-c.so' shared library to be able to run. + # The virtualenv code existing at the time did not account for this + # and would break. Try to detect that we're running under such a + # virtualenv in the "Testing executable with" phase and copy the + # library ourselves. 
+ caller = sys._getframe(2) + if ('virtualenv_version' in caller.f_globals and + 'copyfile' in caller.f_globals): + dest_dir = sys.pypy_resolvedirof(target_executable) + src_dir = sys.pypy_resolvedirof(sys.executable) + for libname in ['libpypy-c.so', 'libpypy-c.dylib']: + dest_library = os.path.join(dest_dir, libname) + src_library = os.path.join(src_dir, libname) + if os.path.exists(src_library): + caller.f_globals['copyfile'](src_library, dest_library) + + def _demo_posix(): # # Example 1: Simple redirection: Get process list diff --git a/lib-python/2.7/test/test_collections.py b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -1108,6 +1108,16 @@ od.popitem() self.assertEqual(len(od), 0) + def test_popitem_first(self): + pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] + shuffle(pairs) + od = OrderedDict(pairs) + while pairs: + self.assertEqual(od.popitem(last=False), pairs.pop(0)) + with self.assertRaises(KeyError): + od.popitem(last=False) + self.assertEqual(len(od), 0) + def test_pop(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) @@ -1179,7 +1189,11 @@ od = OrderedDict(pairs) # yaml.dump(od) --> # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n' - self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1])) + + # PyPy bug fix: added [0] at the end of this line, because the + # test is really about the 2-tuples that need to be 2-lists + # inside the list of 6 of them + self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1][0])) def test_reduce_not_too_fat(self): # do not save instance dictionary if not needed @@ -1189,6 +1203,16 @@ od.x = 10 self.assertEqual(len(od.__reduce__()), 3) + def test_reduce_exact_output(self): + # PyPy: test that __reduce__() produces the exact same answer as + # CPython does, even though in the 'all_ordered_dicts' branch we + # have to 
emulate it. + pairs = [['c', 1], ['b', 2], ['d', 4]] + od = OrderedDict(pairs) + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,))) + od.x = 10 + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,), {'x': 10})) + def test_repr(self): od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py --- a/lib-python/2.7/test/test_xml_etree.py +++ b/lib-python/2.7/test/test_xml_etree.py @@ -225,9 +225,9 @@ >>> element.remove(subelement) >>> serialize(element) # 5 '' - >>> element.remove(subelement) + >>> element.remove(subelement) # doctest: +ELLIPSIS Traceback (most recent call last): - ValueError: list.remove(x): x not in list + ValueError: list.remove(... >>> serialize(element) # 6 '' >>> element[0:0] = [subelement, subelement, subelement] diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -59,7 +59,7 @@ def __init__(self, basename, core=False, compiler=None, usemodules='', skip=None): self.basename = basename - self._usemodules = usemodules.split() + ['signal', 'rctime', 'itertools', '_socket'] + self._usemodules = usemodules.split() + ['signal', 'time', 'itertools', '_socket'] self._compiler = compiler self.core = core self.skip = skip diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -7,7 +7,7 @@ 1. check out the branch vendor/stdlib 2. upgrade the files there -3. update stdlib-versions.txt with the output of hg -id from the cpython repo +3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit 5. update to default/py3k 6. 
create a integration branch for the new stdlib diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -9,7 +9,10 @@ of the given arguments and keywords. """ - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1175,8 +1175,9 @@ try: return self.__description except AttributeError: - self.__description = self.__statement._get_description() - return self.__description + if self.__statement: + self.__description = self.__statement._get_description() + return self.__description description = property(__get_description) def __get_lastrowid(self): diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py --- a/lib_pypy/grp.py +++ b/lib_pypy/grp.py @@ -66,11 +66,12 @@ @builtinify def getgrnam(name): - if not isinstance(name, str): + if not isinstance(name, basestring): raise TypeError("expected string") + name = str(name) res = libc.getgrnam(name) if not res: - raise KeyError(name) + raise KeyError("'getgrnam(): name not found: %s'" % name) return _group_from_gstruct(res) @builtinify diff --git a/lib_pypy/readline.py b/lib_pypy/readline.py --- a/lib_pypy/readline.py +++ b/lib_pypy/readline.py @@ -6,4 +6,11 @@ are only stubs at the moment. 
""" -from pyrepl.readline import * +try: + from pyrepl.readline import * +except ImportError: + import sys + if sys.platform == 'win32': + raise ImportError("the 'readline' module is not available on Windows" + " (on either PyPy or CPython)") + raise diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -29,7 +29,7 @@ # --allworkingmodules working_modules = default_modules.copy() working_modules.update([ - "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "rctime" , + "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "time" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", @@ -40,7 +40,7 @@ translation_modules = default_modules.copy() translation_modules.update([ - "fcntl", "rctime", "select", "signal", "_rawffi", "zlib", "struct", "_md5", + "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5", "cStringIO", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -64,19 +64,15 @@ default_modules.add("_locale") if sys.platform == "sunos5": - working_modules.remove('mmap') # depend on ctypes, can't get at c-level 'errono' - working_modules.remove('rctime') # depend on ctypes, missing tm_zone/tm_gmtoff - working_modules.remove('signal') # depend on ctypes, can't get at c-level 'errono' working_modules.remove('fcntl') # LOCK_NB not defined working_modules.remove("_minimal_curses") working_modules.remove("termios") - working_modules.remove("_multiprocessing") # depends on rctime if "cppyy" in working_modules: working_modules.remove("cppyy") # depends on ctypes module_dependencies = { - '_multiprocessing': [('objspace.usemodules.rctime', True), + '_multiprocessing': [('objspace.usemodules.time', True), ('objspace.usemodules.thread', 
True)], 'cpyext': [('objspace.usemodules.array', True)], 'cppyy': [('objspace.usemodules.cpyext', True)], @@ -86,9 +82,10 @@ # itself needs the interp-level struct module # because 'P' is missing from the app-level one "_rawffi": [("objspace.usemodules.struct", True)], - "cpyext": [("translation.secondaryentrypoints", "cpyext,main"), - ("translation.shared", sys.platform == "win32")], + "cpyext": [("translation.secondaryentrypoints", "cpyext,main")], } +if sys.platform == "win32": + module_suggests["cpyext"].append(("translation.shared", True)) module_import_dependencies = { # no _rawffi if importing rpython.rlib.clibffi raises ImportError @@ -255,10 +252,6 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("builtinshortcut", - "a shortcut for operations between built-in types. XXX: " - "deprecated, not really a shortcut any more.", - default=False), BoolOption("getattributeshortcut", "track types that override __getattribute__", default=False, @@ -270,9 +263,6 @@ # weakrefs needed, because of get_subclasses() requires=[("translation.rweakref", True)]), - ChoiceOption("multimethods", "the multimethod implementation to use", - ["doubledispatch", "mrd"], - default="mrd"), BoolOption("withidentitydict", "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", default=False, diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -64,7 +64,7 @@ def check_file_exists(fn): assert configdocdir.join(fn).check() - from pypy.doc.config.confrest import all_optiondescrs + from pypy.doc.config.generate import all_optiondescrs configdocdir = thisdir.dirpath().dirpath().join("doc", "config") for descr in all_optiondescrs: prefix = descr._name diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst 
@@ -119,6 +119,9 @@ pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py +(You can use ``python`` instead of ``pypy`` here, which will take longer +but works too.) + If everything works correctly this will create an executable ``pypy-c`` in the current directory. The executable behaves mostly like a normal Python interpreter (see :doc:`cpython_differences`). diff --git a/pypy/doc/config/objspace.std.builtinshortcut.txt b/pypy/doc/config/objspace.std.builtinshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.builtinshortcut.txt +++ /dev/null @@ -1,5 +0,0 @@ -A shortcut speeding up primitive operations between built-in types. - -This is a space-time trade-off: at the moment, this option makes a -translated pypy-c executable bigger by about 1.7 MB. (This can probably -be improved with careful analysis.) diff --git a/pypy/doc/config/objspace.std.multimethods.txt b/pypy/doc/config/objspace.std.multimethods.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.multimethods.txt +++ /dev/null @@ -1,8 +0,0 @@ -Choose the multimethod implementation. - -* ``doubledispatch`` turns - a multimethod call into a sequence of normal method calls. - -* ``mrd`` uses a technique known as Multiple Row Displacement - which precomputes a few compact tables of numbers and - function pointers. diff --git a/pypy/doc/config/objspace.usemodules.rctime.txt b/pypy/doc/config/objspace.usemodules.rctime.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.rctime.txt +++ /dev/null @@ -1,7 +0,0 @@ -Use the 'rctime' module. - -'rctime' is our `rffi`_ based implementation of the builtin 'time' module. -It supersedes the less complete :config:`objspace.usemodules.time`, -at least for C-like targets (the C and LLVM backends). - -.. 
_`rffi`: ../rffi.html diff --git a/pypy/doc/config/objspace.usemodules.time.txt b/pypy/doc/config/objspace.usemodules.time.txt --- a/pypy/doc/config/objspace.usemodules.time.txt +++ b/pypy/doc/config/objspace.usemodules.time.txt @@ -1,5 +1,1 @@ Use the 'time' module. - -Obsolete; use :config:`objspace.usemodules.rctime` for our up-to-date version -of the application-level 'time' module, at least for C-like targets (the C -and LLVM backends). diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -205,23 +205,28 @@ The above is true both in CPython and in PyPy. Differences can occur about whether a built-in function or method will call an overridden method of *another* object than ``self``. -In PyPy, they are generally always called, whereas not in -CPython. For example, in PyPy, ``dict1.update(dict2)`` -considers that ``dict2`` is just a general mapping object, and -will thus call overridden ``keys()`` and ``__getitem__()`` -methods on it. So the following code prints ``42`` on PyPy -but ``foo`` on CPython:: +In PyPy, they are often called in cases where CPython would not. +Two examples:: - >>>> class D(dict): - .... def __getitem__(self, key): - .... return 42 - .... - >>>> - >>>> d1 = {} - >>>> d2 = D(a='foo') - >>>> d1.update(d2) - >>>> print d1['a'] - 42 + class D(dict): + def __getitem__(self, key): + return "%r from D" % (key,) + + class A(object): + pass + + a = A() + a.__dict__ = D() + a.foo = "a's own foo" + print a.foo + # CPython => a's own foo + # PyPy => 'foo' from D + + glob = D(foo="base item") + loc = {} + exec "print foo" in glob, loc + # CPython => base item + # PyPy => 'foo' from D Mutating classes of objects which are already used as dictionary keys @@ -292,6 +297,9 @@ above types will return a value that is computed from the argument, and can thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long). 
+Notably missing from the list above are ``str`` and ``unicode``. If your +code relies on comparing strings with ``is``, then it might break in PyPy. + Miscellaneous ------------- diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -6,6 +6,10 @@ C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API. +**NOTE**: As of 1st of December, PyPy comes with ``--shared`` by default +on linux, linux64 and windows. We will make it the default on all platforms +by the time of the next release. + The first thing that you need is to compile PyPy yourself with the option ``--shared``. We plan to make ``--shared`` the default in the future. Consult the `how to compile PyPy`_ doc for details. This will result in ``libpypy.so`` @@ -93,12 +97,18 @@ return res; } -If we save it as ``x.c`` now, compile it and run it with:: +If we save it as ``x.c`` now, compile it and run it (on linux) with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy +on OSX it is necessary to set the rpath of the binary if one wants to link to it:: + + gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl, at executable_path + ./x + hello from pypy + Worked! .. note:: If the compilation fails because of missing PyPy.h header file, diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -35,6 +35,13 @@ PyPy's bytearray type is very inefficient. It would be an interesting task to look into possible optimizations on this. +Implement AF_XXX packet types for PyPy +-------------------------------------- + +PyPy is missing AF_XXX types of sockets. Implementing it is easy-to-medium +task. `bug report`_ + +.. 
_`bug report`: https://bitbucket.org/pypy/pypy/issue/1942/support-for-af_xxx-sockets#more Implement copy-on-write list slicing ------------------------------------ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -35,3 +35,19 @@ Split RPython documentation from PyPy documentation and clean up. There now is a clearer separation between documentation for users, developers and people interested in background information. + +.. branch: kill-multimethod + +Kill multimethod machinery, all multimethods were removed earlier. + +.. branch nditer-external_loop + +Implement `external_loop` arguement to numpy's nditer + +.. branch kill-rctime + +Rename pypy/module/rctime to pypy/module/time, since it contains the implementation of the 'time' module. + +.. branch: ssa-flow + +Use SSA form for flow graphs inside build_flow() and part of simplify_graph() diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -208,23 +208,6 @@ from pypy.config.pypyoption import set_pypy_opt_level set_pypy_opt_level(config, translateconfig.opt) - # as of revision 27081, multimethod.py uses the InstallerVersion1 by default - # because it is much faster both to initialize and run on top of CPython. - # The InstallerVersion2 is optimized for making a translator-friendly - # structure for low level backends. However, InstallerVersion1 is still - # preferable for high level backends, so we patch here. 
- - from pypy.objspace.std import multimethod - if config.objspace.std.multimethods == 'mrd': - assert multimethod.InstallerVersion1.instance_counter == 0,\ - 'The wrong Installer version has already been instatiated' - multimethod.Installer = multimethod.InstallerVersion2 - elif config.objspace.std.multimethods == 'doubledispatch': - # don't rely on the default, set again here - assert multimethod.InstallerVersion2.instance_counter == 0,\ - 'The wrong Installer version has already been instatiated' - multimethod.Installer = multimethod.InstallerVersion1 - def print_help(self, config): self.opt_parser(config).print_help() @@ -251,8 +234,7 @@ enable_translationmodules(config) config.translation.suggest(check_str_without_nul=True) - if sys.platform.startswith('linux'): - config.translation.suggest(shared=True) + config.translation.suggest(shared=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -1,15 +1,13 @@ -""" -Python control flow graph generation and bytecode assembly. 
-""" +"""Python control flow graph generation and bytecode assembly.""" -from pypy.interpreter.astcompiler import ast, symtable -from pypy.interpreter import pycode +from rpython.rlib import rfloat +from rpython.rlib.objectmodel import we_are_translated + +from pypy.interpreter.astcompiler import ast, misc, symtable +from pypy.interpreter.error import OperationError +from pypy.interpreter.pycode import PyCode from pypy.tool import stdlib_opcode as ops -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib import rfloat - class Instruction(object): """Represents a single opcode.""" @@ -21,14 +19,12 @@ self.has_jump = False def size(self): - """Return the size of bytes of this instruction when it is encoded.""" + """Return the size of bytes of this instruction when it is + encoded. + """ if self.opcode >= ops.HAVE_ARGUMENT: - if self.arg > 0xFFFF: - return 6 - else: - return 3 - else: - return 1 + return (6 if self.arg > 0xFFFF else 3) + return 1 def jump_to(self, target, absolute=False): """Indicate the target this jump instruction. @@ -54,9 +50,9 @@ class Block(object): """A basic control flow block. - It has one entry point and several possible exit points. Its instructions - may be jumps to other blocks, or if control flow reaches the end of the - block, it continues to next_block. + It has one entry point and several possible exit points. Its + instructions may be jumps to other blocks, or if control flow + reaches the end of the block, it continues to next_block. """ def __init__(self): @@ -71,10 +67,10 @@ stack.append(nextblock) def post_order(self): - """Return this block and its children in post order. - This means that the graph of blocks is first cleaned up to - ignore back-edges, thus turning it into a DAG. Then the DAG - is linearized. For example: + """Return this block and its children in post order. 
This means + that the graph of blocks is first cleaned up to ignore + back-edges, thus turning it into a DAG. Then the DAG is + linearized. For example: A --> B -\ => [A, D, B, C] \-> D ---> C @@ -105,7 +101,9 @@ return resultblocks def code_size(self): - """Return the encoded size of all the instructions in this block.""" + """Return the encoded size of all the instructions in this + block. + """ i = 0 for instr in self.instructions: i += instr.size() @@ -141,6 +139,7 @@ i += 1 return result + def _list_to_dict(l, offset=0): result = {} index = offset @@ -300,11 +299,11 @@ def _resolve_block_targets(self, blocks): """Compute the arguments of jump instructions.""" last_extended_arg_count = 0 - # The reason for this loop is extended jumps. EXTENDED_ARG extends the - # bytecode size, so it might invalidate the offsets we've already given. - # Thus we have to loop until the number of extended args is stable. Any - # extended jump at all is extremely rare, so performance is not too - # concerning. + # The reason for this loop is extended jumps. EXTENDED_ARG + # extends the bytecode size, so it might invalidate the offsets + # we've already given. Thus we have to loop until the number of + # extended args is stable. Any extended jump at all is + # extremely rare, so performance is not too concerning. 
while True: extended_arg_count = 0 offset = 0 @@ -330,7 +329,8 @@ instr.opcode = ops.JUMP_ABSOLUTE absolute = True elif target_op == ops.RETURN_VALUE: - # Replace JUMP_* to a RETURN into just a RETURN + # Replace JUMP_* to a RETURN into + # just a RETURN instr.opcode = ops.RETURN_VALUE instr.arg = 0 instr.has_jump = False @@ -345,7 +345,8 @@ instr.arg = jump_arg if jump_arg > 0xFFFF: extended_arg_count += 1 - if extended_arg_count == last_extended_arg_count and not force_redo: + if (extended_arg_count == last_extended_arg_count and + not force_redo): break else: last_extended_arg_count = extended_arg_count @@ -360,12 +361,14 @@ while True: try: w_key = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise break w_index = space.getitem(w_consts, w_key) - consts_w[space.int_w(w_index)] = space.getitem(w_key, first) + w_constant = space.getitem(w_key, first) + w_constant = misc.intern_if_common_string(space, w_constant) + consts_w[space.int_w(w_index)] = w_constant return consts_w def _get_code_flags(self): @@ -433,15 +436,16 @@ continue addr = offset - current_off # Python assumes that lineno always increases with - # increasing bytecode address (lnotab is unsigned char). - # Depending on when SET_LINENO instructions are emitted this - # is not always true. Consider the code: + # increasing bytecode address (lnotab is unsigned + # char). Depending on when SET_LINENO instructions + # are emitted this is not always true. Consider the + # code: # a = (1, # b) - # In the bytecode stream, the assignment to "a" occurs after - # the loading of "b". This works with the C Python compiler - # because it only generates a SET_LINENO instruction for the - # assignment. + # In the bytecode stream, the assignment to "a" + # occurs after the loading of "b". This works with + # the C Python compiler because it only generates a + # SET_LINENO instruction for the assignment. 
if line or addr: while addr > 255: push(chr(255)) @@ -484,22 +488,22 @@ free_names = _list_from_dict(self.free_vars, len(cell_names)) flags = self._get_code_flags() | self.compile_info.flags bytecode = ''.join([block.get_code() for block in blocks]) - return pycode.PyCode(self.space, - self.argcount, - len(self.var_names), - stack_depth, - flags, - bytecode, - list(consts_w), - names, - var_names, - self.compile_info.filename, - self.name, - self.first_lineno, - lnotab, - free_names, - cell_names, - self.compile_info.hidden_applevel) + return PyCode(self.space, + self.argcount, + len(self.var_names), + stack_depth, + flags, + bytecode, + list(consts_w), + names, + var_names, + self.compile_info.filename, + self.name, + self.first_lineno, + lnotab, + free_names, + cell_names, + self.compile_info.hidden_applevel) def _list_from_dict(d, offset=0): @@ -510,134 +514,134 @@ _static_opcode_stack_effects = { - ops.NOP : 0, - ops.STOP_CODE : 0, + ops.NOP: 0, + ops.STOP_CODE: 0, - ops.POP_TOP : -1, - ops.ROT_TWO : 0, - ops.ROT_THREE : 0, - ops.ROT_FOUR : 0, - ops.DUP_TOP : 1, + ops.POP_TOP: -1, + ops.ROT_TWO: 0, + ops.ROT_THREE: 0, + ops.ROT_FOUR: 0, + ops.DUP_TOP: 1, - ops.UNARY_POSITIVE : 0, - ops.UNARY_NEGATIVE : 0, - ops.UNARY_NOT : 0, - ops.UNARY_CONVERT : 0, - ops.UNARY_INVERT : 0, + ops.UNARY_POSITIVE: 0, + ops.UNARY_NEGATIVE: 0, + ops.UNARY_NOT: 0, + ops.UNARY_CONVERT: 0, + ops.UNARY_INVERT: 0, - ops.LIST_APPEND : -1, - ops.SET_ADD : -1, - ops.MAP_ADD : -2, - ops.STORE_MAP : -2, + ops.LIST_APPEND: -1, + ops.SET_ADD: -1, + ops.MAP_ADD: -2, + ops.STORE_MAP: -2, - ops.BINARY_POWER : -1, - ops.BINARY_MULTIPLY : -1, - ops.BINARY_DIVIDE : -1, - ops.BINARY_MODULO : -1, - ops.BINARY_ADD : -1, - ops.BINARY_SUBTRACT : -1, - ops.BINARY_SUBSCR : -1, - ops.BINARY_FLOOR_DIVIDE : -1, - ops.BINARY_TRUE_DIVIDE : -1, - ops.BINARY_LSHIFT : -1, - ops.BINARY_RSHIFT : -1, - ops.BINARY_AND : -1, - ops.BINARY_OR : -1, - ops.BINARY_XOR : -1, + ops.BINARY_POWER: -1, + ops.BINARY_MULTIPLY: -1, 
+ ops.BINARY_DIVIDE: -1, + ops.BINARY_MODULO: -1, + ops.BINARY_ADD: -1, + ops.BINARY_SUBTRACT: -1, + ops.BINARY_SUBSCR: -1, + ops.BINARY_FLOOR_DIVIDE: -1, + ops.BINARY_TRUE_DIVIDE: -1, + ops.BINARY_LSHIFT: -1, + ops.BINARY_RSHIFT: -1, + ops.BINARY_AND: -1, + ops.BINARY_OR: -1, + ops.BINARY_XOR: -1, - ops.INPLACE_FLOOR_DIVIDE : -1, - ops.INPLACE_TRUE_DIVIDE : -1, - ops.INPLACE_ADD : -1, - ops.INPLACE_SUBTRACT : -1, - ops.INPLACE_MULTIPLY : -1, - ops.INPLACE_DIVIDE : -1, - ops.INPLACE_MODULO : -1, - ops.INPLACE_POWER : -1, - ops.INPLACE_LSHIFT : -1, - ops.INPLACE_RSHIFT : -1, - ops.INPLACE_AND : -1, - ops.INPLACE_OR : -1, - ops.INPLACE_XOR : -1, + ops.INPLACE_FLOOR_DIVIDE: -1, + ops.INPLACE_TRUE_DIVIDE: -1, + ops.INPLACE_ADD: -1, + ops.INPLACE_SUBTRACT: -1, + ops.INPLACE_MULTIPLY: -1, + ops.INPLACE_DIVIDE: -1, + ops.INPLACE_MODULO: -1, + ops.INPLACE_POWER: -1, + ops.INPLACE_LSHIFT: -1, + ops.INPLACE_RSHIFT: -1, + ops.INPLACE_AND: -1, + ops.INPLACE_OR: -1, + ops.INPLACE_XOR: -1, - ops.SLICE+0 : 1, - ops.SLICE+1 : 0, - ops.SLICE+2 : 0, - ops.SLICE+3 : -1, - ops.STORE_SLICE+0 : -2, - ops.STORE_SLICE+1 : -3, - ops.STORE_SLICE+2 : -3, - ops.STORE_SLICE+3 : -4, - ops.DELETE_SLICE+0 : -1, - ops.DELETE_SLICE+1 : -2, - ops.DELETE_SLICE+2 : -2, - ops.DELETE_SLICE+3 : -3, + ops.SLICE+0: 1, + ops.SLICE+1: 0, + ops.SLICE+2: 0, + ops.SLICE+3: -1, + ops.STORE_SLICE+0: -2, + ops.STORE_SLICE+1: -3, + ops.STORE_SLICE+2: -3, + ops.STORE_SLICE+3: -4, + ops.DELETE_SLICE+0: -1, + ops.DELETE_SLICE+1: -2, + ops.DELETE_SLICE+2: -2, + ops.DELETE_SLICE+3: -3, - ops.STORE_SUBSCR : -2, - ops.DELETE_SUBSCR : -2, + ops.STORE_SUBSCR: -2, + ops.DELETE_SUBSCR: -2, - ops.GET_ITER : 0, - ops.FOR_ITER : 1, - ops.BREAK_LOOP : 0, - ops.CONTINUE_LOOP : 0, - ops.SETUP_LOOP : 0, + ops.GET_ITER: 0, + ops.FOR_ITER: 1, + ops.BREAK_LOOP: 0, + ops.CONTINUE_LOOP: 0, + ops.SETUP_LOOP: 0, - ops.PRINT_EXPR : -1, - ops.PRINT_ITEM : -1, - ops.PRINT_NEWLINE : 0, - ops.PRINT_ITEM_TO : -2, - ops.PRINT_NEWLINE_TO : -1, + 
ops.PRINT_EXPR: -1, + ops.PRINT_ITEM: -1, + ops.PRINT_NEWLINE: 0, + ops.PRINT_ITEM_TO: -2, + ops.PRINT_NEWLINE_TO: -1, - ops.WITH_CLEANUP : -1, - ops.POP_BLOCK : 0, - ops.END_FINALLY : -1, - ops.SETUP_WITH : 1, - ops.SETUP_FINALLY : 0, - ops.SETUP_EXCEPT : 0, + ops.WITH_CLEANUP: -1, + ops.POP_BLOCK: 0, + ops.END_FINALLY: -1, + ops.SETUP_WITH: 1, + ops.SETUP_FINALLY: 0, + ops.SETUP_EXCEPT: 0, - ops.LOAD_LOCALS : 1, - ops.RETURN_VALUE : -1, - ops.EXEC_STMT : -3, - ops.YIELD_VALUE : 0, - ops.BUILD_CLASS : -2, - ops.BUILD_MAP : 1, - ops.BUILD_SET : 1, - ops.COMPARE_OP : -1, + ops.LOAD_LOCALS: 1, + ops.RETURN_VALUE: -1, + ops.EXEC_STMT: -3, + ops.YIELD_VALUE: 0, + ops.BUILD_CLASS: -2, + ops.BUILD_MAP: 1, + ops.BUILD_SET: 1, + ops.COMPARE_OP: -1, - ops.LOOKUP_METHOD : 1, + ops.LOOKUP_METHOD: 1, - ops.LOAD_NAME : 1, - ops.STORE_NAME : -1, - ops.DELETE_NAME : 0, + ops.LOAD_NAME: 1, + ops.STORE_NAME: -1, + ops.DELETE_NAME: 0, - ops.LOAD_FAST : 1, - ops.STORE_FAST : -1, - ops.DELETE_FAST : 0, + ops.LOAD_FAST: 1, + ops.STORE_FAST: -1, + ops.DELETE_FAST: 0, - ops.LOAD_ATTR : 0, - ops.STORE_ATTR : -2, - ops.DELETE_ATTR : -1, + ops.LOAD_ATTR: 0, + ops.STORE_ATTR: -2, + ops.DELETE_ATTR: -1, - ops.LOAD_GLOBAL : 1, - ops.STORE_GLOBAL : -1, - ops.DELETE_GLOBAL : 0, + ops.LOAD_GLOBAL: 1, + ops.STORE_GLOBAL: -1, + ops.DELETE_GLOBAL: 0, - ops.LOAD_CLOSURE : 1, - ops.LOAD_DEREF : 1, - ops.STORE_DEREF : -1, + ops.LOAD_CLOSURE: 1, + ops.LOAD_DEREF: 1, + ops.STORE_DEREF: -1, - ops.LOAD_CONST : 1, + ops.LOAD_CONST: 1, - ops.IMPORT_STAR : -1, - ops.IMPORT_NAME : -1, - ops.IMPORT_FROM : 1, + ops.IMPORT_STAR: -1, + ops.IMPORT_NAME: -1, + ops.IMPORT_FROM: 1, - ops.JUMP_FORWARD : 0, - ops.JUMP_ABSOLUTE : 0, - ops.JUMP_IF_TRUE_OR_POP : 0, - ops.JUMP_IF_FALSE_OR_POP : 0, - ops.POP_JUMP_IF_TRUE : -1, - ops.POP_JUMP_IF_FALSE : -1, - ops.JUMP_IF_NOT_DEBUG : 0, + ops.JUMP_FORWARD: 0, + ops.JUMP_ABSOLUTE: 0, + ops.JUMP_IF_TRUE_OR_POP: 0, + ops.JUMP_IF_FALSE_OR_POP: 0, + ops.POP_JUMP_IF_TRUE: -1, + 
ops.POP_JUMP_IF_FALSE: -1, + ops.JUMP_IF_NOT_DEBUG: 0, ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/misc.py b/pypy/interpreter/astcompiler/misc.py --- a/pypy/interpreter/astcompiler/misc.py +++ b/pypy/interpreter/astcompiler/misc.py @@ -106,3 +106,13 @@ except IndexError: return name return "_%s%s" % (klass[i:], name) + + +def intern_if_common_string(space, w_const): + # only intern identifier-like strings + if not space.is_w(space.type(w_const), space.w_str): + return w_const + for c in space.str_w(w_const): + if not (c.isalnum() or c == '_'): + return w_const + return space.new_interned_w_str(w_const) diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -83,17 +83,16 @@ class __extend__(ast.BoolOp): - def _accept_jump_if_any_is(self, gen, condition, target): - self.values[0].accept_jump_if(gen, condition, target) - for i in range(1, len(self.values)): + def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0): + for i in range(len(self.values) - skip_last): self.values[i].accept_jump_if(gen, condition, target) def accept_jump_if(self, gen, condition, target): if condition and self.op == ast.And or \ (not condition and self.op == ast.Or): end = gen.new_block() - self._accept_jump_if_any_is(gen, not condition, end) - gen.emit_jump(ops.JUMP_FORWARD, target) + self._accept_jump_if_any_is(gen, not condition, end, skip_last=1) + self.values[-1].accept_jump_if(gen, condition, target) gen.use_next_block(end) else: self._accept_jump_if_any_is(gen, condition, target) @@ -272,6 +271,11 @@ if w_const is None: return tup consts_w[i] = w_const + # intern the string constants packed into the tuple here, + # because assemble.py will see the result as just a tuple constant + for i in range(len(consts_w)): + consts_w[i] = misc.intern_if_common_string( + self.space, consts_w[i]) else: consts_w = [] 
w_consts = self.space.newtuple(consts_w) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -14,7 +14,7 @@ UserDelAction) from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments -from pypy.interpreter.miscutils import ThreadLocals +from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary __all__ = ['ObjSpace', 'OperationError', 'W_Root'] @@ -384,7 +384,7 @@ self.builtin_modules = {} self.reloading_modules = {} - self.interned_strings = {} + self.interned_strings = make_weak_value_dictionary(self, str, W_Root) self.actionflag = ActionFlag() # changed by the signal module self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) @@ -522,11 +522,6 @@ if name not in modules: modules.append(name) - # a bit of custom logic: rctime take precedence over time - # XXX this could probably be done as a "requires" in the config - if 'rctime' in modules and 'time' in modules: - modules.remove('time') - self._builtinmodule_list = modules return self._builtinmodule_list @@ -782,25 +777,30 @@ return self.w_False def new_interned_w_str(self, w_s): + assert isinstance(w_s, W_Root) # and is not None s = self.str_w(w_s) if not we_are_translated(): assert type(s) is str - try: - return self.interned_strings[s] - except KeyError: - pass - self.interned_strings[s] = w_s - return w_s + w_s1 = self.interned_strings.get(s) + if w_s1 is None: + w_s1 = w_s + self.interned_strings.set(s, w_s1) + return w_s1 def new_interned_str(self, s): if not we_are_translated(): assert type(s) is str - try: - return self.interned_strings[s] - except KeyError: - pass - w_s = self.interned_strings[s] = self.wrap(s) - return w_s + w_s1 = self.interned_strings.get(s) + if w_s1 is None: + w_s1 = self.wrap(s) + self.interned_strings.set(s, w_s1) + return w_s1 + + def 
is_interned_str(self, s): + # interface for marshal_impl + if not we_are_translated(): + assert type(s) is str + return self.interned_strings.get(s) is not None def descr_self_interp_w(self, RequiredClass, w_obj): if not isinstance(w_obj, RequiredClass): diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -32,6 +32,17 @@ self.compiler = space.createcompiler() self.profilefunc = None self.w_profilefuncarg = None + self.thread_disappeared = False # might be set to True after os.fork() + + @staticmethod + def _mark_thread_disappeared(space): + # Called in the child process after os.fork() by interp_posix.py. + # Marks all ExecutionContexts except the current one + # with 'thread_disappeared = True'. + me = space.getexecutioncontext() + for ec in space.threadlocals.getallvalues().values(): + if ec is not me: + ec.thread_disappeared = True def gettopframe(self): return self.topframeref() diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -31,3 +31,19 @@ def getallvalues(self): return {0: self._value} + + +def make_weak_value_dictionary(space, keytype, valuetype): + "NOT_RPYTHON" + if space.config.translation.rweakref: + from rpython.rlib.rweakref import RWeakValueDictionary + return RWeakValueDictionary(keytype, valuetype) + else: + class FakeWeakValueDict(object): + def __init__(self): + self._dict = {} + def get(self, key): + return self._dict.get(key, None) + def set(self, key, value): + self._dict[key] = value + return FakeWeakValueDict() diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -125,13 +125,14 @@ else: return self.space.builtin + _NO_CELLS = [] + @jit.unroll_safe def initialize_frame_scopes(self, outer_func, code): # regular functions always 
have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. # CO_NEWLOCALS: make a locals dict unless optimized is also set # CO_OPTIMIZED: no locals dict needed at all - # NB: this method is overridden in nestedscope.py flags = code.co_flags if not (flags & pycode.CO_OPTIMIZED): if flags & pycode.CO_NEWLOCALS: @@ -144,7 +145,7 @@ nfreevars = len(code.co_freevars) if not nfreevars: if not ncellvars: - self.cells = [] + self.cells = self._NO_CELLS return # no self.cells needed - fast path elif outer_func is None: space = self.space diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -970,7 +970,12 @@ sys.stdout = out output = s.getvalue() assert "CALL_METHOD" in output - + + def test_interned_strings(self): + source = """x = ('foo_bar42', 5); y = 'foo_bar42'; z = x[0]""" + exec source + assert y is z + class AppTestExceptions: def test_indentation_error(self): diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -378,3 +378,41 @@ assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar' space.startup() assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar' + + def test_interned_strings_are_weak(self): + import weakref, gc, random + space = self.space + assert space.config.translation.rweakref + w1 = space.new_interned_str("abcdef") + w2 = space.new_interned_str("abcdef") + assert w2 is w1 + # + # check that 'w1' goes away if we don't hold a reference to it + rw1 = weakref.ref(w1) + del w1, w2 + i = 10 + while rw1() is not None: + i -= 1 + assert i >= 0 + gc.collect() + # + s = "foobar%r" % random.random() + w0 = space.wrap(s) + w1 = space.new_interned_w_str(w0) + assert w1 is w0 + w2 = space.new_interned_w_str(w0) + assert w2 is w0 + w3 = space.wrap(s) + assert 
w3 is not w0 + w4 = space.new_interned_w_str(w3) + assert w4 is w0 + # + # check that 'w0' goes away if we don't hold a reference to it + # (even if we hold a reference to 'w3') + rw0 = weakref.ref(w0) + del w0, w1, w2, w4 + i = 10 + while rw0() is not None: + i -= 1 + assert i >= 0 + gc.collect() diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -618,6 +618,7 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.special import NotImplemented, Ellipsis + def descr_get_dict(space, w_obj): w_dict = w_obj.getdict(space) if w_dict is None: @@ -638,6 +639,11 @@ return space.w_None return lifeline.get_any_weakref(space) +dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict, + doc="dictionary for instance variables (if defined)") +dict_descr.name = '__dict__' + + def generic_ne(space, w_obj1, w_obj2): if space.eq_w(w_obj1, w_obj2): return space.w_False diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -34,6 +34,7 @@ 'newp_handle': 'handle.newp_handle', 'from_handle': 'handle.from_handle', '_get_types': 'func._get_types', + 'from_buffer': 'func.from_buffer', 'string': 'func.string', 'buffer': 'cbuffer.buffer', diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -45,8 +45,9 @@ # cif_descr = self.getfunctype().cif_descr if not cif_descr: - raise OperationError(space.w_NotImplementedError, - space.wrap("callbacks with '...'")) + raise oefmt(space.w_NotImplementedError, + "%s: callback with unsupported argument or " + "return type or with '...'", self.getfunctype().name) res = clibffi.c_ffi_prep_closure(self.get_closure(), cif_descr.cif, invoke_callback, rffi.cast(rffi.VOIDP, 
self.unique_id)) diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -440,6 +440,25 @@ return "handle to %s" % (self.space.str_w(w_repr),) +class W_CDataFromBuffer(W_CData): + _attrs_ = ['buf', 'length', 'w_keepalive'] + _immutable_fields_ = ['buf', 'length', 'w_keepalive'] + + def __init__(self, space, cdata, ctype, buf, w_object): + W_CData.__init__(self, space, cdata, ctype) + self.buf = buf + self.length = buf.getlength() + self.w_keepalive = w_object + + def get_array_length(self): + return self.length + + def _repr_extra(self): + w_repr = self.space.repr(self.w_keepalive) + return "buffer len %d from '%s' object" % ( + self.length, self.space.type(self.w_keepalive).name) + + W_CData.typedef = TypeDef( '_cffi_backend.CData', __module__ = '_cffi_backend', diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -34,6 +34,7 @@ could_cast_anything=False) self.fargs = fargs self.ellipsis = bool(ellipsis) + self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # fresult is stored in self.ctitem if not ellipsis: @@ -41,7 +42,14 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. - CifDescrBuilder(fargs, fresult).rawallocate(self) + builder = CifDescrBuilder(fargs, fresult) + try: + builder.rawallocate(self) + except OperationError, e: + if not e.match(space, space.w_NotImplementedError): + raise + # else, eat the NotImplementedError. 
We will get the + # exception if we see an actual call def new_ctypefunc_completing_argtypes(self, args_w): space = self.space @@ -178,8 +186,6 @@ # ____________________________________________________________ -W_CTypeFunc.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # default value - BIG_ENDIAN = sys.byteorder == 'big' USE_C_LIBFFI_MSVC = getattr(clibffi, 'USE_C_LIBFFI_MSVC', False) @@ -295,18 +301,18 @@ nflat = 0 for i, cf in enumerate(ctype.fields_list): if cf.is_bitfield(): - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with bit fields")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with bit fields)", ctype.name) flat = 1 ct = cf.ctype while isinstance(ct, ctypearray.W_CTypeArray): flat *= ct.length ct = ct.ctitem if flat <= 0: - raise OperationError(space.w_NotImplementedError, - space.wrap("cannot pass as argument or return value " - "a struct with a zero-length array")) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' not supported as argument or return value" + " (it is a struct with a zero-length array)", ctype.name) nflat += flat if USE_C_LIBFFI_MSVC and is_result_type: diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -158,21 +158,14 @@ class W_CTypePrimitiveSigned(W_CTypePrimitive): - _attrs_ = ['value_fits_long', 'vmin', 'vrangemax'] - _immutable_fields_ = ['value_fits_long', 'vmin', 'vrangemax'] + _attrs_ = ['value_fits_long', 'value_smaller_than_long'] + _immutable_fields_ = ['value_fits_long', 'value_smaller_than_long'] is_primitive_integer = True def __init__(self, *args): W_CTypePrimitive.__init__(self, *args) self.value_fits_long = self.size <= rffi.sizeof(lltype.Signed) - if self.size < rffi.sizeof(lltype.Signed): - assert self.value_fits_long - sh = self.size 
* 8 - self.vmin = r_uint(-1) << (sh - 1) - self.vrangemax = (r_uint(1) << sh) - 1 - else: - self.vmin = r_uint(0) - self.vrangemax = r_uint(-1) + self.value_smaller_than_long = self.size < rffi.sizeof(lltype.Signed) def cast_to_int(self, cdata): return self.convert_to_object(cdata) @@ -192,8 +185,17 @@ def convert_from_object(self, cdata, w_ob): if self.value_fits_long: value = misc.as_long(self.space, w_ob) - if self.size < rffi.sizeof(lltype.Signed): - if r_uint(value) - self.vmin > self.vrangemax: + if self.value_smaller_than_long: + size = self.size + if size == 1: + signextended = misc.signext(value, 1) + elif size == 2: + signextended = misc.signext(value, 2) + elif size == 4: + signextended = misc.signext(value, 4) + else: + raise AssertionError("unsupported size") + if value != signextended: self._overflow(w_ob) misc.write_raw_signed_data(cdata, value, self.size) else: @@ -221,7 +223,7 @@ length = w_cdata.get_array_length() populate_list_from_raw_array(res, buf, length) return res - elif self.value_fits_long: + elif self.value_smaller_than_long: res = [0] * w_cdata.get_array_length() misc.unpack_list_from_raw_array(res, w_cdata._cdata, self.size) return res @@ -235,8 +237,8 @@ cdata = rffi.cast(rffi.LONGP, cdata) copy_list_to_raw_array(int_list, cdata) else: - overflowed = misc.pack_list_to_raw_array_bounds( - int_list, cdata, self.size, self.vmin, self.vrangemax) + overflowed = misc.pack_list_to_raw_array_bounds_signed( + int_list, cdata, self.size) if overflowed != 0: self._overflow(self.space.wrap(overflowed)) return True @@ -314,8 +316,8 @@ def pack_list_of_items(self, cdata, w_ob): int_list = self.space.listview_int(w_ob) if int_list is not None: - overflowed = misc.pack_list_to_raw_array_bounds( - int_list, cdata, self.size, r_uint(0), self.vrangemax) + overflowed = misc.pack_list_to_raw_array_bounds_unsigned( + int_list, cdata, self.size, self.vrangemax) if overflowed != 0: self._overflow(self.space.wrap(overflowed)) return True diff --git 
a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -76,3 +76,32 @@ def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) + +# ____________________________________________________________ + + at unwrap_spec(w_ctype=ctypeobj.W_CType) +def from_buffer(space, w_ctype, w_x): + from pypy.module._cffi_backend import ctypearray, ctypeprim + # + if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or + not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)): + raise oefmt(space.w_TypeError, + "needs 'char[]', got '%s'", w_ctype.name) + # + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? + try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + try: + _cdata = buf.get_raw_address() + except ValueError: + raise oefmt(space.w_TypeError, + "from_buffer() got a '%T' object, which supports the " + "buffer interface but cannot be rendered as a plain " + "raw address on PyPy", w_x) + # + return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -216,6 +216,19 @@ neg_msg = "can't convert negative number to unsigned" ovf_msg = "long too big to convert" + at specialize.arg(1) +def signext(value, size): + # 'value' is sign-extended from 'size' bytes to a full integer. + # 'size' should be a constant smaller than a full integer size. 
+ if size == rffi.sizeof(rffi.SIGNEDCHAR): + return rffi.cast(lltype.Signed, rffi.cast(rffi.SIGNEDCHAR, value)) + elif size == rffi.sizeof(rffi.SHORT): + return rffi.cast(lltype.Signed, rffi.cast(rffi.SHORT, value)) + elif size == rffi.sizeof(rffi.INT): + return rffi.cast(lltype.Signed, rffi.cast(rffi.INT, value)) + else: + raise AssertionError("unsupported size") + # ____________________________________________________________ class _NotStandardObject(Exception): @@ -334,13 +347,26 @@ # ____________________________________________________________ -def pack_list_to_raw_array_bounds(int_list, target, size, vmin, vrangemax): +def pack_list_to_raw_array_bounds_signed(int_list, target, size): for TP, TPP in _prim_signed_types: if size == rffi.sizeof(TP): ptr = rffi.cast(TPP, target) for i in range(len(int_list)): x = int_list[i] - if r_uint(x) - vmin > vrangemax: + y = rffi.cast(TP, x) + if x != rffi.cast(lltype.Signed, y): + return x # overflow + ptr[i] = y + return 0 + raise NotImplementedError("bad integer size") + +def pack_list_to_raw_array_bounds_unsigned(int_list, target, size, vrangemax): + for TP, TPP in _prim_signed_types: + if size == rffi.sizeof(TP): + ptr = rffi.cast(TPP, target) + for i in range(len(int_list)): + x = int_list[i] + if r_uint(x) > vrangemax: return x # overflow ptr[i] = rffi.cast(TP, x) return 0 diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -62,10 +62,54 @@ eptype("intptr_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) eptype("uintptr_t", rffi.UINTPTR_T, ctypeprim.W_CTypePrimitiveUnsigned) -eptype("ptrdiff_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) # <-xxx eptype("size_t", rffi.SIZE_T, ctypeprim.W_CTypePrimitiveUnsigned) eptype("ssize_t", rffi.SSIZE_T, ctypeprim.W_CTypePrimitiveSigned) +_WCTSigned = ctypeprim.W_CTypePrimitiveSigned +_WCTUnsign = ctypeprim.W_CTypePrimitiveUnsigned + 
+eptype("ptrdiff_t", getattr(rffi, 'PTRDIFF_T', rffi.INTPTR_T), _WCTSigned) +eptype("intmax_t", getattr(rffi, 'INTMAX_T', rffi.LONGLONG), _WCTSigned) +eptype("uintmax_t", getattr(rffi, 'UINTMAX_T', rffi.LONGLONG), _WCTUnsign) + +if hasattr(rffi, 'INT_LEAST8_T'): + eptype("int_least8_t", rffi.INT_LEAST8_T, _WCTSigned) + eptype("int_least16_t", rffi.INT_LEAST16_T, _WCTSigned) + eptype("int_least32_t", rffi.INT_LEAST32_T, _WCTSigned) + eptype("int_least64_t", rffi.INT_LEAST64_T, _WCTSigned) + eptype("uint_least8_t", rffi.UINT_LEAST8_T, _WCTUnsign) + eptype("uint_least16_t",rffi.UINT_LEAST16_T, _WCTUnsign) + eptype("uint_least32_t",rffi.UINT_LEAST32_T, _WCTUnsign) + eptype("uint_least64_t",rffi.UINT_LEAST64_T, _WCTUnsign) +else: + eptypesize("int_least8_t", 1, _WCTSigned) + eptypesize("uint_least8_t", 1, _WCTUnsign) + eptypesize("int_least16_t", 2, _WCTSigned) + eptypesize("uint_least16_t", 2, _WCTUnsign) + eptypesize("int_least32_t", 4, _WCTSigned) + eptypesize("uint_least32_t", 4, _WCTUnsign) + eptypesize("int_least64_t", 8, _WCTSigned) + eptypesize("uint_least64_t", 8, _WCTUnsign) + +if hasattr(rffi, 'INT_FAST8_T'): + eptype("int_fast8_t", rffi.INT_FAST8_T, _WCTSigned) + eptype("int_fast16_t", rffi.INT_FAST16_T, _WCTSigned) + eptype("int_fast32_t", rffi.INT_FAST32_T, _WCTSigned) + eptype("int_fast64_t", rffi.INT_FAST64_T, _WCTSigned) + eptype("uint_fast8_t", rffi.UINT_FAST8_T, _WCTUnsign) + eptype("uint_fast16_t",rffi.UINT_FAST16_T, _WCTUnsign) + eptype("uint_fast32_t",rffi.UINT_FAST32_T, _WCTUnsign) + eptype("uint_fast64_t",rffi.UINT_FAST64_T, _WCTUnsign) +else: + eptypesize("int_fast8_t", 1, _WCTSigned) + eptypesize("uint_fast8_t", 1, _WCTUnsign) + eptypesize("int_fast16_t", 2, _WCTSigned) + eptypesize("uint_fast16_t", 2, _WCTUnsign) + eptypesize("int_fast32_t", 4, _WCTSigned) + eptypesize("uint_fast32_t", 4, _WCTUnsign) + eptypesize("int_fast64_t", 8, _WCTSigned) + eptypesize("uint_fast64_t", 8, _WCTUnsign) + @unwrap_spec(name=str) def new_primitive_type(space, 
name): try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -397,7 +397,7 @@ def test_invalid_indexing(): p = new_primitive_type("int") x = cast(p, 42) - py.test.raises(TypeError, "p[0]") + py.test.raises(TypeError, "x[0]") def test_default_str(): BChar = new_primitive_type("char") @@ -1030,11 +1030,12 @@ BInt = new_primitive_type("int") BArray0 = new_array_type(new_pointer_type(BInt), 0) BStruct = new_struct_type("struct foo") + BStructP = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BArray0)]) - py.test.raises(NotImplementedError, new_function_type, - (BStruct,), BInt, False) - py.test.raises(NotImplementedError, new_function_type, - (BInt,), BStruct, False) + BFunc = new_function_type((BStruct,), BInt, False) + py.test.raises(NotImplementedError, cast(BFunc, 123), cast(BStructP, 123)) + BFunc2 = new_function_type((BInt,), BStruct, False) + py.test.raises(NotImplementedError, cast(BFunc2, 123), 123) def test_call_function_9(): BInt = new_primitive_type("int") @@ -1805,7 +1806,8 @@ new_function_type((), new_pointer_type(BFunc)) BUnion = new_union_type("union foo_u") complete_struct_or_union(BUnion, []) - py.test.raises(NotImplementedError, new_function_type, (), BUnion) + BFunc = new_function_type((), BUnion) + py.test.raises(NotImplementedError, cast(BFunc, 123)) py.test.raises(TypeError, new_function_type, (), BArray) def test_struct_return_in_func(): @@ -2718,7 +2720,16 @@ def test_nonstandard_integer_types(): for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t', 'uint32_t', 'int64_t', 'uint64_t', 'intptr_t', - 'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t']: + 'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t', + 'int_least8_t', 'uint_least8_t', + 'int_least16_t', 'uint_least16_t', + 'int_least32_t', 'uint_least32_t', + 'int_least64_t', 
'uint_least64_t', + 'int_fast8_t', 'uint_fast8_t', + 'int_fast16_t', 'uint_fast16_t', + 'int_fast32_t', 'uint_fast32_t', + 'int_fast64_t', 'uint_fast64_t', + 'intmax_t', 'uintmax_t']: new_primitive_type(typename) # works def test_cannot_convert_unicode_to_charp(): @@ -3186,6 +3197,20 @@ ('a2', BChar, 5)], None, -1, -1, SF_PACKED) +def test_from_buffer(): + import array + a = array.array('H', [10000, 20000, 30000]) + BChar = new_primitive_type("char") + BCharP = new_pointer_type(BChar) + BCharA = new_array_type(BCharP, None) + c = from_buffer(BCharA, a) + assert typeof(c) is BCharA + assert len(c) == 6 + assert repr(c) == "" + p = new_pointer_type(new_primitive_type("unsigned short")) + cast(p, c)[1] += 500 + assert list(a) == [10000, 20500, 30000] + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8.6" diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -30,7 +30,7 @@ class AppTestC(object): """Populated below, hack hack hack.""" - spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO')) + spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO', 'array')) def setup_class(cls): testfuncs_w = [] diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -34,8 +34,12 @@ # this function runs with the GIL acquired so there is no race # condition in the creation of the lock me = self.space.getexecutioncontext() # used as thread ident - if self.slockowner is me: - return False # already acquired by the current thread + if self.slockowner is not None: + if self.slockowner is me: + return False # already acquired by the current thread + if self.slockowner.thread_disappeared: + self.slockowner = None + self.slock = None try: if self.slock is None: self.slock = self.space.allocate_lock() 
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -275,6 +275,24 @@ finally: f.close() + def test_ignore_ioerror_in_readall_if_nonempty_result(self): + # this is the behavior of regular files in CPython 2.7, as + # well as of _io.FileIO at least in CPython 3.3. This is + # *not* the behavior of _io.FileIO in CPython 3.4 or 3.5; + # see CPython's issue #21090. + try: + from posix import openpty, fdopen, write, close + except ImportError: + skip('no openpty on this platform') + read_fd, write_fd = openpty() + write(write_fd, 'Abc\n') + close(write_fd) + f = fdopen(read_fd) + s = f.read() + assert s == 'Abc\r\n' + raises(IOError, f.read) + f.close() + class AppTestNonblocking(object): def setup_class(cls): @@ -286,7 +304,7 @@ py.test.skip("works with internals of _file impl on py.py") state = [0] def read(fd, n=None): - if fd != 42: + if fd != 424242: return cls.old_read(fd, n) if state[0] == 0: state[0] += 1 @@ -297,7 +315,7 @@ return '' os.read = read stdin = W_File(cls.space) - stdin.file_fdopen(42, 'rb', 1) + stdin.file_fdopen(424242, 'rb', 1) stdin.name = '' cls.w_stream = stdin diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -221,7 +221,7 @@ expected_filename = str(udir.join('sample')) expected_mode = 'rb' extra_args = () - spaceconfig = {'usemodules': ['binascii', 'rctime', 'struct']} + spaceconfig = {'usemodules': ['binascii', 'time', 'struct']} def setup_method(self, method): space = self.space @@ -281,7 +281,7 @@ expected_filename = '' expected_mode = 'rb' extra_args = () - spaceconfig = {'usemodules': ['binascii', 'rctime', 'struct']} + spaceconfig = {'usemodules': ['binascii', 'time', 'struct']} def setup_method(self, method): space = self.space @@ -359,7 +359,7 @@ # A few extra tests 
class AppTestAFewExtra: - spaceconfig = {'usemodules': ['_socket', 'array', 'binascii', 'rctime', + spaceconfig = {'usemodules': ['_socket', 'array', 'binascii', 'time', 'struct']} def setup_method(self, method): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -565,7 +565,7 @@ # Flush the write buffer if necessary if self.writable: - self._writer_flush_unlocked(space) + self._flush_and_rewind_unlocked(space) self._reader_reset_buf() # Read whole blocks, and don't buffer them @@ -812,11 +812,6 @@ self._check_closed(space, "flush of closed file") with self.lock: self._flush_and_rewind_unlocked(space) - if self.readable: - # Rewind the raw stream so that its position corresponds to - # the current logical position. - self._raw_seek(space, -self._raw_offset(), 1) - self._reader_reset_buf() def _flush_and_rewind_unlocked(self, space): self._writer_flush_unlocked(space) diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -24,8 +24,7 @@ try: w_value = error.get_w_value(space) w_errno = space.getattr(w_value, space.wrap("errno")) - return space.is_true( - space.eq(w_errno, space.wrap(EINTR))) + return space.eq_w(w_errno, space.wrap(EINTR)) except OperationError: return False diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -352,3 +352,42 @@ assert mod == 'io' else: assert mod == '_io' + + def test_issue1902(self): + import _io + with _io.open(self.tmpfile, 'w+b', 4096) as f: + f.write(b'\xff' * 13569) + f.flush() + f.seek(0, 0) + f.read(1) + f.seek(-1, 1) + f.write(b'') + + def test_issue1902_2(self): + import _io + with _io.open(self.tmpfile, 'w+b', 4096) as f: + f.write(b'\xff' * 13569) + f.flush() + f.seek(0, 0) + + f.read(1) + 
f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(4123) + f.seek(-4123, 1) + + def test_issue1902_3(self): + import _io + buffer_size = 4096 + with _io.open(self.tmpfile, 'w+b', buffer_size) as f: + f.write(b'\xff' * buffer_size * 3) + f.flush() + f.seek(0, 0) + + f.read(1) + f.seek(-1, 1) + f.write(b'\xff') + f.seek(1, 0) + f.read(buffer_size * 2) + assert f.tell() == 1 + buffer_size * 2 diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -1,6 +1,6 @@ class AppTestCProfile(object): spaceconfig = { - "usemodules": ['_lsprof', 'rctime'], + "usemodules": ['_lsprof', 'time'], } def setup_class(cls): diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -5,7 +5,7 @@ class AppTestMD5(object): spaceconfig = { - 'usemodules': ['_md5', 'binascii', 'rctime', 'struct'], + 'usemodules': ['_md5', 'binascii', 'time', 'struct'], } def setup_class(cls): diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -97,24 +97,24 @@ Py_UNICODE *outbuf_start, *outbuf, *outbuf_end; }; -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, char *inbuf, Py_ssize_t inlen); -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN Py_ssize_t 
pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); -RPY_EXPORTED_FOR_TESTS +RPY_EXTERN Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, Py_UNICODE *, Py_ssize_t, Py_ssize_t); @@ -125,35 +125,35 @@ unsigned char *outbuf_start, *outbuf, *outbuf_end; }; -RPY_EXPORTED_FOR_TESTS From noreply at buildbot.pypy.org Sun Jan 18 14:50:15 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 18 Jan 2015 14:50:15 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Adapt to removed obmalloc. Message-ID: <20150118135015.1A97D1C027F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r75423:847de726f3e3 Date: 2015-01-08 22:50 +0100 http://bitbucket.org/pypy/pypy/changeset/847de726f3e3/ Log: Adapt to removed obmalloc. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1649,15 +1649,9 @@ return lltype.functionptr(func_type, name, external='C', calling_conv='c', compilation_info=compilation_info) -c_dir = local(cdir) -eci = ExternalCompilationInfo( - include_dirs=[cdir, c_dir / '..' 
/ 'llvm'], - includes=['src/allocator.h'], - separate_module_files=[c_dir / 'src' / 'allocator.c']) -raw_malloc = extfunc('PyObject_Malloc', [lltype.Signed], llmemory.Address, eci) -raw_free = extfunc('PyObject_Free', [llmemory.Address], lltype.Void, eci) - eci = ExternalCompilationInfo() +raw_malloc = extfunc('malloc', [lltype.Signed], llmemory.Address, eci) +raw_free = extfunc('free', [llmemory.Address], lltype.Void, eci) llvm_memcpy = extfunc('llvm.memcpy.p0i8.p0i8.i' + str(LLVMSigned.bitwidth), [llmemory.Address, llmemory.Address, lltype.Signed, rffi.INT, lltype.Bool], lltype.Void, eci) From noreply at buildbot.pypy.org Sun Jan 18 14:50:16 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 18 Jan 2015 14:50:16 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Fix or implement various things to make all tests pass and translation succeed. Message-ID: <20150118135016.437E21C027F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r75424:72d355b06d86 Date: 2015-01-18 14:49 +0100 http://bitbucket.org/pypy/pypy/changeset/72d355b06d86/ Log: Fix or implement various things to make all tests pass and translation succeed. 
diff --git a/rpython/memory/gctransform/refcounting.py b/rpython/memory/gctransform/refcounting.py --- a/rpython/memory/gctransform/refcounting.py +++ b/rpython/memory/gctransform/refcounting.py @@ -285,3 +285,9 @@ resulttype=llmemory.Address) hop.genop("direct_call", [self.identityhash_ptr, v_adr], resultvar=hop.spaceop.result) + + def gct_zero_gc_pointers_inside(self, hop): + pass + + def gct_zero_everything_inside(self, hop): + pass diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -723,6 +723,7 @@ print >> f, '\t%s;' % cdecl(typename, field.fieldname) print >> f, '};' print >> f + return fields def gen_forwarddecl(f, database): print >> f, '/***********************************************************/' diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -193,11 +193,19 @@ return 'or({T} sext({lp.TV} to {T}), {rest.TV})'.format(**locals()) elif isinstance(value, CDefinedIntSymbolic): if value is malloc_zero_filled: - return '1' + gctransformer = database.genllvm.gcpolicy.gctransformer + return str(int(gctransformer.malloc_zero_filled)) elif value is _we_are_jitted: return '0' elif value is running_on_llinterp: return '0' + elif value.expr.startswith('RPY_TLOFS_'): + fieldname = value.expr[10:] + idx = database.tls_struct.fldnames_wo_voids.index(fieldname) + return ('ptrtoint({}* getelementptr({}* null, i64 0, i32 {}) ' + 'to {})'.format( + database.tls_struct.fldtypes_wo_voids[idx].repr_type(), + database.tls_struct.repr_type(), idx, SIGNED_TYPE)) elif isinstance(value, llmemory.AddressAsInt): return 'ptrtoint({.TV} to {})'.format(get_repr(value.adr.ptr), SIGNED_TYPE) @@ -358,6 +366,8 @@ if type not in PRIMITIVES: size_in_bytes, is_unsigned = rffi.size_and_sign(type) PRIMITIVES[type] = IntegralType(size_in_bytes * 8, is_unsigned) +for key, value in 
PRIMITIVES.items(): + value.lltype = key LLVMSigned = PRIMITIVES[lltype.Signed] SIGNED_TYPE = LLVMSigned.repr_type() LLVMHalfWord = PRIMITIVES[llgroup.HALFWORD] @@ -753,6 +763,8 @@ self.types = PRIMITIVES.copy() self.hashes = [] self.stack_bottoms = [] + self.tls_getters = set() + self.tls_addr_wrapper = False def get_type(self, type): try: @@ -772,6 +784,7 @@ if ret.needs_gc_header: _llvm_needs_header[type] = database.genllvm.gcpolicy \ .get_gc_fields_lltype() # hint for ll2ctypes + ret.lltype = type return ret def unique_name(self, name, llvm_name=True): @@ -1379,6 +1392,12 @@ self.op_direct_call(result, get_repr(llvm_memset), ptr, null_char, size, null_int, null_bool) + def op_raw_memset(self, result, ptr, val, size): + assert 0 <= val.value <= 255 + self.op_direct_call(result, get_repr(llvm_memset), ptr, + ConstantRepr(LLVMChar, chr(val.value)), + size, null_int, null_bool) + def op_raw_memcopy(self, result, src, dst, size): self.op_direct_call(result, get_repr(llvm_memcpy), dst, src, size, null_int, null_bool) @@ -1443,6 +1462,9 @@ else: assert False, "No subop {}".format(subopnum.value) + def op_gc_thread_die(self, result): + self.op_direct_call(result, get_repr(rpy_tls_thread_die)) + def _ignore(self, *args): pass op_gc_stack_bottom = _ignore @@ -1484,6 +1506,39 @@ def op_convert_longlong_bytes_to_float(self, result, ll): self.w('{result.V} = bitcast {ll.TV} to {result.T}'.format(**locals())) + def op_threadlocalref_get(self, result, offset): + if isinstance(offset, ConstantRepr): + assert isinstance(offset.value, CDefinedIntSymbolic) + fieldname = offset.value.expr + assert fieldname.startswith('RPY_TLOFS_') + fieldname = fieldname[10:] + if fieldname not in database.tls_getters: + database.tls_getters.add(fieldname) + from rpython.translator.c.database import LowLevelDatabase + from rpython.translator.c.support import cdecl + db = LowLevelDatabase() + pattern = ("{}() {{ return RPY_THREADLOCALREF_GET({}); }}") + 
database.genllvm.sources.append(pattern.format( + cdecl(db.gettype(result.type.lltype), + '_rpy_tls_get_' + fieldname), fieldname)) + database.f.write('declare {result.T} @_rpy_tls_get_{fieldname}()' + .format(**locals())) + self.w('{result.V} = call {result.T} @_rpy_tls_get_{fieldname}()' + .format(**locals())) + else: + tls_addr = self._tmp(LLVMAddress) + self.op_threadlocalref_addr(tls_addr) + self.op_raw_load(result, tls_addr, offset) + + def op_threadlocalref_addr(self, result): + if not database.tls_addr_wrapper: + database.tls_addr_wrapper = True + wrapper_src = ('char *_rpy_tls_addr() ' + '{ char *r; OP_THREADLOCALREF_ADDR(r); return r; }') + database.genllvm.sources.append(wrapper_src) + database.f.write('declare i8* @_rpy_tls_addr()\n') + self.w('{result.V} = call i8* @_rpy_tls_addr()'.format(**locals())) + class GCPolicy(object): def __init__(self, genllvm): @@ -1662,6 +1717,10 @@ eci) llvm_readcyclecounter = extfunc('llvm.readcyclecounter', [], lltype.SignedLongLong, eci) +rpy_tls_program_init = extfunc('RPython_ThreadLocals_ProgramInit', [], + lltype.Void, eci) +rpy_tls_thread_die = extfunc('RPython_ThreadLocals_ThreadDie', [], lltype.Void, + eci) del eci null_int = ConstantRepr(LLVMInt, 0) @@ -1699,10 +1758,14 @@ def prepare(self, entrypoint, secondary_entrypoints): if callable(entrypoint): + bk = self.translator.annotator.bookkeeper + has_tls = bool(bk.thread_local_fields) setup_ptr = self.gcpolicy.get_setup_ptr() def main(argc, argv): llop.gc_stack_bottom(lltype.Void) try: + if has_tls: + rpy_tls_program_init() if setup_ptr is not None: setup_ptr() args = [rffi.charp2str(argv[i]) for i in range(argc)] @@ -1733,6 +1796,11 @@ self.ovf_err = self.exctransformer.get_builtin_exception(OverflowError) ovf_err_inst = self.ovf_err[1] self.gcpolicy._consider_constant(ovf_err_inst._T, ovf_err_inst._obj) + + # XXX for some reason, this is needed to make all tests pass + tmp = self.exctransformer.get_builtin_exception(OSError)[1] + 
self.gcpolicy._consider_constant(tmp._T, tmp._obj) + self.gcpolicy.finish() def _write_special_declarations(self, f): @@ -1769,8 +1837,19 @@ f.write(line) database = Database(self, f) + + from rpython.translator.c.database import LowLevelDatabase + from rpython.translator.c.genc import gen_threadlocal_structdef + db = LowLevelDatabase(self.translator) + with self.work_dir.join('structdef.h').open('w') as f2: + tls_fields = gen_threadlocal_structdef(f2, db) + database.tls_struct = StructType() + database.tls_struct.setup( + 'tls', [(LLVMInt, 'ready'), (LLVMAddress, 'stack_end')] + + [(database.get_type(fld.FIELDTYPE), fld.fieldname) + for fld in tls_fields], False) + self._write_special_declarations(f) - for export in self.entrypoints: get_repr(export._as_ptr()).V @@ -1803,8 +1882,13 @@ raise Exception("RPYTHON_LLVM_ASSEMBLY must be 'false' or 'true'.") # merge ECIs + c_dir = local(cdir) eci = ExternalCompilationInfo( - includes=['stdio.h', 'stdlib.h'], + include_dirs=[cdir, c_dir / '..' / 'llvm', self.work_dir], + includes=['stdio.h', 'stdlib.h', 'src/threadlocal.h', + 'src/stack.h', 'structdef.h'], + separate_module_files=[c_dir / 'src' / 'threadlocal.c', + c_dir / 'src' / 'stack.c'], separate_module_sources=['\n'.join(self.sources)], post_include_bits=['typedef _Bool bool_t;'] ).merge(*self.ecis).convert_sources_to_files() diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -287,7 +287,7 @@ self.rpyexc_clear() ret = self.entry_point(*args) if self.rpyexc_occured(): - name = ''.join(self.rpyexc_fetch_type().name._obj.items[:-1]) + name = ''.join(self.rpyexc_fetch_type().name.chars) if expected_exception_name is not None: assert name == expected_exception_name return From noreply at buildbot.pypy.org Sun Jan 18 14:52:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 14:52:54 +0100 (CET) 
Subject: [pypy-commit] pypy stmgc-c7: Process truncated log files Message-ID: <20150118135254.9C07A1C03DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75425:f7b0089c0975 Date: 2015-01-18 14:52 +0100 http://bitbucket.org/pypy/pypy/changeset/f7b0089c0975/ Log: Process truncated log files diff --git a/pypy/stm/print_stm_log.py b/pypy/stm/print_stm_log.py --- a/pypy/stm/print_stm_log.py +++ b/pypy/stm/print_stm_log.py @@ -72,7 +72,7 @@ result = [] while True: packet = f.read(19) - if not packet: break + if len(packet) < 19: break sec, nsec, threadnum, otherthreadnum, event, len1, len2 = \ struct.unpack("IIIIBBB", packet) if event >= _STM_EVENT_N: From noreply at buildbot.pypy.org Sun Jan 18 15:30:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 15:30:23 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Account for the usleep(1) we do after an abort Message-ID: <20150118143023.B6B341C03E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75426:97e19da50a69 Date: 2015-01-18 15:30 +0100 http://bitbucket.org/pypy/pypy/changeset/97e19da50a69/ Log: Account for the usleep(1) we do after an abort diff --git a/pypy/stm/print_stm_log.py b/pypy/stm/print_stm_log.py --- a/pypy/stm/print_stm_log.py +++ b/pypy/stm/print_stm_log.py @@ -32,6 +32,8 @@ _STM_EVENT_N = 15 +PAUSE_AFTER_ABORT = 0.000001 # usleep(1) after every abort + event_name = {} for _key, _value in globals().items(): @@ -108,6 +110,7 @@ if self._conflict and entry.event == STM_TRANSACTION_ABORT: c = self._conflict[1] c.aborted_time += transaction_time + c.paused_time += PAUSE_AFTER_ABORT self._conflict = None def transaction_pause(self, entry): From noreply at buildbot.pypy.org Sun Jan 18 16:03:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 16:03:57 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Try harder to preserve the stm_location Message-ID: <20150118150357.8DAFA1C05ED@cobra.cs.uni-duesseldorf.de> 
Author: Armin Rigo Branch: stmgc-c7 Changeset: r75427:dd8bd6ba13a9 Date: 2015-01-18 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/dd8bd6ba13a9/ Log: Try harder to preserve the stm_location diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -143,6 +143,8 @@ ------------------------------------------------------------ +stm_read(p125) +cond_call_gc_wb_array(p125...) # don't need the stm_read maybe? diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -145,8 +145,9 @@ if op.is_guard() or op.getopnum() == rop.FINISH: # the only ops with descrs that get recorded in a trace llref = cast_instance_to_gcref(op.getdescr()) - assert rgc._make_sure_does_not_move(llref) - gcrefs_output_list.append(llref) + if llref: + assert rgc._make_sure_does_not_move(llref) + gcrefs_output_list.append(llref) # if len(ops_with_movable_const_ptr[op]) == 0: del ops_with_movable_const_ptr[op] diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -38,6 +38,7 @@ _previous_size = -1 _op_malloc_nursery = None _v_last_malloced_nursery = None + stm_location = None # does_any_alloc tells us if we did any allocation since the last LABEL does_any_allocation = False @@ -63,6 +64,8 @@ # for i in range(len(operations)): op = operations[i] + if op.stm_location is not None: + self.stm_location = op.stm_location if op.getopnum() == rop.DEBUG_MERGE_POINT: continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- @@ -110,6 +113,10 @@ return self.newops def other_operation(self, op): + self.newop(op) + + def newop(self, op): + op.stm_location = self.stm_location self.newops.append(op) def could_merge_with_next_guard(self, op, i, operations): @@ -142,7 +149,7 @@ op = ResOperation(rop.SETFIELD_GC, [op.result, ConstInt(classint)], None, 
descr=self.gc_ll_descr.fielddescr_vtable) - self.newops.append(op) + self.newop(op) elif opnum == rop.NEW_ARRAY or opnum == rop.NEW_ARRAY_CLEAR: descr = op.getdescr() assert isinstance(descr, ArrayDescr) @@ -202,7 +209,7 @@ return op = ResOperation(rop.SETFIELD_GC, [result, self.c_zero], None, descr=hash_descr) - self.newops.append(op) + self.newop(op) def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) @@ -264,7 +271,7 @@ # See emit_pending_zeros(). o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], None, descr=arraydescr) - self.newops.append(o) + self.newop(o) if isinstance(v_length, ConstInt): self.last_zero_arrays.append(o) @@ -276,7 +283,7 @@ op0 = ResOperation(rop.GETFIELD_RAW, [history.ConstInt(frame_info)], size_box, descr=descrs.jfi_frame_depth) - self.newops.append(op0) + self.newop(op0) op1 = ResOperation(rop.NEW_ARRAY, [size_box], frame, descr=descrs.arraydescr) self.handle_new_array(descrs.arraydescr, op1) @@ -286,7 +293,7 @@ op0 = ResOperation(rop.GETFIELD_RAW, [history.ConstInt(frame_info)], size_box, descr=descrs.jfi_frame_size) - self.newops.append(op0) + self.newop(op0) self.gen_malloc_nursery_varsize_frame(size_box, frame) self.gen_initialize_tid(frame, descrs.arraydescr.tid) length_box = history.BoxInt() @@ -308,7 +315,8 @@ ResOperation(rop.SETFIELD_GC, [frame, self.c_null], None, descr=descrs.jf_forward), ] - self.newops += extra_ops + for extra_op in extra_ops: + self.newop(extra_op) self.gen_initialize_len(frame, length_box, descrs.arraydescr.lendescr) else: @@ -317,7 +325,7 @@ op0 = ResOperation(rop.GETFIELD_RAW,[history.ConstInt(frame_info)], length_box, descr=descrs.jfi_frame_depth) - self.newops.append(op0) + self.newop(op0) self.gen_malloc_nursery_varsize_frame(length_box, frame) self.gen_initialize_tid(frame, descrs.arraydescr.tid) self.gen_initialize_len(frame, length_box, @@ -333,7 +341,7 @@ self.gen_malloc_frame(llfi, frame) op2 = ResOperation(rop.SETFIELD_GC, [frame, history.ConstInt(llfi)], 
None, descr=descrs.jf_frame_info) - self.newops.append(op2) + self.newop(op2) arglist = op.getarglist() index_list = loop_token.compiled_loop_token._ll_initial_locs for i, arg in enumerate(arglist): @@ -341,7 +349,7 @@ assert self.cpu.JITFRAME_FIXED_SIZE & 1 == 0 _, itemsize, _ = self.cpu.unpack_arraydescr_size(descr) index = index_list[i] // itemsize # index is in bytes - self.newops.append(ResOperation(rop.SETARRAYITEM_GC, + self.newop(ResOperation(rop.SETARRAYITEM_GC, [frame, ConstInt(index), arg], None, descr)) @@ -355,8 +363,7 @@ args = [frame] op1 = ResOperation(rop.CALL_ASSEMBLER, args, op.result, op.getdescr()) - op1.stm_location = op.stm_location - self.newops.append(op1) + self.newop(op1) # ---------- @@ -399,14 +406,14 @@ for v, d in self.delayed_zero_setfields.iteritems(): for ofs in d.iterkeys(): op = ResOperation(rop.ZERO_PTR_FIELD, [v, ConstInt(ofs)], None) - self.newops.append(op) + self.newop(op) self.delayed_zero_setfields.clear() def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) - self.newops.append(op) + self.newop(op) # In general, don't add v_result to write_barrier_applied: # v_result might be a large young array. @@ -493,7 +500,7 @@ op = ResOperation(rop.CALL_MALLOC_NURSERY_VARSIZE, [ConstInt(kind), ConstInt(itemsize), v_length], v_result, descr=arraydescr) - self.newops.append(op) + self.newop(op) # don't record v_result into self.write_barrier_applied: # it can be a large, young array with card marking, and then # the GC relies on the write barrier being called @@ -507,7 +514,7 @@ [sizebox], # if STM, this is actually lengthbox! 
v_result) - self.newops.append(op) + self.newop(op) self.write_barrier_applied[v_result] = None def gen_malloc_nursery(self, size, v_result): @@ -539,7 +546,7 @@ v_result) self._op_malloc_nursery = op # - self.newops.append(op) + self.newop(op) self._previous_size = size self._v_last_malloced_nursery = v_result self.write_barrier_applied[v_result] = None @@ -551,14 +558,14 @@ op = ResOperation(rop.SETFIELD_GC, [v_newgcobj, ConstInt(tid)], None, descr=self.gc_ll_descr.fielddescr_tid) - self.newops.append(op) + self.newop(op) def gen_initialize_len(self, v_newgcobj, v_length, arraylen_descr): # produce a SETFIELD to initialize the array length op = ResOperation(rop.SETFIELD_GC, [v_newgcobj, v_length], None, descr=arraylen_descr) - self.newops.append(op) + self.newop(op) # ---------- @@ -572,27 +579,26 @@ def handle_write_barrier_setfield(self, op): val = op.getarg(0) if self.must_apply_write_barrier(val, op.getarg(1)): - self.gen_write_barrier(val, op.stm_location) - self.newops.append(op) + self.gen_write_barrier(val) + self.newop(op) def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) if self.must_apply_write_barrier(val, op.getarg(2)): - self.gen_write_barrier_array(val, op.getarg(1), op.stm_location) - self.newops.append(op) + self.gen_write_barrier_array(val, op.getarg(1)) + self.newop(op) handle_write_barrier_setinteriorfield = handle_write_barrier_setarrayitem - def gen_write_barrier(self, v_base, stm_location): + def gen_write_barrier(self, v_base): write_barrier_descr = self.gc_ll_descr.write_barrier_descr args = [v_base] op = ResOperation(rop.COND_CALL_GC_WB, args, None, descr=write_barrier_descr) - op.stm_location = stm_location - self.newops.append(op) + self.newop(op) self.write_barrier_applied[v_base] = None - def gen_write_barrier_array(self, v_base, v_index, stm_location): + def gen_write_barrier_array(self, v_base, v_index): write_barrier_descr = self.gc_ll_descr.write_barrier_descr if 
write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too @@ -605,13 +611,12 @@ args = [v_base, v_index] op = ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, descr=write_barrier_descr) - op.stm_location = stm_location - self.newops.append(op) + self.newop(op) # a WB_ARRAY is not enough to prevent any future write # barriers, so don't add to 'write_barrier_applied'! return # fall-back case: produce a write_barrier - self.gen_write_barrier(v_base, stm_location) + self.gen_write_barrier(v_base) def round_up_for_allocation(self, size): if not self.gc_ll_descr.round_up: diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -20,22 +20,21 @@ def other_operation(self, op): opnum = op.getopnum() if opnum == rop.INCREMENT_DEBUG_COUNTER: - self.newops.append(op) + self.newop(op) return # ---------- transaction breaks ---------- if opnum == rop.STM_HINT_COMMIT_SOON: - self._do_stm_call('stm_hint_commit_soon', [], None, - op.stm_location) + self._do_stm_call('stm_hint_commit_soon', [], None) return # ---------- jump, finish, guard_not_forced_2 ---------- if (opnum == rop.JUMP or opnum == rop.FINISH or opnum == rop.GUARD_NOT_FORCED_2): self.add_dummy_allocation() - self.newops.append(op) + self.newop(op) return # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): - self.newops.append(op) + self.newop(op) return # ---------- non-pure getfields ---------- if opnum in (rop.GETFIELD_GC, rop.GETARRAYITEM_GC, @@ -49,7 +48,7 @@ if opnum == rop.CALL_RELEASE_GIL: # self.fallback_inevitable(op) # is done by assembler._release_gil_shadowstack() - self.newops.append(op) + self.newop(op) elif opnum == rop.CALL_ASSEMBLER: assert 0 # case handled by the parent class else: @@ -62,7 +61,7 @@ or 
descr.get_extra_info().call_needs_inevitable(): self.fallback_inevitable(op) else: - self.newops.append(op) + self.newop(op) return # ---------- setters for pure fields ---------- if opnum in (rop.STRSETITEM, rop.UNICODESETITEM): @@ -80,7 +79,7 @@ if opnum == rop.LABEL: # note that the parent class also clears some things on a LABEL self.next_op_may_be_in_new_transaction() - self.newops.append(op) + self.newop(op) return # ---------- other ignored ops ---------- if opnum in (rop.STM_SHOULD_BREAK_TRANSACTION, rop.FORCE_TOKEN, @@ -88,7 +87,7 @@ rop.JIT_DEBUG, rop.KEEPALIVE, rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS, ): - self.newops.append(op) + self.newop(op) return # ---------- fall-back ---------- # Check that none of the ops handled here can collect. @@ -112,12 +111,12 @@ # group together several stm_reads then we can save one # instruction; if delayed over a cond_call_gc_wb then we can # omit the stm_read completely; ... - self.newops.append(op) + self.newop(op) v_ptr = op.getarg(0) if (v_ptr not in self.read_barrier_applied and v_ptr not in self.write_barrier_applied): op1 = ResOperation(rop.STM_READ, [v_ptr], None) - self.newops.append(op1) + self.newop(op1) self.read_barrier_applied[v_ptr] = None def add_dummy_allocation(self): @@ -137,33 +136,31 @@ @specialize.arg(1) - def _do_stm_call(self, funcname, args, result, stm_location): + def _do_stm_call(self, funcname, args, result): addr = self.gc_ll_descr.get_malloc_fn_addr(funcname) descr = getattr(self.gc_ll_descr, funcname + '_descr') op1 = ResOperation(rop.CALL, [ConstInt(addr)] + args, result, descr=descr) - op1.stm_location = stm_location - self.newops.append(op1) + self.newop(op1) def fallback_inevitable(self, op): if not self.always_inevitable: self.emitting_an_operation_that_can_collect() - self._do_stm_call('stm_try_inevitable', [], None, - op.stm_location) + self._do_stm_call('stm_try_inevitable', [], None) self.always_inevitable = True - self.newops.append(op) + self.newop(op) 
debug_print("fallback for", op.repr()) def maybe_handle_raw_accesses(self, op): descr = op.getdescr() assert isinstance(descr, FieldDescr) if descr.stm_dont_track_raw_accesses: - self.newops.append(op) + self.newop(op) return True return False def handle_setters_for_pure_fields(self, op, targetindex): val = op.getarg(targetindex) if self.must_apply_write_barrier(val): - self.gen_write_barrier(val, op.stm_location) - self.newops.append(op) + self.gen_write_barrier(val) + self.newop(op) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1256,17 +1256,30 @@ guard_not_forced() [] {55} """, """ [i0, f0] - i1 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_depth) - p1 = call_malloc_nursery_varsize_frame(i1) - setfield_gc(p1, 0, descr=tiddescr) - setfield_gc(p1, i1, descr=framelendescr) - setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info) - setarrayitem_gc(p1, 0, i0, descr=signedframedescr) - setarrayitem_gc(p1, 1, f0, descr=floatframedescr) + i1 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_depth) {54} + p1 = call_malloc_nursery_varsize_frame(i1) {54} + setfield_gc(p1, 0, descr=tiddescr) {54} + setfield_gc(p1, i1, descr=framelendescr) {54} + setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info) {54} + setarrayitem_gc(p1, 0, i0, descr=signedframedescr) {54} + setarrayitem_gc(p1, 1, f0, descr=floatframedescr) {54} i3 = call_assembler(p1, descr=casmdescr) {54} guard_not_forced() [] {55} """) + def test_stm_location_4(self): + self.check_rewrite(""" + [p1, i2, p3] + debug_merge_point() {81} + i3 = int_add(i2, 5) + setarrayitem_gc(p1, i3, p3, descr=cdescr) + """, """ + [p1, i2, p3] + i3 = int_add(i2, 5) {81} + cond_call_gc_wb_array(p1, i3, descr=wbdescr) {81} + setarrayitem_gc(p1, i3, p3, descr=cdescr) {81} + """) + def 
test_stm_should_break_transaction_no_malloc(self): self.check_rewrite(""" [] diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1159,8 +1159,6 @@ loc = jitdriver_sd.warmstate.get_location_str(greenkey) debug_print(loc) args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey - self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) - # if self.metainterp.staticdata.config.translation.stm: report_location = jitdriver_sd.stm_report_location if report_location is not None: @@ -1169,6 +1167,8 @@ ref = greenkey[idx_ref].getref_base() location = StmLocation(num, ref) self.metainterp.history.stm_location = location + # + self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) @arguments("box", "label") def opimpl_goto_if_exception_mismatch(self, vtablebox, next_exc_target): From noreply at buildbot.pypy.org Sun Jan 18 17:16:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 17:16:51 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Bah, PyPyJitDriver did not have 'stm_report_location'. Message-ID: <20150118161651.5E0881C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75428:10b577c5d4b1 Date: 2015-01-18 17:16 +0100 http://bitbucket.org/pypy/pypy/changeset/10b577c5d4b1/ Log: Bah, PyPyJitDriver did not have 'stm_report_location'. 
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -43,6 +43,7 @@ reds = ['frame', 'ec'] greens = ['next_instr', 'is_being_profiled', 'pycode'] virtualizables = ['frame'] + stm_report_location = [0, 2] # 'next_instr', 'pycode' pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, should_unroll_one_iteration = diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -50,6 +50,8 @@ num_box, ref_box = op.getarglist()[-2:] num = num_box.getint() ref = ref_box.getref_base() + assert num == op.stm_location.num + assert ref == op.stm_location.ref cur_location = (num, ref) elif op.getopname() in ("label", "finish", "jump"): pass From noreply at buildbot.pypy.org Sun Jan 18 23:02:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 23:02:45 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Minimal fixes so that a _stm.hashtable at least feels like a dict, Message-ID: <20150118220245.592C01C027F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75429:4f5f684ab5f9 Date: 2015-01-18 23:02 +0100 http://bitbucket.org/pypy/pypy/changeset/4f5f684ab5f9/ Log: Minimal fixes so that a _stm.hashtable at least feels like a dict, even if it has no way to enumerate the keys diff --git a/pypy/module/_stm/hashtable.py b/pypy/module/_stm/hashtable.py --- a/pypy/module/_stm/hashtable.py +++ b/pypy/module/_stm/hashtable.py @@ -29,9 +29,17 @@ self.h.set(key, gcref) @unwrap_spec(key=int) - def delitem_w(self, key): + def delitem_w(self, space, key): + gcref = self.h.get(key) + if not gcref: + space.raise_key_error(space.wrap(key)) self.h.set(key, rstm.NULL_GCREF) + @unwrap_spec(key=int) + def contains_w(self, space, key): + gcref = self.h.get(key) + return space.newbool(not not 
gcref) + def W_Hashtable___new__(space, w_subtype): r = space.allocate_instance(W_Hashtable, w_subtype) @@ -44,4 +52,5 @@ __getitem__ = interp2app(W_Hashtable.getitem_w), __setitem__ = interp2app(W_Hashtable.setitem_w), __delitem__ = interp2app(W_Hashtable.delitem_w), + __contains__ = interp2app(W_Hashtable.contains_w), ) diff --git a/pypy/module/_stm/test/test_hashtable.py b/pypy/module/_stm/test/test_hashtable.py --- a/pypy/module/_stm/test/test_hashtable.py +++ b/pypy/module/_stm/test/test_hashtable.py @@ -10,6 +10,9 @@ raises(KeyError, "h[42]") h[42] = "foo" assert h[42] == "foo" + assert 42 in h del h[42] + assert 42 not in h raises(KeyError, "h[42]") assert h[42+65536] == "bar" + raises(KeyError, "del h[42]") From noreply at buildbot.pypy.org Sun Jan 18 23:41:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 23:41:26 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Try to fix Windows test failures by also saving WSAGetLastError() using Message-ID: <20150118224126.4CB3D1C027F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75430:6eda442d4d39 Date: 2015-01-18 23:41 +0100 http://bitbucket.org/pypy/pypy/changeset/6eda442d4d39/ Log: Try to fix Windows test failures by also saving WSAGetLastError() using the same mechanism. 
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -242,9 +242,14 @@ mc.MOV32_rm(edi.value, (edi.value, 0)) mc.MOV32_mr((tlofsreg.value, rpy_errno), edi.value) - if handle_lasterror and (save_err & rffi.RFFI_SAVE_LASTERROR): - from rpython.rlib.rwin32 import _GetLastError - GetLastError_addr = self.asm.cpu.cast_ptr_to_int(_GetLastError) + if handle_lasterror and (save_err & (rffi.RFFI_SAVE_LASTERROR | + rffi.RFFI_SAVE_WSALASTERROR)): + if save_err & rffi.RFFI_SAVE_LASTERROR: + from rpython.rlib.rwin32 import _GetLastError + GetLastError_addr = self.asm.cpu.cast_ptr_to_int(_GetLastError) + else: + from rpython.rlib._rsocket_rffi import _WSAGetLastError as WSAE + GetLastError_addr = self.asm.cpu.cast_ptr_to_int(WSAE) assert isinstance(self, CallBuilder32) # Windows 32-bit only # rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu) diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -462,7 +462,7 @@ if WIN32: WSAEVENT = cConfig.WSAEVENT WSANETWORKEVENTS = cConfig.WSANETWORKEVENTS - SAVE_ERR = rffi.RFFI_ERR_NONE + SAVE_ERR = rffi.RFFI_SAVE_WSALASTERROR else: SAVE_ERR = rffi.RFFI_SAVE_ERRNO timeval = cConfig.timeval @@ -660,14 +660,14 @@ WSAStartup = external('WSAStartup', [rwin32.WORD, lltype.Ptr(WSAData)], rffi.INT) - WSAGetLastError = external('WSAGetLastError', [], rffi.INT, releasegil=False) - geterrno = WSAGetLastError + _WSAGetLastError = external('WSAGetLastError', [], rffi.INT, + _nowrapper=True, sandboxsafe=True) + + geterrno = rwin32.GetLastError_saved # In tests, the first call to GetLastError is always wrong, because error # is hidden by operations in ll2ctypes. Call it now. 
- WSAGetLastError() - - from rpython.rlib import rwin32 + _WSAGetLastError() def socket_strerror_str(errno): return rwin32.FormatError(errno) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -126,12 +126,17 @@ @specialize.call_location() def _errno_after(save_err): - if WIN32 and (save_err & rffi.RFFI_SAVE_LASTERROR): - from rpython.rlib import rthread, rwin32 - err = rwin32._GetLastError() - # careful, setraw() overwrites GetLastError. - # We must read it first, before the errno handling. - rthread.tlfield_rpy_lasterror.setraw(err) + if WIN32: + if save_err & rffi.RFFI_SAVE_LASTERROR: + from rpython.rlib import rthread, rwin32 + err = rwin32._GetLastError() + # careful, setraw() overwrites GetLastError. + # We must read it first, before the errno handling. + rthread.tlfield_rpy_lasterror.setraw(err) + elif save_err & rffi.RFFI_SAVE_WSALASTERROR: + from rpython.rlib import rthread, _rsocket_rffi + err = _rsocket_rffi._WSAGetLastError() + rthread.tlfield_rpy_lasterror.setraw(err) if save_err & rffi.RFFI_SAVE_ERRNO: from rpython.rlib import rthread rthread.tlfield_rpy_errno.setraw(_get_errno()) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -66,6 +66,7 @@ RFFI_FULL_ERRNO_ZERO = RFFI_SAVE_ERRNO | RFFI_ZERO_ERRNO_BEFORE RFFI_SAVE_LASTERROR = 8 # win32: save GetLastError() after the call RFFI_READSAVED_LASTERROR = 16 # win32: call SetLastError() before the call +RFFI_SAVE_WSALASTERROR = 32 # win32: save WSAGetLastError() after the call RFFI_FULL_LASTERROR = RFFI_SAVE_LASTERROR | RFFI_READSAVED_LASTERROR RFFI_ERR_NONE = 0 RFFI_ERR_ALL = RFFI_FULL_ERRNO | RFFI_FULL_LASTERROR From noreply at buildbot.pypy.org Sun Jan 18 23:58:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Jan 2015 23:58:45 +0100 (CET) Subject: [pypy-commit] pypy errno-again: translation fix 
Message-ID: <20150118225845.521591C03ED@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75431:73ecbbb25dba Date: 2015-01-18 23:58 +0100 http://bitbucket.org/pypy/pypy/changeset/73ecbbb25dba/ Log: translation fix diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -660,7 +660,7 @@ WSAStartup = external('WSAStartup', [rwin32.WORD, lltype.Ptr(WSAData)], rffi.INT) - _WSAGetLastError = external('WSAGetLastError', [], rffi.INT, + _WSAGetLastError = external('WSAGetLastError', [], rwin32.DWORD, _nowrapper=True, sandboxsafe=True) geterrno = rwin32.GetLastError_saved From noreply at buildbot.pypy.org Mon Jan 19 10:50:14 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 10:50:14 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: sweep small objs too in major gc Message-ID: <20150119095014.F1EF81C0041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1541:6520222bf6c8 Date: 2015-01-19 10:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/6520222bf6c8/ Log: sweep small objs too in major gc diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -410,6 +410,34 @@ _stm_largemalloc_sweep(); } +static inline bool smallmalloc_keep_object_at(char *data) +{ + /* XXX: identical to largemalloc_keep_object_at()? */ + /* this is called by _stm_smallmalloc_sweep() */ + object_t *obj = (object_t *)(data - stm_object_pages); + dprintf(("keep small obj %p ? -> %d\n", obj, mark_visited_test(obj))); + if (!mark_visited_test_and_clear(obj)) { + /* This is actually needed in order to avoid random write-read + conflicts with objects read and freed long in the past. + It is probably rare enough, but still, we want to avoid any + false conflict. 
(test_random hits it sometimes) */ + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + /* reset read marker */ + *((char *)(get_segment_base(i) + (((uintptr_t)obj) >> 4))) = 0; + } + return false; + } + return true; +} + +static void sweep_small_objects(void) +{ + _stm_smallmalloc_sweep(); +} + + + static void major_collection_now_at_safe_point(void) { dprintf(("\n")); @@ -438,7 +466,7 @@ /* /\* sweeping *\/ */ sweep_large_objects(); - /* //sweep_uniform_pages(); */ + sweep_small_objects(); dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); diff --git a/c8/stm/gcpage.h b/c8/stm/gcpage.h --- a/c8/stm/gcpage.h +++ b/c8/stm/gcpage.h @@ -21,3 +21,4 @@ static void major_collection_if_requested(void); static void major_collection_now_at_safe_point(void); static bool largemalloc_keep_object_at(char *data); /* for largemalloc.c */ +static bool smallmalloc_keep_object_at(char *data); /* for smallmalloc.c */ diff --git a/c8/stm/misc.c b/c8/stm/misc.c --- a/c8/stm/misc.c +++ b/c8/stm/misc.c @@ -104,4 +104,13 @@ { return increment_total_allocated(0); } + + +void _stm_smallmalloc_sweep_test() +{ + acquire_all_privatization_locks(); + _stm_smallmalloc_sweep(); + release_all_privatization_locks(); +} + #endif diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -22,7 +22,9 @@ static fpsz_t *get_fpsz(char *smallpage) { uintptr_t pagenum = (((char *)smallpage) - END_NURSERY_PAGE * 4096UL - stm_object_pages) / 4096; - assert(PAGE_SMSIZE_START <= pagenum && pagenum < PAGE_SMSIZE_END); + /* <= PAGE_SMSIZE_END because we may ask for it when there is no + page with smallobjs yet and uninit_page_stop == NB_PAGES... 
*/ + assert(PAGE_SMSIZE_START <= pagenum && pagenum <= PAGE_SMSIZE_END); return &full_pages_object_size[pagenum - PAGE_SMSIZE_START]; } @@ -162,6 +164,8 @@ struct small_free_loc_s *result = *fl; + increment_total_allocated(size); + if (UNLIKELY(result == NULL)) return (stm_char*) (_allocate_small_slowpath(size) - stm_object_pages); @@ -201,8 +205,7 @@ return _stm_smallmalloc_keep((char*)(p - stm_object_pages)); } #endif - return true; - //return smallmalloc_keep_object_at(p); + return smallmalloc_keep_object_at(p); } void check_order_inside_small_page(struct small_free_loc_s *page) @@ -248,6 +251,7 @@ } else if (!_smallmalloc_sweep_keep(p)) { /* the location should be freed now */ + increment_total_allocated(-szword*8); #ifdef STM_TESTS /* fill location with 0xdd in all segs except seg0 */ int j; @@ -301,7 +305,6 @@ void _stm_smallmalloc_sweep(void) { - acquire_all_privatization_locks(); /* should be done outside, but tests... */ long i, szword; for (szword = 2; szword < GC_N_SMALL_REQUESTS; szword++) { struct small_free_loc_s *page = small_page_lists[szword]; @@ -350,5 +353,4 @@ sweep_small_page(pageptr, NULL, sz); } } - release_all_privatization_locks(); } diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -96,7 +96,7 @@ char *stm_file_pages; object_t *_stm_allocate_old_small(ssize_t size_rounded_up); bool (*_stm_smallmalloc_keep)(char *data); -void _stm_smallmalloc_sweep(void); +void _stm_smallmalloc_sweep_test(void); void _stm_start_safe_point(void); void _stm_stop_safe_point(void); diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -116,7 +116,7 @@ object_t *_stm_allocate_old_small(ssize_t size_rounded_up); bool (*_stm_smallmalloc_keep)(char *data); -void _stm_smallmalloc_sweep(void); +void _stm_smallmalloc_sweep_test(void); """) @@ -291,7 +291,6 @@ } } - ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_NO_AUTOMATIC_SETJMP', '1'), diff --git 
a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -331,7 +331,8 @@ def test_small_major_collection(self): self.start_transaction() - new = stm_allocate(16) + new = stm_allocate(16) # small + assert lib._stm_total_allocated() == 0 self.push_root(new) stm_minor_collect() assert lib._stm_total_allocated() == 16 diff --git a/c8/test/test_smallmalloc.py b/c8/test/test_smallmalloc.py --- a/c8/test/test_smallmalloc.py +++ b/c8/test/test_smallmalloc.py @@ -40,10 +40,13 @@ assert p == seen[0] seen.pop(0) + def test_sweep_trivial(self): + lib._stm_smallmalloc_sweep_test() + def test_sweep_freeing_simple(self): p1 = stm_allocate_old_small(16) self.has_been_asked_for = [] - lib._stm_smallmalloc_sweep() + lib._stm_smallmalloc_sweep_test() assert p1 in self.has_been_asked_for def test_sweep_freeing_random_subset(self): @@ -59,7 +62,7 @@ # keep half of them around self.keep_me = set(random.sample(page0, len(page0) // 2)) self.has_been_asked_for = [] - lib._stm_smallmalloc_sweep() + lib._stm_smallmalloc_sweep_test() print len(page0), len(self.has_been_asked_for) assert sorted(page0) == self.has_been_asked_for, "all objs were observed" @@ -84,6 +87,6 @@ page0 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)] tid = lib._get_type_id(page0[0]) self.keep_me = set(page0) - lib._stm_smallmalloc_sweep() + lib._stm_smallmalloc_sweep_test() for p in page0: assert lib._get_type_id(p) == tid From noreply at buildbot.pypy.org Mon Jan 19 10:50:16 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 10:50:16 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: increase confidence with a test Message-ID: <20150119095016.3FA581C0041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1542:f1bfa3441475 Date: 2015-01-19 10:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/f1bfa3441475/ Log: increase confidence with a test diff --git a/c8/test/support.py b/c8/test/support.py 
--- a/c8/test/support.py +++ b/c8/test/support.py @@ -121,7 +121,8 @@ """) -GC_N_SMALL_REQUESTS = 36 # from gcpage.c +GC_N_SMALL_REQUESTS = 36 # from smallmalloc.h +GC_LAST_SMALL_SIZE = (8 * (GC_N_SMALL_REQUESTS - 1)) LARGE_MALLOC_OVERHEAD = 16 # from largemalloc.h lib = ffi.verify(r''' diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -344,3 +344,51 @@ stm_major_collect() assert lib._stm_total_allocated() == 0 + + def test_mixed_major_collections(self): + import random + obj_sizes = [16, 48, 1024, 1000*8] + + self.start_transaction() + random.seed(123) + + # allocate objs: + allocated = 0 + NOBJS = 100 + for _ in range(NOBJS): + osize = random.choice(obj_sizes) + is_small = osize <= GC_LAST_SMALL_SIZE + if is_small: + allocated += osize + else: + allocated += osize + LMO + + o = stm_allocate(osize) + self.push_root(o) + + # sometimes do a minor collection: + if random.random() > 0.95: + stm_minor_collect() + assert lib._stm_total_allocated() == allocated + + stm_minor_collect() + assert lib._stm_total_allocated() == allocated + # -> all objs old + + objs = set() + for _ in range(NOBJS): + objs.add(self.pop_root()) + + # do major collections while always saving less + # and less objs + while objs: + objs = random.sample(objs, len(objs) // 2) + for o in objs: + self.push_root(o) + stm_major_collect() + for o in objs: + self.pop_root() + + assert lib._stm_total_allocated() == 0 + + self.commit_transaction() From noreply at buildbot.pypy.org Mon Jan 19 11:06:58 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 11:06:58 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: try fixing demo_random2 (not sure if the seg0 requirement for allocate_old() is a good solution..) 
Message-ID: <20150119100658.043FC1C03DF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1543:c24436ef205c Date: 2015-01-19 11:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/c24436ef205c/ Log: try fixing demo_random2 (not sure if the seg0 requirement for allocate_old() is a good solution..) diff --git a/c8/demo/demo_random2.c b/c8/demo/demo_random2.c --- a/c8/demo/demo_random2.c +++ b/c8/demo/demo_random2.c @@ -463,7 +463,7 @@ .next = NULL }; - stm_start_inevitable_transaction(&stm_thread_local); + //stm_start_inevitable_transaction(&stm_thread_local); for (i = 0; i < PREBUILT_ROOTS; i++) { void* new_templ = malloc(sizeof(struct node_s)); memcpy(new_templ, &prebuilt_template, sizeof(struct node_s)); @@ -476,7 +476,7 @@ ((nodeptr_t)prebuilt_roots[i])->my_hash = hash; } } - stm_commit_transaction(); + //stm_commit_transaction(); } int main(void) @@ -495,10 +495,11 @@ stm_setup(); + setup_globals(); + stm_register_thread_local(&stm_thread_local); stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); - setup_globals(); int thread_starts = NUMTHREADS * THREAD_STARTS; for (i = 0; i < NUMTHREADS; i++) { diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -118,6 +118,8 @@ setup_nursery(); setup_gcpage(); setup_pages(); + + set_gs_register(get_segment_base(0)); } void stm_teardown(void) From noreply at buildbot.pypy.org Mon Jan 19 11:06:59 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 11:06:59 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix for trying to free -1 Message-ID: <20150119100659.174181C03DF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1544:a9ebb62368a6 Date: 2015-01-19 11:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/a9ebb62368a6/ Log: fix for trying to free -1 diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -414,7 +414,8 @@ while (1) { if (!_stm_validate()) { - 
free(new); + if (new != INEV_RUNNING) + free(new); stm_abort_transaction(); } From noreply at buildbot.pypy.org Mon Jan 19 11:59:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 11:59:18 +0100 (CET) Subject: [pypy-commit] pypy errno-again: hg merge default Message-ID: <20150119105918.05A711C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75432:d1497aa9524d Date: 2015-01-19 11:59 +0100 http://bitbucket.org/pypy/pypy/changeset/d1497aa9524d/ Log: hg merge default diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -17,6 +17,10 @@ except ImportError: assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} +try: + from __pypy__ import reversed_dict +except ImportError: + reversed_dict = lambda d: reversed(d.keys()) try: from thread import get_ident as _get_ident @@ -29,142 +33,35 @@ ################################################################################ class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as regular dictionaries. + '''Dictionary that remembers insertion order. - # The internal self.__map dict maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + In PyPy all dicts are ordered anyway. This is mostly useful as a + placeholder to mean "this dict must be ordered even on CPython". - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. 
The signature is the same as - regular dictionaries, but keyword arguments are not recommended because - their insertion order is arbitrary. - - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link at the end of the linked list, - # and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - return dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which gets - # removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link_prev, link_next, _ = self.__map.pop(key) - link_prev[1] = link_next # update link_prev[NEXT] - link_next[0] = link_prev # update link_next[PREV] - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - # Traverse the linked list in order. - root = self.__root - curr = root[1] # start at the first node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[1] # move to next node + Known difference: iterating over an OrderedDict which is being + concurrently modified raises RuntimeError in PyPy. In CPython + instead we get some behavior that appears reasonable in some + cases but is nonsensical in other cases. This is officially + forbidden by the CPython docs, so we forbid it explicitly for now. + ''' def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - # Traverse the linked list in reverse order. 
- root = self.__root - curr = root[0] # start at the last node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[0] # move to previous node - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - dict.clear(self) - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) pairs in od' - for k in self: - yield (k, self[k]) - - update = MutableMapping.update - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding - value. If key is not found, d is returned if given, otherwise KeyError - is raised. - - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default + return reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' - if not self: - raise KeyError('dictionary is empty') - key = next(reversed(self) if last else iter(self)) - value = self.pop(key) - return key, value + if last: + return dict.popitem(self) + else: + it = dict.__iter__(self) + try: + k = it.next() + except StopIteration: + raise KeyError('dictionary is empty') + return (k, self.pop(k)) def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' @@ -183,8 +80,6 @@ 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) @@ -193,17 +88,6 @@ 'od.copy() -> a shallow copy of od' return self.__class__(self) - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. - If not specified, the value defaults to None. - - ''' - self = cls() - for key in iterable: - self[key] = value - return self - def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. diff --git a/lib-python/2.7/test/test_collections.py b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -578,7 +578,12 @@ def __repr__(self): return "MySet(%s)" % repr(list(self)) s = MySet([5,43,2,1]) - self.assertEqual(s.pop(), 1) + # changed from CPython 2.7: it was "s.pop() == 1" but I see + # nothing that guarantees a particular order here. In the + # 'all_ordered_dicts' branch of PyPy (or with OrderedDict + # instead of sets), it consistently returns 5, but this test + # should not rely on this or any other order. 
+ self.assert_(s.pop() in [5,43,2,1]) def test_issue8750(self): empty = WithSet() @@ -1010,8 +1015,9 @@ c=3, e=5).items()), pairs) # mixed input # make sure no positional args conflict with possible kwdargs - self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, - ['self']) + if '__init__' in OrderedDict.__dict__: # absent in PyPy + self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, + ['self']) # Make sure that direct calls to __init__ do not clear previous contents d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)]) diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -47,6 +47,11 @@ Install build-time dependencies ------------------------------- +(**Note**: for some hints on how to translate the Python interpreter under +Windows, see the `windows document`_) + +.. _`windows document`: windows.html + To build PyPy on Unix using the C translation backend, you need at least a C compiler and ``make`` installed. Further, some optional modules have additional diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -78,6 +78,7 @@ Then you need to execute:: + \vc\vcvars.bat editbin /largeaddressaware translator.exe where ``translator.exe`` is the pypy.exe or cpython.exe you will use to @@ -96,7 +97,7 @@ Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------------------------------- Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local_2.4.zip @@ -110,7 +111,13 @@ set INCLUDE=\include;\tcltk\include;%INCLUDE% set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. If you choose this method, you do not need +to download/build anything else. 
+ +Nonabrided method (building from scratch) +----------------------------------------- + +If you want to, you can rebuild everything from scratch by continuing. The Boehm garbage collector diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -254,11 +254,15 @@ return rep def visit_Name(self, name): - # Turn loading None into a constant lookup. Eventaully, we can do this - # for True and False, too. + # Turn loading None into a constant lookup. We cannot do this + # for True and False, because rebinding them is allowed (2.7). if name.id == "None": - assert name.ctx == ast.Load - return ast.Const(self.space.w_None, name.lineno, name.col_offset) + # The compiler refuses to parse "None = ...", but "del None" + # is allowed (if pointless). Check anyway: custom asts that + # correspond to "None = ..." can be made by hand. + if name.ctx == ast.Load: + return ast.Const(self.space.w_None, name.lineno, + name.col_offset) return name def visit_Tuple(self, tup): diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -654,6 +654,18 @@ assert ex.match(space, space.w_SyntaxError) assert 'hello_world' in space.str_w(space.str(ex.get_w_value(space))) + def test_del_None(self): + snippet = '''if 1: + try: + del None + except NameError: + pass + ''' + code = self.compiler.compile(snippet, '', 'exec', 0) + space = self.space + w_d = space.newdict() + space.exec_(code, w_d, w_d) + class TestPythonAstCompiler_25_grammar(BaseTestCompiler): def setup_method(self, method): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -27,6 +27,6 @@ pypy_setup_home = d['pypy_setup_home'] lls = 
rffi.str2charp(__file__) res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) - assert lltype.typeOf(res) == rffi.LONG + assert lltype.typeOf(res) == rffi.INT assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -78,6 +78,7 @@ 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', + 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -30,3 +30,17 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) + +def reversed_dict(space, w_obj): + """Enumerate the keys in a dictionary object in reversed order. + + This is a __pypy__ function instead of being simply done by calling + reversed(), for CPython compatibility: dictionaries are only ordered + on PyPy. You should use the collections.OrderedDict class for cases + where ordering is important. That class implements __reversed__ by + calling __pypy__.reversed_dict(). 
+ """ + from pypy.objspace.std.dictmultiobject import W_DictMultiObject + if not isinstance(w_obj, W_DictMultiObject): + raise OperationError(space.w_TypeError, space.w_None) + return w_obj.nondescr_reversed_dict(space) diff --git a/pypy/module/_multibytecodec/test/test_translation.py b/pypy/module/_multibytecodec/test/test_translation.py --- a/pypy/module/_multibytecodec/test/test_translation.py +++ b/pypy/module/_multibytecodec/test/test_translation.py @@ -1,8 +1,11 @@ from pypy.module._multibytecodec import c_codecs from rpython.translator.c.test import test_standalone +from rpython.config.translationoption import get_combined_translation_config class TestTranslation(test_standalone.StandaloneTests): + config = get_combined_translation_config(translating=True) + config.translation.gc = 'boehm' def test_translation(self): # diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -43,9 +43,9 @@ # can't change ;) assert loop.match_by_id("getitem", """ ... - i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...) + i26 = call(ConstClass(ll_call_lookup_function), p18, p6, i25, 0, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc(p31, i26, descr=>) ... """) @@ -68,25 +68,29 @@ guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) 
- p15 = new_array_clear(8, descr=) - setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + p15 = new_array_clear(8, descr=) {{{ - setfield_gc(p13, 16, descr=) - setfield_gc(p13, 0, descr=) + setfield_gc(p13, 0, descr=) + setfield_gc(p13, p15, descr=) + setfield_gc(p13, ConstPtr(0), descr=) + }}} + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 1, descr=) + {{{ + setfield_gc(p13, 0, descr=) + setfield_gc(p13, 0, descr=) + setfield_gc(p13, 16, descr=) }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) - i26 = int_and(i23, #) - i27 = int_is_true(i26) + i27 = int_lt(i23, 0) guard_false(i27, descr=...) p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p29 = getinteriorfield_gc(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) i31 = getfield_gc_pure(p29, descr=) i32 = int_sub_ovf(i31, i5) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -151,15 +151,13 @@ assert loop.match_by_id('loadattr1', ''' guard_not_invalidated(descr=...) - i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) + i19 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) - i21 = int_and(i19, _) - i22 = int_is_true(i21) + i22 = int_lt(i19, 0) guard_true(i22, descr=...) - i26 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) 
+ i26 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) - i28 = int_and(i26, _) - i29 = int_is_true(i28) + i29 = int_lt(i26, 0) guard_true(i29, descr=...) ''') assert loop.match_by_id('loadattr2', "") # completely folded away diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -258,6 +258,17 @@ """D.itervalues() -> an iterator over the values of D""" return W_DictMultiIterValuesObject(space, self.itervalues()) + def nondescr_reversed_dict(self, space): + """Not exposed directly to app-level, but via __pypy__.reversed_dict(). + """ + if self.strategy.has_iterreversed: + it = self.strategy.iterreversed(self) + return W_DictMultiIterKeysObject(space, it) + else: + # fall-back + w_keys = self.w_keys() + return space.call_method(w_keys, '__reversed__') + def descr_viewitems(self, space): """D.viewitems() -> a set-like object providing a view on D's items""" return W_DictViewItemsObject(space, self) @@ -503,6 +514,9 @@ def getiteritems(self, w_dict): raise NotImplementedError + has_iterreversed = False + # no 'getiterreversed': no default implementation available + def rev_update1_dict_dict(self, w_dict, w_updatedict): iteritems = self.iteritems(w_dict) while True: @@ -623,6 +637,9 @@ def getiteritems(self, w_dict): return iter([]) + def getiterreversed(self, w_dict): + return iter([]) + # Iterator Implementation base classes @@ -747,6 +764,17 @@ else: return None, None + class IterClassReversed(BaseKeyIterator): + def __init__(self, space, strategy, impl): + self.iterator = strategy.getiterreversed(impl) + BaseIteratorImplementation.__init__(self, space, strategy, impl) + + def next_key_entry(self): + for key in self.iterator: + return wrapkey(self.space, key) + else: + return None + def iterkeys(self, w_dict): return IterClassKeys(self.space, self, w_dict) @@ -756,6 +784,12 @@ def iteritems(self, 
w_dict): return IterClassItems(self.space, self, w_dict) + if hasattr(dictimpl, 'getiterreversed'): + def iterreversed(self, w_dict): + return IterClassReversed(self.space, self, w_dict) + dictimpl.iterreversed = iterreversed + dictimpl.has_iterreversed = True + @jit.look_inside_iff(lambda self, w_dict, w_updatedict: w_dict_unrolling_heuristic(w_dict)) def rev_update1_dict_dict(self, w_dict, w_updatedict): @@ -932,6 +966,9 @@ def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() + def getiterreversed(self, w_dict): + return objectmodel.reversed_dict(self.unerase(w_dict.dstorage)) + def prepare_update(self, w_dict, num_extra): objectmodel.prepare_dict_update(self.unerase(w_dict.dstorage), num_extra) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -254,6 +254,21 @@ values.append(k) assert values == d.values() + def test_reversed_dict(self): + import __pypy__ + for d in [{}, {1: 2, 3: 4, 5: 6}, {"a": 5, "b": 2, "c": 6}]: + assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1] + raises(TypeError, __pypy__.reversed_dict, 42) + + def test_reversed_dict_runtimeerror(self): + import __pypy__ + d = {1: 2, 3: 4, 5: 6} + it = __pypy__.reversed_dict(d) + key = it.next() + assert key in [1, 3, 5] + del d[key] + raises(RuntimeError, it.next) + def test_keys(self): d = {1: 2, 3: 4} kys = d.keys() diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -700,6 +700,15 @@ del a.x raises(AttributeError, "a.x") + def test_reversed_dict(self): + import __pypy__ + class X(object): + pass + x = X(); x.a = 10; x.b = 20; x.c = 30 + d = x.__dict__ + assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1] + + class AppTestWithMapDictAndCounters(object): spaceconfig = 
{"objspace.std.withmapdict": True, "objspace.std.withmethodcachecounter": True} diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -392,6 +392,8 @@ assert isinstance(dct2, SomeOrderedDict), "OrderedDict.update(dict) not allowed" dct1.dictdef.union(dct2.dictdef) +SomeDict = SomeOrderedDict # all dicts are ordered! + class SomeIterator(SomeObject): "Stands for an iterator returning objects from a given container." diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1877,7 +1877,7 @@ return None a = self.RPythonAnnotator() s = a.build_types(f, [int]) - assert s.knowntype == dict + assert s.knowntype == annmodel.SomeOrderedDict.knowntype def test_const_list_and_none(self): def g(l=None): diff --git a/rpython/jit/backend/arm/test/test_regalloc_mov.py b/rpython/jit/backend/arm/test/test_regalloc_mov.py --- a/rpython/jit/backend/arm/test/test_regalloc_mov.py +++ b/rpython/jit/backend/arm/test/test_regalloc_mov.py @@ -503,7 +503,6 @@ def test_unsupported(self): py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), imm(1))') py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), imm_float(1))') - py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), r(1))') py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), vfp(1))') py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), stack(1))') py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), stack_float(1))') diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -2,11 +2,12 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from 
rpython.jit.metainterp.optimizeopt.util import args_dict -from rpython.jit.metainterp.history import Const +from rpython.jit.metainterp.history import Const, ConstInt from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization,\ MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.optimizeopt.intutils import IntBound from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.objectmodel import we_are_translated @@ -325,6 +326,29 @@ self.emit_operation(op) def _optimize_CALL_DICT_LOOKUP(self, op): + # Cache consecutive lookup() calls on the same dict and key, + # depending on the 'flag_store' argument passed: + # FLAG_LOOKUP: always cache and use the cached result. + # FLAG_STORE: don't cache (it might return -1, which would be + # incorrect for future lookups); but if found in + # the cache and the cached value was already checked + # non-negative, then we can reuse it. + # FLAG_DELETE: never cache, never use the cached result (because + # if there is a cached result, the FLAG_DELETE call + # is needed for its side-effect of removing it). + # In theory we could cache a -1 for the case where + # the delete is immediately followed by a lookup, + # but too obscure. 
+ # + from rpython.rtyper.lltypesystem.rordereddict import FLAG_LOOKUP + from rpython.rtyper.lltypesystem.rordereddict import FLAG_STORE + flag_value = self.getvalue(op.getarg(4)) + if not flag_value.is_constant(): + return False + flag = flag_value.get_constant_int() + if flag != FLAG_LOOKUP and flag != FLAG_STORE: + return False + # descrs = op.getdescr().get_extra_info().extradescrs assert descrs # translation hint descr1 = descrs[0] @@ -333,13 +357,20 @@ except KeyError: d = self.cached_dict_reads[descr1] = args_dict() self.corresponding_array_descrs[descrs[1]] = descr1 - args = self.optimizer.make_args_key(op) + # + key = [self.optimizer.get_box_replacement(op.getarg(1)), # dict + self.optimizer.get_box_replacement(op.getarg(2))] # key + # other args can be ignored here (hash, store_flag) try: - res_v = d[args] + res_v = d[key] except KeyError: - d[args] = self.getvalue(op.result) + if flag == FLAG_LOOKUP: + d[key] = self.getvalue(op.result) return False else: + if flag != FLAG_LOOKUP: + if not res_v.getintbound().known_ge(IntBound(0, 0)): + return False self.make_equal_to(op.result, res_v) self.last_emitted_operation = REMOVED return True diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -148,6 +148,12 @@ if self.getlevel() < LEVEL_NONNULL: self.setlevel(LEVEL_NONNULL) + def get_constant_int(self): + assert self.is_constant() + box = self.box + assert isinstance(box, ConstInt) + return box.getint() + def is_virtual(self): # Don't check this with 'isinstance(_, VirtualValue)'! 
# Even if it is a VirtualValue, the 'box' can be non-None, diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -181,15 +181,21 @@ n = d[y] return d[Wrapper(str(n + 1))] + # XXX unsure I see the point of this test: the repeated + # dict lookup is *not* elided so far, and the test happens to + # check this... with rdict.py, it's a write followed by a read, + # where the dict cache is thrown away after the first lookup + # (correctly: we don't want the two lookups to return the exact + # same result!). With rordereddict.py, FLAG_STORE lookups are + # not cached anyway. res = self.meta_interp(f, [100], listops=True) assert res == f(50) self.check_resops({'new_array_clear': 2, 'getfield_gc': 2, - 'guard_true': 2, 'jump': 1, + 'guard_true': 4, 'jump': 1, 'new_with_vtable': 2, 'getinteriorfield_gc': 2, - 'setfield_gc': 8, 'int_gt': 2, 'int_sub': 2, - 'call': 10, 'int_and': 2, - 'guard_no_exception': 8, 'new': 2, - 'guard_false': 2, 'int_is_true': 2}) + 'setfield_gc': 14, 'int_gt': 2, 'int_sub': 2, + 'call': 10, 'int_ge': 2, + 'guard_no_exception': 8, 'new': 2}) def test_unrolling_of_dict_iter(self): driver = JitDriver(greens = [], reds = ['n']) @@ -223,7 +229,7 @@ return s self.meta_interp(f, [10]) - # XXX should be one getinteriorfield_gc + # XXX should be one getinteriorfield_gc. At least it's one call. self.check_simple_loop(call=1, getinteriorfield_gc=2, guard_no_exception=1) @@ -244,7 +250,7 @@ return s self.meta_interp(f, [10]) - # XXX should be one getinteriorfield_gc + # XXX should be one getinteriorfield_gc. At least it's one call. 
self.check_simple_loop(call=1, getinteriorfield_gc=2, guard_no_exception=1) @@ -259,7 +265,7 @@ driver.jit_merge_point() index = indexes[n & 1] s += d[index] - d['aa'] += 1 # this will invalidate the index + d['aa'] = 13 # this will invalidate the index s += d[index] n -= 1 return s @@ -355,7 +361,7 @@ if n in mdict: raise Exception self.meta_interp(f, [10]) - self.check_simple_loop(call_may_force=0, call=3) + self.check_simple_loop(call_may_force=0, call=4) def test_dict_virtual(self): myjitdriver = JitDriver(greens = [], reds = 'auto') diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -406,6 +406,11 @@ return self.iseen_roots[value] = True + if isinstance(TYPE, lltype.GcOpaqueType): + self.consider_constant(lltype.typeOf(value.container), + value.container, gc) + return + if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)): typeid = self.get_type_id(TYPE) hdr = gc.gcheaderbuilder.new_header(value) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -753,6 +753,17 @@ dict._prepare_dict_update(n_elements) # ^^ call an extra method that doesn't exist before translation + at specialize.call_location() +def reversed_dict(d): + """Equivalent to reversed(ordered_dict), but works also for + regular dicts.""" + # note that there is also __pypy__.reversed_dict(), which we could + # try to use here if we're not translated and running on top of pypy, + # but that seems a bit pointless + if not we_are_translated(): + d = d.keys() + return reversed(d) + # ____________________________________________________________ diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py --- a/rpython/rlib/rtermios.py +++ b/rpython/rlib/rtermios.py @@ -3,6 +3,7 @@ # returns list of mostly-strings of length one, but with few ints # inside, so we make sure it works +import sys from 
rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -97,6 +98,10 @@ for name in CONSTANT_NAMES: value = c_config[name] if value is not None: + if value < -sys.maxsize-1 or value >= 2 * (sys.maxsize+1): + raise AssertionError("termios: %r has value %r, too large" % ( + name, value)) + value = intmask(value) # wrap unsigned long numbers to signed longs globals()[name] = value all_constants[name] = value diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -341,6 +341,21 @@ res = self.interpret(g, [3]) assert res == 42 # "did not crash" + def test_reversed_dict(self): + d1 = {2:3, 4:5, 6:7} + def g(): + n1 = 0 + for key in d1: + n1 = n1 * 10 + key + n2 = 0 + for key in reversed_dict(d1): + n2 = n2 * 10 + key + return n1 * 10000 + n2 + got = str(g()) + assert len(got) == 7 and got[3] == '0' and got[:3] == got[6:3:-1] + got = str(self.interpret(g, [])) + assert len(got) == 7 and got[3] == '0' and got[:3] == got[6:3:-1] + def test_compute_hash(self): class Foo(object): pass diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -740,6 +740,10 @@ return lltype.cast_opaque_ptr(RESTYPE, obj) op_cast_opaque_ptr.need_result_type = True + def op_length_of_simple_gcarray_from_opaque(self, obj): + checkptr(obj) + return lltype.length_of_simple_gcarray_from_opaque(obj) + def op_cast_ptr_to_adr(self, ptr): checkptr(ptr) return llmemory.cast_ptr_to_adr(ptr) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -396,6 +396,7 @@ 'direct_arrayitems': LLOp(canfold=True), 'direct_ptradd': LLOp(canfold=True), 
'cast_opaque_ptr': LLOp(sideeffects=False), + 'length_of_simple_gcarray_from_opaque': LLOp(sideeffects=False), # __________ address operations __________ diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1025,6 +1025,30 @@ return SomePtr(ll_ptrtype=typeOf(cast_p)) +def length_of_simple_gcarray_from_opaque(opaque_ptr): + CURTYPE = typeOf(opaque_ptr) + if not isinstance(CURTYPE, Ptr): + raise TypeError("can only cast pointers to other pointers") + if not isinstance(CURTYPE.TO, GcOpaqueType): + raise TypeError("expected a GcOpaqueType") + try: + c = opaque_ptr._obj.container + except AttributeError: + # if 'opaque_ptr' is already some _llgcopaque, hack its length + # by casting it to a random GcArray type and hoping + from rpython.rtyper.lltypesystem import rffi + p = rffi.cast(Ptr(GcArray(Signed)), opaque_ptr) + return len(p) + else: + return c.getlength() + + at analyzer_for(length_of_simple_gcarray_from_opaque) +def ann_length_of_simple_gcarray_from_opaque(s_p): + assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p + assert isinstance(s_p.ll_ptrtype.TO, GcOpaqueType) + return SomeInteger(nonneg=True) + + def direct_fieldptr(structptr, fieldname): """Get a pointer to a field in the struct. The resulting pointer is actually of type Ptr(FixedSizeArray(FIELD, 1)). 
diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -34,16 +34,18 @@ # {byte, short, int, long} *indexes; # dictentry *entries; # lookup_function_no; # one of the four possible functions for different -# # size dicts +# # size dicts; the rest of the word is a counter for how +# # many 'entries' at the start are known to be deleted # (Function DICTKEY, DICTKEY -> bool) *fnkeyeq; # (Function DICTKEY -> int) *fnkeyhash; # } # # + at jit.look_inside_iff(lambda d, key, hash, flag: jit.isvirtual(d)) + at jit.oopspec('ordereddict.lookup(d, key, hash, flag)') def ll_call_lookup_function(d, key, hash, flag): - DICT = lltype.typeOf(d).TO - fun = d.lookup_function_no + fun = d.lookup_function_no & FUNC_MASK if fun == FUNC_BYTE: return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: @@ -408,6 +410,8 @@ IS_64BIT = sys.maxint != 2 ** 31 - 1 +FUNC_SHIFT = 2 +FUNC_MASK = 0x03 # two bits if IS_64BIT: FUNC_BYTE, FUNC_SHORT, FUNC_INT, FUNC_LONG = range(4) else: @@ -441,28 +445,46 @@ d.lookup_function_no = FUNC_LONG def ll_clear_indexes(d, n): - if n <= 256: + fun = d.lookup_function_no & FUNC_MASK + d.lookup_function_no = fun + if fun == FUNC_BYTE: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_BYTE, d.indexes)) - elif n <= 65536: + elif fun == FUNC_SHORT: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_SHORT, d.indexes)) - elif IS_64BIT and n <= 2 ** 32: + elif IS_64BIT and fun == FUNC_INT: rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_INT, d.indexes)) + elif fun == FUNC_LONG: + rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes)) else: - rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes)) + assert False + at jit.dont_look_inside def ll_call_insert_clean_function(d, hash, i): - DICT = lltype.typeOf(d).TO - if d.lookup_function_no == FUNC_BYTE: + fun = 
d.lookup_function_no & FUNC_MASK + if fun == FUNC_BYTE: ll_dict_store_clean(d, hash, i, TYPE_BYTE) - elif d.lookup_function_no == FUNC_SHORT: + elif fun == FUNC_SHORT: ll_dict_store_clean(d, hash, i, TYPE_SHORT) - elif IS_64BIT and d.lookup_function_no == FUNC_INT: + elif IS_64BIT and fun == FUNC_INT: ll_dict_store_clean(d, hash, i, TYPE_INT) - elif d.lookup_function_no == FUNC_LONG: + elif fun == FUNC_LONG: ll_dict_store_clean(d, hash, i, TYPE_LONG) else: assert False +def ll_call_delete_by_entry_index(d, hash, i): + fun = d.lookup_function_no & FUNC_MASK + if fun == FUNC_BYTE: + ll_dict_delete_by_entry_index(d, hash, i, TYPE_BYTE) + elif fun == FUNC_SHORT: + ll_dict_delete_by_entry_index(d, hash, i, TYPE_SHORT) + elif IS_64BIT and fun == FUNC_INT: + ll_dict_delete_by_entry_index(d, hash, i, TYPE_INT) + elif fun == FUNC_LONG: + ll_dict_delete_by_entry_index(d, hash, i, TYPE_LONG) + else: + assert False + def ll_valid_from_flag(entries, i): return entries[i].f_valid @@ -513,7 +535,7 @@ def ll_dict_getitem(d, key): index = d.lookup_function(d, key, d.keyhash(key), FLAG_LOOKUP) - if index != -1: + if index >= 0: return d.entries[index].value else: raise KeyError @@ -572,6 +594,7 @@ _ll_dict_rescue._dont_inline_ = True def _ll_dict_insertclean(d, key, value, hash): + # never translated ENTRY = lltype.typeOf(d.entries).TO.OF ll_call_insert_clean_function(d, hash, d.num_ever_used_items) entry = d.entries[d.num_ever_used_items] @@ -590,25 +613,24 @@ # xxx Haaaack: returns len(d.indexes). Works independently of # the exact type pointed to by d, using a forced cast... # Must only be called by @jit.dont_look_inside functions. - return len(rffi.cast(DICTINDEX_BYTE, d.indexes)) + return lltype.length_of_simple_gcarray_from_opaque(d.indexes) def _overallocate_entries_len(baselen): # This over-allocates proportional to the list size, making room - # for additional growth. 
The over-allocation is mild, but is - # enough to give linear-time amortized behavior over a long - # sequence of appends() in the presence of a poorly-performing - # system malloc(). - # The growth pattern is: 0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ... - newsize = baselen + 1 - if newsize < 9: - some = 3 - else: - some = 6 - some += newsize >> 3 - return newsize + some + # for additional growth. This over-allocates slightly more eagerly + # than with regular lists. The idea is that there are many more + # lists than dicts around in PyPy, and dicts of 5 to 8 items are + # not that rare (so a single jump from 0 to 8 is a good idea). + # The growth pattern is: 0, 8, 17, 27, 38, 50, 64, 80, 98, ... + newsize = baselen + (baselen >> 3) + return newsize + 8 - at jit.dont_look_inside + at jit.look_inside_iff(lambda d: jit.isvirtual(d)) def ll_dict_grow(d): + # note: this @jit.look_inside_iff is here to inline the three lines + # at the end of this function. It's important because dicts start + # with a length-zero 'd.entries' which must be grown as soon as we + # insert an element. if d.num_live_items < d.num_ever_used_items // 2: # At least 50% of the allocated entries are dead, so perform a # compaction. If ll_dict_remove_deleted_items detects that over @@ -619,11 +641,29 @@ new_allocated = _overallocate_entries_len(len(d.entries)) - # Detect an obscure case where the indexes numeric type is too - # small to store all the entry indexes - if (max(128, _ll_len_of_d_indexes(d)) - new_allocated - < MIN_INDEXES_MINUS_ENTRIES): + # Detect a relatively rare case where the indexes numeric type is too + # small to store all the entry indexes: there would be 'new_allocated' + # entries, which may in corner cases be larger than 253 even though we + # have single bytes in 'd.indexes' (and the same for the larger + # boundaries). The 'd.indexes' hashtable is never more than 2/3rd + # full, so we know that 'd.num_live_items' should be at most 2/3 * 256 + # (or 65536 or etc.) 
so after the ll_dict_remove_deleted_items() below + # at least 1/3rd items in 'd.entries' are free. + fun = d.lookup_function_no & FUNC_MASK + toobig = False + if fun == FUNC_BYTE: + assert d.num_live_items < ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES) + toobig = new_allocated > ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES) + elif fun == FUNC_SHORT: + assert d.num_live_items < ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES) + toobig = new_allocated > ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES) + elif IS_64BIT and fun == FUNC_INT: + assert d.num_live_items < ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES) + toobig = new_allocated > ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES) + # + if toobig: ll_dict_remove_deleted_items(d) + assert d.num_live_items == d.num_ever_used_items return True newitems = lltype.malloc(lltype.typeOf(d).TO.entries.TO, new_allocated) @@ -631,6 +671,7 @@ d.entries = newitems return False + at jit.dont_look_inside def ll_dict_remove_deleted_items(d): if d.num_live_items < len(d.entries) // 4: # At least 75% of the allocated entries are dead, so shrink the memory @@ -684,7 +725,7 @@ def ll_dict_delitem(d, key): index = d.lookup_function(d, key, d.keyhash(key), FLAG_DELETE) - if index == -1: + if index < 0: raise KeyError _ll_dict_del(d, index) @@ -701,7 +742,12 @@ if ENTRIES.must_clear_value: entry.value = lltype.nullptr(ENTRY.value.TO) - if index == d.num_ever_used_items - 1: + if d.num_live_items == 0: + # Dict is now empty. Reset these fields. + d.num_ever_used_items = 0 + d.lookup_function_no &= FUNC_MASK + + elif index == d.num_ever_used_items - 1: # The last element of the ordereddict has been deleted. Instead of # simply marking the item as dead, we can safely reuse it. 
Since it's # also possible that there are more dead items immediately behind the @@ -746,7 +792,9 @@ else: ll_malloc_indexes_and_choose_lookup(d, new_size) d.resize_counter = new_size * 2 - d.num_live_items * 3 - assert d.resize_counter > 0 + ll_assert(d.resize_counter > 0, "reindex: resize_counter <= 0") + ll_assert((d.lookup_function_no >> FUNC_SHIFT) == 0, + "reindex: lookup_fun >> SHIFT") # entries = d.entries i = 0 @@ -769,23 +817,11 @@ FLAG_LOOKUP = 0 FLAG_STORE = 1 FLAG_DELETE = 2 -FLAG_DELETE_TRY_HARD = 3 @specialize.memo() def _ll_ptr_to_array_of(T): return lltype.Ptr(lltype.GcArray(T)) -def ll_kill_something(d, T): - INDEXES = _ll_ptr_to_array_of(T) - i = 0 - indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) - while True: - index = rffi.cast(lltype.Signed, indexes[i]) - if index >= VALID_OFFSET: - indexes[i] = rffi.cast(T, DELETED) - return index - i += 1 - @jit.look_inside_iff(lambda d, key, hash, store_flag, T: jit.isvirtual(d) and jit.isconstant(key)) @jit.oopspec('ordereddict.lookup(d, key, hash, store_flag, T)') @@ -827,8 +863,6 @@ # pristine entry -- lookup failed if store_flag == FLAG_STORE: indexes[i] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET) - elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD: - return ll_kill_something(d, T) return -1 # In the loop, a deleted entry (everused and not valid) is by far @@ -845,8 +879,6 @@ deletedslot = intmask(i) indexes[deletedslot] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET) - elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD: - return ll_kill_something(d, T) return -1 elif index >= VALID_OFFSET: checkingkey = entries[index - VALID_OFFSET].key @@ -881,17 +913,38 @@ mask = len(indexes) - 1 i = r_uint(hash & mask) perturb = r_uint(hash) - while rffi.cast(lltype.Signed, indexes[i]) != 0: + while rffi.cast(lltype.Signed, indexes[i]) != FREE: i = (i << 2) + i + perturb + 1 i = i & mask perturb >>= PERTURB_SHIFT indexes[i] = rffi.cast(T, index + VALID_OFFSET) +def 
ll_dict_delete_by_entry_index(d, hash, locate_index, T): + # Another simplified version of ll_dict_lookup() which locates a + # hashtable entry with the given 'index' stored in it, and deletes it. + # This *should* be safe against evil user-level __eq__/__hash__ + # functions because the 'hash' argument here should be the one stored + # into the directory, which is correct. + INDEXES = _ll_ptr_to_array_of(T) + indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes) + mask = len(indexes) - 1 + i = r_uint(hash & mask) + perturb = r_uint(hash) + locate_value = locate_index + VALID_OFFSET + while rffi.cast(lltype.Signed, indexes[i]) != locate_value: + assert rffi.cast(lltype.Signed, indexes[i]) != FREE + i = (i << 2) + i + perturb + 1 + i = i & mask + perturb >>= PERTURB_SHIFT + indexes[i] = rffi.cast(T, DELETED) + # ____________________________________________________________ # # Irregular operations. -DICT_INITSIZE = 8 +# Start the hashtable size at 16 rather than 8, as with rdict.py, because +# it is only an array of bytes +DICT_INITSIZE = 16 @specialize.memo() @@ -948,14 +1001,19 @@ self.r_dict = r_dict self.variant = variant self.lowleveltype = get_ll_dictiter(r_dict.lowleveltype) - self.ll_dictiter = ll_dictiter - self._ll_dictnext = _ll_dictnext + if variant == 'reversed': + self.ll_dictiter = ll_dictiter_reversed + self._ll_dictnext = _ll_dictnext_reversed + else: + self.ll_dictiter = ll_dictiter + self._ll_dictnext = _ll_dictnext def ll_dictiter(ITERPTR, d): iter = lltype.malloc(ITERPTR.TO) iter.dict = d - iter.index = 0 + # initialize the index with usually 0, but occasionally a larger value + iter.index = d.lookup_function_no >> FUNC_SHIFT return iter @jit.look_inside_iff(lambda iter: jit.isvirtual(iter) @@ -974,17 +1032,48 @@ if entries.valid(index): iter.index = nextindex return index + else: + # In case of repeated iteration over the start of + # a dict where the items get removed, like + # collections.OrderedDict.popitem(last=False), + # the hack below will 
increase the value stored in + # the high bits of lookup_function_no and so the + # next iteration will start at a higher value. + # We should carefully reset these high bits to zero + # as soon as we do something like ll_dict_reindex(). + if index == (dict.lookup_function_no >> FUNC_SHIFT): + dict.lookup_function_no += (1 << FUNC_SHIFT) index = nextindex # clear the reference to the dict and prevent restarts iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) raise StopIteration +def ll_dictiter_reversed(ITERPTR, d): + iter = lltype.malloc(ITERPTR.TO) + iter.dict = d + iter.index = d.num_ever_used_items + return iter + +def _ll_dictnext_reversed(iter): + dict = iter.dict + if dict: + entries = dict.entries + index = iter.index - 1 + while index >= 0: + if entries.valid(index): + iter.index = index + return index + index = index - 1 + # clear the reference to the dict and prevent restarts + iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) + raise StopIteration + # _____________________________________________________________ # methods def ll_dict_get(dict, key, default): index = dict.lookup_function(dict, key, dict.keyhash(key), FLAG_LOOKUP) - if index == -1: + if index < 0: return default else: return dict.entries[index].value @@ -992,7 +1081,7 @@ def ll_dict_setdefault(dict, key, default): hash = dict.keyhash(key) index = dict.lookup_function(dict, key, hash, FLAG_STORE) - if index == -1: + if index < 0: _ll_dict_setitem_lookup_done(dict, key, default, hash, -1) return default else: @@ -1119,7 +1208,7 @@ def ll_dict_contains(d, key): i = d.lookup_function(d, key, d.keyhash(key), FLAG_LOOKUP) - return i != -1 + return i >= 0 def _ll_getnextitem(dic): if dic.num_live_items == 0: @@ -1127,22 +1216,19 @@ entries = dic.entries + # find the last entry. It's unclear if the loop below is still + # needed nowadays, because 'num_ever_used_items - 1' should always + # point to the last active item (we decrease it as needed in + # _ll_dict_del). 
Better safe than sorry. while True: i = dic.num_ever_used_items - 1 if entries.valid(i): break dic.num_ever_used_items -= 1 - key = entries[i].key - index = dic.lookup_function(dic, key, entries.hash(i), - FLAG_DELETE_TRY_HARD) - # if the lookup function returned me a random strange thing, - # don't care about deleting the item - if index == dic.num_ever_used_items - 1: - dic.num_ever_used_items -= 1 - else: - assert index != -1 - return index + # we must remove the precise entry in the hashtable that points to 'i' + ll_call_delete_by_entry_index(dic, entries.hash(i), i) + return i def ll_dict_popitem(ELEM, dic): i = _ll_getnextitem(dic) @@ -1155,7 +1241,7 @@ def ll_dict_pop(dic, key): index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE) - if index == -1: + if index < 0: raise KeyError value = dic.entries[index].value _ll_dict_del(dic, index) @@ -1163,7 +1249,7 @@ def ll_dict_pop_default(dic, key, dfl): index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE) - if index == -1: + if index < 0: return dfl value = dic.entries[index].value _ll_dict_del(dic, index) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -445,6 +445,14 @@ return hop.genop('cast_opaque_ptr', [v_input], # v_type implicit in r_result resulttype = hop.r_result.lowleveltype) + at typer_for(lltype.length_of_simple_gcarray_from_opaque) +def rtype_length_of_simple_gcarray_from_opaque(hop): + assert isinstance(hop.args_r[0], rptr.PtrRepr) + v_opaque_ptr, = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('length_of_simple_gcarray_from_opaque', [v_opaque_ptr], + resulttype = hop.r_result.lowleveltype) + @typer_for(lltype.direct_fieldptr) def rtype_direct_fieldptr(hop): assert isinstance(hop.args_r[0], rptr.PtrRepr) diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -98,12 +98,12 @@ 
c_key = hop.inputconst(lltype.Void, 'key') v_key = hop.genop('getinteriorfield', [v_entries, v_index, c_key], resulttype=KEY) - if variant != 'keys': + if variant != 'keys' and variant != 'reversed': VALUE = ENTRIES.TO.OF.value c_value = hop.inputconst(lltype.Void, 'value') v_value = hop.genop('getinteriorfield', [v_entries,v_index,c_value], resulttype=VALUE) - if variant == 'keys': + if variant == 'keys' or variant == 'reversed': return self.r_dict.recast_key(hop.llops, v_key) elif variant == 'values': return self.r_dict.recast_value(hop.llops, v_value) diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -1005,6 +1005,7 @@ def test_dict_resize(self): + py.test.skip("test written for non-ordered dicts, update or kill") # XXX we no longer automatically resize on 'del'. We need to # hack a bit in this test to trigger a resize by continuing to # fill the dict's table while keeping the actual size very low @@ -1025,7 +1026,7 @@ res = self.interpret(func, [1]) assert len(res.entries) == rdict.DICT_INITSIZE - def test_opt_nullkeymarker(self): + def test_opt_dummykeymarker(self): def f(): d = {"hello": None} d["world"] = None @@ -1033,10 +1034,9 @@ res = self.interpret(f, []) assert res.item0 == True DICT = lltype.typeOf(res.item1).TO - assert not hasattr(DICT.entries.TO.OF, 'f_everused')# non-None string keys - assert not hasattr(DICT.entries.TO.OF, 'f_valid') # strings have a dummy + assert not hasattr(DICT.entries.TO.OF, 'f_valid') # strs have a dummy - def test_opt_nullvaluemarker(self): + def test_opt_dummyvaluemarker(self): def f(n): d = {-5: "abcd"} d[123] = "def" @@ -1044,29 +1044,8 @@ res = self.interpret(f, [-5]) assert res.item0 == 4 DICT = lltype.typeOf(res.item1).TO - assert not hasattr(DICT.entries.TO.OF, 'f_everused')# non-None str values assert not hasattr(DICT.entries.TO.OF, 'f_valid') # strs have a dummy - def test_opt_nonullmarker(self): - 
class A: - pass - def f(n): - if n > 5: - a = A() - else: - a = None - d = {a: -5441} - d[A()] = n+9872 - return d[a], d - res = self.interpret(f, [-5]) - assert res.item0 == -5441 - DICT = lltype.typeOf(res.item1).TO - assert hasattr(DICT.entries.TO.OF, 'f_everused') # can-be-None A instances - assert not hasattr(DICT.entries.TO.OF, 'f_valid')# with a dummy A instance - - res = self.interpret(f, [6]) - assert res.item0 == -5441 - def test_opt_nonnegint_dummy(self): def f(n): d = {n: 12} @@ -1077,7 +1056,6 @@ assert res.item0 == 1 assert res.item1 == 24 DICT = lltype.typeOf(res.item2).TO - assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero assert not hasattr(DICT.entries.TO.OF, 'f_valid')# nonneg int: dummy -1 def test_opt_no_dummy(self): @@ -1090,7 +1068,6 @@ assert res.item0 == 1 assert res.item1 == -24 DICT = lltype.typeOf(res.item2).TO - assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero assert hasattr(DICT.entries.TO.OF, 'f_valid') # no dummy available def test_opt_boolean_has_no_dummy(self): @@ -1103,7 +1080,6 @@ assert res.item0 == 1 assert res.item1 is True DICT = lltype.typeOf(res.item2).TO - assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero assert hasattr(DICT.entries.TO.OF, 'f_valid') # no dummy available def test_opt_multiple_identical_dicts(self): @@ -1142,6 +1118,7 @@ assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key', 'value'] def test_deleted_entry_reusage_with_colliding_hashes(self): + py.test.skip("test written for non-ordered dicts, update or kill") def lowlevelhash(value): p = rstr.mallocstr(len(value)) for i in range(len(value)): diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -115,11 +115,18 @@ rordereddict.ll_dict_setitem(ll_d, llstr("b"), 2) rordereddict.ll_dict_setitem(ll_d, llstr("c"), 3) 
rordereddict.ll_dict_setitem(ll_d, llstr("d"), 4) - assert len(get_indexes(ll_d)) == 8 rordereddict.ll_dict_setitem(ll_d, llstr("e"), 5) rordereddict.ll_dict_setitem(ll_d, llstr("f"), 6) - assert len(get_indexes(ll_d)) == 32 - for item in ['a', 'b', 'c', 'd', 'e', 'f']: + rordereddict.ll_dict_setitem(ll_d, llstr("g"), 7) + rordereddict.ll_dict_setitem(ll_d, llstr("h"), 8) + rordereddict.ll_dict_setitem(ll_d, llstr("i"), 9) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 10) + assert len(get_indexes(ll_d)) == 16 + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 11) + rordereddict.ll_dict_setitem(ll_d, llstr("l"), 12) + rordereddict.ll_dict_setitem(ll_d, llstr("m"), 13) + assert len(get_indexes(ll_d)) == 64 + for item in 'abcdefghijklm': assert rordereddict.ll_dict_getitem(ll_d, llstr(item)) == ord(item) - ord('a') + 1 def test_dict_grow_cleanup(self): @@ -160,6 +167,38 @@ assert ll_elem.item1 == 1 py.test.raises(KeyError, rordereddict.ll_dict_popitem, TUP, ll_d) + def test_popitem_first(self): + DICT = self._get_str_dict() + ll_d = rordereddict.ll_newdict(DICT) + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2) + rordereddict.ll_dict_setitem(ll_d, llstr("m"), 3) + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + for expected in ["k", "j", "m"]: + ll_iter = rordereddict.ll_dictiter(ITER, ll_d) + num = rordereddict._ll_dictnext(ll_iter) + ll_key = ll_d.entries[num].key + assert hlstr(ll_key) == expected + rordereddict.ll_dict_delitem(ll_d, ll_key) + ll_iter = rordereddict.ll_dictiter(ITER, ll_d) + py.test.raises(StopIteration, rordereddict._ll_dictnext, ll_iter) + + def test_popitem_first_bug(self): + DICT = self._get_str_dict() + ll_d = rordereddict.ll_newdict(DICT) + rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1) + rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1) + rordereddict.ll_dict_delitem(ll_d, llstr("k")) + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + ll_iter = 
rordereddict.ll_dictiter(ITER, ll_d) + num = rordereddict._ll_dictnext(ll_iter) + ll_key = ll_d.entries[num].key + assert hlstr(ll_key) == "j" + assert ll_d.lookup_function_no == 4 # 1 free item found at the start + rordereddict.ll_dict_delitem(ll_d, llstr("j")) + assert ll_d.num_ever_used_items == 0 + assert ll_d.lookup_function_no == 0 # reset + def test_direct_enter_and_del(self): def eq(a, b): return a == b diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -653,6 +653,11 @@ OP_CAST_ADR_TO_PTR = OP_CAST_POINTER OP_CAST_OPAQUE_PTR = OP_CAST_POINTER + def OP_LENGTH_OF_SIMPLE_GCARRAY_FROM_OPAQUE(self, op): + return ('%s = *(long *)(((char *)%s) + sizeof(struct pypy_header0));' + ' /* length_of_simple_gcarray_from_opaque */' + % (self.expr(op.result), self.expr(op.args[0]))) + def OP_CAST_INT_TO_PTR(self, op): TYPE = self.lltypemap(op.result) typename = self.db.gettype(TYPE) diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -246,3 +246,13 @@ assert res == 456 res = fc(77) assert res == 123 + +def test_gcarray_length(): + A = lltype.GcArray(lltype.Char) + def f(): + a = lltype.malloc(A, 117) + p = lltype.cast_opaque_ptr(GCREF, a) + return lltype.length_of_simple_gcarray_from_opaque(p) + fc = compile(f, []) + res = fc() + assert res == 117 diff --git a/rpython/translator/tool/staticsizereport.py b/rpython/translator/tool/staticsizereport.py --- a/rpython/translator/tool/staticsizereport.py +++ b/rpython/translator/tool/staticsizereport.py @@ -3,6 +3,7 @@ from rpython.tool.ansicolor import red, yellow, green from rpython.rtyper.lltypesystem.lltype import typeOf, _ptr, Ptr, ContainerType +from rpython.rtyper.lltypesystem.lltype import GcOpaqueType from rpython.rtyper.lltypesystem import llmemory from 
rpython.memory.lltypelayout import convert_offset_to_int @@ -54,6 +55,8 @@ if isinstance(typeOf(value), Ptr): container = value._obj if isinstance(typeOf(container), ContainerType): + if isinstance(typeOf(container), GcOpaqueType): + container = container.container node = database.getcontainernode(container) if node.nodekind != 'func': nodes.append(node) @@ -77,7 +80,10 @@ return 0 else: length = None - return convert_offset_to_int(llmemory.sizeof(TYPE, length)) + #print obj, ', length =', length + r = convert_offset_to_int(llmemory.sizeof(TYPE, length)) + #print '\tr =', r + return r def guess_size(database, node, recursive=None): diff --git a/rpython/translator/tool/test/test_staticsizereport.py b/rpython/translator/tool/test/test_staticsizereport.py --- a/rpython/translator/tool/test/test_staticsizereport.py +++ b/rpython/translator/tool/test/test_staticsizereport.py @@ -57,10 +57,17 @@ P = rffi.sizeof(rffi.VOIDP) B = 1 # bool assert guess_size(func.builder.db, dictvalnode, set()) > 100 - assert guess_size(func.builder.db, dictvalnode2, set()) == 2 * S + 1 * P + 1 * S + 8 * (2*S + 1 * B) + assert guess_size(func.builder.db, dictvalnode2, set()) == ( + (4 * S + 2 * P) + # struct dicttable + (S + 8) + # indexes, length 8 + (S + S + S)) # entries, length 1 r_set = set() dictnode_size = guess_size(db, test_dictnode, r_set) - assert dictnode_size == 2 * S + 1 * P + 1 * S + (4096-256) * (1*S+1*P + (1 * S + 1*P + 5)) + (8192-4096+256) * (1*S+1*P) + assert dictnode_size == ( + (4 * S + 2 * P) + # struct dicttable + (S + 2 * 8192) + # indexes, length 8192, rffi.USHORT + (S + (S + S) * 3840) + # entries, length 3840 + (S + S + 5) * 3840) # 3840 strings with 5 chars each assert guess_size(func.builder.db, fixarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(lltype.Signed) assert guess_size(func.builder.db, dynarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 2 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(rffi.VOIDP) From noreply at 
buildbot.pypy.org Mon Jan 19 12:05:52 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 12:05:52 +0100 (CET) Subject: [pypy-commit] pypy default: Fix test Message-ID: <20150119110552.305F31C0271@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75433:b6913a381bf9 Date: 2015-01-19 12:05 +0100 http://bitbucket.org/pypy/pypy/changeset/b6913a381bf9/ Log: Fix test diff --git a/rpython/translator/tool/test/test_staticsizereport.py b/rpython/translator/tool/test/test_staticsizereport.py --- a/rpython/translator/tool/test/test_staticsizereport.py +++ b/rpython/translator/tool/test/test_staticsizereport.py @@ -59,7 +59,7 @@ assert guess_size(func.builder.db, dictvalnode, set()) > 100 assert guess_size(func.builder.db, dictvalnode2, set()) == ( (4 * S + 2 * P) + # struct dicttable - (S + 8) + # indexes, length 8 + (S + 16) + # indexes, length 16 (S + S + S)) # entries, length 1 r_set = set() dictnode_size = guess_size(db, test_dictnode, r_set) From noreply at buildbot.pypy.org Mon Jan 19 12:07:35 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 12:07:35 +0100 (CET) Subject: [pypy-commit] pypy default: Changed the default DICT_INITSIZE Message-ID: <20150119110735.6718F1C0303@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75434:3ab517117ed9 Date: 2015-01-19 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/3ab517117ed9/ Log: Changed the default DICT_INITSIZE diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -68,7 +68,7 @@ guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) 
- p15 = new_array_clear(8, descr=) + p15 = new_array_clear(16, descr=) {{{ setfield_gc(p13, 0, descr=) setfield_gc(p13, p15, descr=) From noreply at buildbot.pypy.org Mon Jan 19 12:08:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 12:08:30 +0100 (CET) Subject: [pypy-commit] pypy errno-again: hg merge default Message-ID: <20150119110830.265281C0303@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75435:9cdf809ec5a5 Date: 2015-01-19 12:08 +0100 http://bitbucket.org/pypy/pypy/changeset/9cdf809ec5a5/ Log: hg merge default diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -68,7 +68,7 @@ guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) - p15 = new_array_clear(8, descr=) + p15 = new_array_clear(16, descr=) {{{ setfield_gc(p13, 0, descr=) setfield_gc(p13, p15, descr=) diff --git a/rpython/translator/tool/test/test_staticsizereport.py b/rpython/translator/tool/test/test_staticsizereport.py --- a/rpython/translator/tool/test/test_staticsizereport.py +++ b/rpython/translator/tool/test/test_staticsizereport.py @@ -59,7 +59,7 @@ assert guess_size(func.builder.db, dictvalnode, set()) > 100 assert guess_size(func.builder.db, dictvalnode2, set()) == ( (4 * S + 2 * P) + # struct dicttable - (S + 8) + # indexes, length 8 + (S + 16) + # indexes, length 16 (S + S + S)) # entries, length 1 r_set = set() dictnode_size = guess_size(db, test_dictnode, r_set) From noreply at buildbot.pypy.org Mon Jan 19 12:16:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 12:16:56 +0100 (CET) Subject: [pypy-commit] pypy default: Baaaah. 
It picked "whatsnew-head.rst" by choosing the last Message-ID: <20150119111656.9E5AE1C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75436:41b93b18408c Date: 2015-01-19 12:16 +0100 http://bitbucket.org/pypy/pypy/changeset/41b93b18408c/ Log: Baaaah. It picked "whatsnew-head.rst" by choosing the last "whatsnew-*.rst" in alphabetical order. But nowadays we have "whatsnew-pypy3-*" too... diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py --- a/pypy/doc/test/test_whatsnew.py +++ b/pypy/doc/test/test_whatsnew.py @@ -78,9 +78,10 @@ def test_whatsnew(): doc = ROOT.join('pypy', 'doc') - whatsnew_list = doc.listdir('whatsnew-*.rst') - whatsnew_list.sort() - last_whatsnew = whatsnew_list[-1].read() + #whatsnew_list = doc.listdir('whatsnew-*.rst') + #whatsnew_list.sort() + #last_whatsnew = whatsnew_list[-1].read() + last_whatsnew = doc.join('whatsnew-head.rst').read() startrev, documented = parse_doc(last_whatsnew) merged, branch = get_merged_branches(ROOT, startrev, '') merged.discard('default') From noreply at buildbot.pypy.org Mon Jan 19 12:20:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 12:20:50 +0100 (CET) Subject: [pypy-commit] pypy default: list all undocumented branches Message-ID: <20150119112050.A9A4D1C0173@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75437:9012135a9d14 Date: 2015-01-19 12:20 +0100 http://bitbucket.org/pypy/pypy/changeset/9012135a9d14/ Log: list all undocumented branches diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,7 +52,7 @@ Use SSA form for flow graphs inside build_flow() and part of simplify_graph() -.. branch: ufuncpai +.. branch: ufuncapi Implement most of the GenericUfunc api to support numpy linalg. The strategy is to encourage use of pure python or cffi ufuncs by extending frompyfunc(). 
@@ -60,3 +60,29 @@ of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in python, calling lapack from cffi. The branch also support traditional use of cpyext GenericUfunc definitions in c. + +.. branch: all_ordered_dicts +.. branch: berkerpeksag/fix-broken-link-in-readmerst-1415127402066 +.. branch: bigint-with-int-ops +.. branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 +.. branch: float-opt +.. branch: gc-incminimark-pinning +.. branch: gc_no_cleanup_nursery +.. branch: improve-gc-tracing-hooks +.. branch: improve-ptr-conv-error +.. branch: intern-not-immortal +.. branch: issue1922-take2 +.. branch: kill-exported-symbols-list +.. branch: kill-rctime +.. branch: kill_ll_termios +.. branch: look-into-all-modules +.. branch: nditer-external_loop +.. branch: numpy-generic-item +.. branch: osx-shared +.. branch: portable-threadlocal +.. branch: pypy-dont-copy-ops +.. branch: recursion_and_inlining +.. branch: slim-down-resumedescr +.. branch: squeaky/use-cflags-for-compiling-asm +.. branch: unicode-fix +.. branch: zlib_zdict From noreply at buildbot.pypy.org Mon Jan 19 12:29:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 12:29:39 +0100 (CET) Subject: [pypy-commit] pypy default: Add documentation for a few selected branches, from "hg log". Message-ID: <20150119112939.EE85C1C026F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75438:4a9d6032d86c Date: 2015-01-19 12:29 +0100 http://bitbucket.org/pypy/pypy/changeset/4a9d6032d86c/ Log: Add documentation for a few selected branches, from "hg log". diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -62,15 +62,46 @@ cpyext GenericUfunc definitions in c. .. branch: all_ordered_dicts + +This makes ordered dicts the default dictionary implementation in +RPython and in PyPy. 
It polishes the basic idea of rordereddict.py +and then fixes various things, up to simplifying +collections.OrderedDict. + +Note: Python programs can rely on the guaranteed dict order in PyPy +now, but for compatibility with other Python implementations they +should still use collections.OrderedDict where that really matters. +Also, support for reversed() was *not* added to the 'dict' class; +use OrderedDict. + +Benchmark results: in the noise. A few benchmarks see good speed +improvements but the average is very close to parity. + .. branch: berkerpeksag/fix-broken-link-in-readmerst-1415127402066 .. branch: bigint-with-int-ops .. branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 .. branch: float-opt .. branch: gc-incminimark-pinning + +This branch adds an interface rgc.pin which would (very temporarily) +make object non-movable. That's used by rffi.alloc_buffer and +rffi.get_nonmovable_buffer and improves performance considerably for +IO operations. + .. branch: gc_no_cleanup_nursery + +A branch started by Wenzhu Man (SoC'14) and then done by fijal. It +removes the clearing of the nursery. The drawback is that new objects +are not automatically filled with zeros any longer, which needs some +care, mostly for GC references (which the GC tries to follow, so they +must not contain garbage). The benefit is a quite large speed-up. + .. branch: improve-gc-tracing-hooks .. branch: improve-ptr-conv-error .. branch: intern-not-immortal + +Fix intern() to return mortal strings, like in CPython. + .. branch: issue1922-take2 .. branch: kill-exported-symbols-list .. branch: kill-rctime @@ -79,6 +110,9 @@ .. branch: nditer-external_loop .. branch: numpy-generic-item .. branch: osx-shared + +``--shared`` support on OS/X (thanks wouter) + .. branch: portable-threadlocal .. branch: pypy-dont-copy-ops .. 
branch: recursion_and_inlining From noreply at buildbot.pypy.org Mon Jan 19 12:49:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 12:49:25 +0100 (CET) Subject: [pypy-commit] pypy errno-again: fix test Message-ID: <20150119114925.8B7EC1C0041@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75439:48a9ea8cca8a Date: 2015-01-19 12:48 +0100 http://bitbucket.org/pypy/pypy/changeset/48a9ea8cca8a/ Log: fix test diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -123,10 +123,9 @@ assert result == 3 ** 2 self.check_trace_count(1) self.check_simple_loop({ - 'call': 1, + 'call': 2, # ccall_pow / _ll_1_threadlocalref_get(rpy_errno) 'float_eq': 2, 'float_mul': 2, - 'getarrayitem_raw': 1, # read the errno 'guard_false': 2, 'guard_not_invalidated': 1, 'guard_true': 2, @@ -136,7 +135,6 @@ 'jump': 1, 'raw_load': 1, 'raw_store': 1, - 'setarrayitem_raw': 1, # write the errno }) def define_pow_int(): From noreply at buildbot.pypy.org Mon Jan 19 15:07:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 15:07:11 +0100 (CET) Subject: [pypy-commit] pypy errno-again: translation fix Message-ID: <20150119140711.5262E1C0098@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75440:2883f8230e86 Date: 2015-01-19 15:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2883f8230e86/ Log: translation fix diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -12,7 +12,7 @@ from rpython.jit.backend.x86.jump import remap_frame_layout from rpython.jit.backend.llsupport.callbuilder import AbstractCallBuilder from rpython.jit.backend.llsupport import llerrno -from rpython.rtyper.lltypesystem import rffi +from 
rpython.rtyper.lltypesystem import llmemory, rffi # darwin requires the stack to be 16 bytes aligned on calls. @@ -190,7 +190,8 @@ # because we are on 32-bit in this case: no register contains # the arguments to the main function we want to call afterwards. from rpython.rlib.rwin32 import _SetLastError - SetLastError_addr = self.asm.cpu.cast_ptr_to_int(_SetLastError) + adr = llmemory.cast_ptr_to_adr(_SetLastError) + SetLastError_addr = self.asm.cpu.cast_adr_to_int(adr) assert isinstance(self, CallBuilder32) # Windows 32-bit only # rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu) @@ -246,10 +247,11 @@ rffi.RFFI_SAVE_WSALASTERROR)): if save_err & rffi.RFFI_SAVE_LASTERROR: from rpython.rlib.rwin32 import _GetLastError - GetLastError_addr = self.asm.cpu.cast_ptr_to_int(_GetLastError) + adr = llmemory.cast_ptr_to_adr(_GetLastError) else: - from rpython.rlib._rsocket_rffi import _WSAGetLastError as WSAE - GetLastError_addr = self.asm.cpu.cast_ptr_to_int(WSAE) + from rpython.rlib._rsocket_rffi import _WSAGetLastError + adr = llmemory.cast_ptr_to_adr(_WSAGetLastError) + GetLastError_addr = self.asm.cpu.cast_adr_to_int(adr) assert isinstance(self, CallBuilder32) # Windows 32-bit only # rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu) From noreply at buildbot.pypy.org Mon Jan 19 16:09:57 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 16:09:57 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: kill assert (not sure if it can be fixed easily) Message-ID: <20150119150957.1B04E1C0041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1545:50887be70049 Date: 2015-01-19 13:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/50887be70049/ Log: kill assert (not sure if it can be fixed easily) diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -270,7 +270,9 @@ /* Don't check this 'cl'. 
This entry is already checked */ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { - assert(first_cl->next == INEV_RUNNING); + //assert(first_cl->next == INEV_RUNNING); + /* the above assert may fail when running a major collection + while the commit of the inevitable transaction is in progress */ return true; } From noreply at buildbot.pypy.org Mon Jan 19 16:09:59 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 16:09:59 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: Merge with default Message-ID: <20150119150959.1091B1C0041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1546:82528c2b6af6 Date: 2015-01-19 15:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/82528c2b6af6/ Log: Merge with default diff too long, truncating to 2000 out of 3250 lines diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -3,6 +3,7 @@ #include #include #include +#include #ifdef USE_HTM # include "../../htm-c7/stmgc.h" @@ -59,12 +60,25 @@ } void stmcb_commit_soon() {} -static void expand_marker(char *base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize) +static void timing_event(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers) { - assert(following_object == NULL); - snprintf(outputbuf, outputbufsize, "<%p %lu>", base, odd_number); + static char *event_names[] = { STM_EVENT_NAMES }; + + char buf[1024], *p; + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + + p = buf; + p += sprintf(p, "{%.9f} %p %s", tp.tv_sec + 0.000000001 * tp.tv_nsec, + tl, event_names[event]); + if (markers != NULL) { + p += sprintf(p, ", markers: %lu, %lu", + markers[0].odd_number, markers[1].odd_number); + } + sprintf(p, "\n"); + fputs(buf, stderr); } @@ -108,18 +122,6 @@ stm_start_transaction(&stm_thread_local); - if (stm_thread_local.longest_marker_state != 0) { - fprintf(stderr, "[%p] 
marker %d for %.6f seconds:\n", - &stm_thread_local, - stm_thread_local.longest_marker_state, - stm_thread_local.longest_marker_time); - fprintf(stderr, "\tself:\t\"%s\"\n\tother:\t\"%s\"\n", - stm_thread_local.longest_marker_self, - stm_thread_local.longest_marker_other); - stm_thread_local.longest_marker_state = 0; - stm_thread_local.longest_marker_time = 0.0; - } - nodeptr_t prev = initial; stm_read((objptr_t)prev); @@ -223,7 +225,6 @@ void unregister_thread_local(void) { - stm_flush_timing(&stm_thread_local, 1); stm_unregister_thread_local(&stm_thread_local); } @@ -295,9 +296,15 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + + /* check that we can use stm_start_inevitable_transaction() without + any rjbuf on the stack */ + stm_start_inevitable_transaction(&stm_thread_local); + stm_commit_transaction(); + + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); - stmcb_expand_marker = expand_marker; - + stmcb_timing_event = timing_event; setup_list(); diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -3,34 +3,50 @@ #endif -enum contention_kind_e { +/* Here are the possible kinds of contention: - /* A write-write contention occurs when we running our transaction - and detect that we are about to write to an object that another - thread is also writing to. This kind of contention must be - resolved before continuing. This *must* abort one of the two - threads: the caller's thread is not at a safe-point, so cannot - wait! */ - WRITE_WRITE_CONTENTION, + STM_CONTENTION_WRITE_WRITE - /* A write-read contention occurs when we are trying to commit: it + A write-write contention occurs when we are running our + transaction and detect that we are about to write to an object + that another thread is also writing to. This kind of + contention must be resolved before continuing. This *must* + abort one of the two threads: the caller's thread is not at a + safe-point, so cannot wait! 
+ + It is reported as a timing event with the following two markers: + the current thread (i.e. where the second-in-time write occurs); + and the other thread (from its 'modified_old_objects_markers', + where the first-in-time write occurred). + + STM_CONTENTION_WRITE_READ + + A write-read contention occurs when we are trying to commit: it means that an object we wrote to was also read by another transaction. Even though it would seem obvious that we should just abort the other thread and proceed in our commit, a more subtle answer would be in some cases to wait for the other thread to commit first. It would commit having read the old value, and - then we can commit our change to it. */ - WRITE_READ_CONTENTION, + then we can commit our change to it. - /* An inevitable contention occurs when we're trying to become + It is reported as a timing event with only one marker: the + older location of the write that was done by the current thread. + + STM_CONTENTION_INEVITABLE + + An inevitable contention occurs when we're trying to become inevitable but another thread already is. We can never abort the other thread in this case, but we still have the choice to abort - ourselves or pause until the other thread commits. */ - INEVITABLE_CONTENTION, -}; + ourselves or pause until the other thread commits. + + It is reported with two markers, one for the current thread and + one for the other thread. Each marker gives the location that + attempts to make the transaction inevitable. 
+*/ + struct contmgr_s { - enum contention_kind_e kind; + enum stm_event_e kind; struct stm_priv_segment_info_s *other_pseg; bool abort_other; bool try_sleep; // XXX add a way to timeout, but should handle repeated @@ -99,7 +115,7 @@ static bool contention_management(uint8_t other_segment_num, - enum contention_kind_e kind, + enum stm_event_e kind, object_t *obj) { assert(_has_mutex()); @@ -109,6 +125,9 @@ if (must_abort()) abort_with_mutex(); + /* Report the contention */ + timing_contention(kind, other_segment_num, obj); + /* Who should abort here: this thread, or the other thread? */ struct contmgr_s contmgr; contmgr.kind = kind; @@ -138,20 +157,9 @@ contmgr.abort_other = false; } - - int wait_category = - kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ : - kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE : - STM_TIME_WAIT_OTHER; - - int abort_category = - kind == WRITE_WRITE_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_WRITE : - kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ : - kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE : - STM_TIME_RUN_ABORTED_OTHER; - - - if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && + /* Do one of three things here... + */ + if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { others_may_have_run = true; /* Sleep. @@ -164,30 +172,24 @@ itself already paused here. 
*/ contmgr.other_pseg->signal_when_done = true; - marker_contention(kind, false, other_segment_num, obj); - - change_timing_state(wait_category); /* tell the other to commit ASAP */ signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("pausing...\n")); + + timing_event(STM_SEGMENT->running_thread, STM_WAIT_CONTENTION); + cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; cond_wait(C_TRANSACTION_DONE); STM_PSEGMENT->safe_point = SP_RUNNING; dprintf(("pausing done\n")); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE); + if (must_abort()) abort_with_mutex(); - - struct stm_priv_segment_info_s *pseg = - get_priv_segment(STM_SEGMENT->segment_num); - double elapsed = - change_timing_state_tl(pseg->pub.running_thread, - STM_TIME_RUN_CURRENT); - marker_copy(pseg->pub.running_thread, pseg, - wait_category, elapsed); } else if (!contmgr.abort_other) { @@ -195,16 +197,16 @@ signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("abort in contention: kind %d\n", kind)); - STM_SEGMENT->nursery_end = abort_category; - marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } else { /* We have to signal the other thread to abort, and wait until it does. */ - contmgr.other_pseg->pub.nursery_end = abort_category; - marker_contention(kind, true, other_segment_num, obj); + contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; + + timing_event(STM_SEGMENT->running_thread, + STM_ABORTING_OTHER_CONTENTION); int sp = contmgr.other_pseg->safe_point; switch (sp) { @@ -296,7 +298,8 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); + contention_management(other_segment_num, + STM_CONTENTION_WRITE_WRITE, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. 
*/ @@ -308,10 +311,12 @@ static bool write_read_contention_management(uint8_t other_segment_num, object_t *obj) { - return contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); + return contention_management(other_segment_num, + STM_CONTENTION_WRITE_READ, obj); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); + contention_management(other_segment_num, + STM_CONTENTION_INEVITABLE, NULL); } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -124,17 +124,13 @@ dprintf_test(("write_slowpath %p -> mod_old\n", obj)); - /* First change to this old object from this transaction. + /* Add the current marker, recording where we wrote to this object */ + timing_record_write(); + + /* Change to this old object from this transaction. Add it to the list 'modified_old_objects'. */ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); - /* Add the current marker, recording where we wrote to this object */ - uintptr_t marker[2]; - marker_fetch(STM_SEGMENT->running_thread, marker); - STM_PSEGMENT->modified_old_objects_markers = - list_append2(STM_PSEGMENT->modified_old_objects_markers, - marker[0], marker[1]); - release_marker_lock(STM_SEGMENT->segment_base); /* We need to privatize the pages containing the object, if they @@ -328,29 +324,24 @@ STM_SEGMENT->transaction_read_version = 1; } -static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable) +static uint64_t _global_start_time = 0; + +static void _stm_start_transaction(stm_thread_local_t *tl) { assert(!_stm_in_transaction(tl)); - retry: - if (inevitable) { - wait_for_end_of_inevitable_transaction(tl); - } - - if (!acquire_thread_segment(tl)) - goto retry; + while (!acquire_thread_segment(tl)) + ; /* GS invalid before this point! 
*/ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); - change_timing_state(STM_TIME_RUN_CURRENT); - STM_PSEGMENT->start_time = tl->_timing_cur_start; + timing_event(tl, STM_TRANSACTION_START); + STM_PSEGMENT->start_time = _global_start_time++; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; - STM_PSEGMENT->marker_inev[1] = 0; - if (inevitable) - marker_fetch_inev(); - STM_PSEGMENT->transaction_state = (inevitable ? TS_INEVITABLE : TS_REGULAR); + STM_PSEGMENT->marker_inev.object = NULL; + STM_PSEGMENT->transaction_state = TS_REGULAR; #ifndef NDEBUG STM_PSEGMENT->running_pthread = pthread_self(); #endif @@ -383,6 +374,7 @@ assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1])); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); + assert(STM_PSEGMENT->finalizers == NULL); #ifndef NDEBUG /* this should not be used when objects_pointing_to_nursery == NULL */ STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L; @@ -399,14 +391,21 @@ #else long repeat_count = stm_rewind_jmp_setjmp(tl); #endif - _stm_start_transaction(tl, false); + _stm_start_transaction(tl); return repeat_count; } void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - s_mutex_lock(); - _stm_start_transaction(tl, true); + /* used to be more efficient, starting directly an inevitable transaction, + but there is no real point any more, I believe */ + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + + stm_start_transaction(tl); + stm_become_inevitable(tl, "start_inevitable_transaction"); + + stm_rewind_jmp_leaveframe(tl, &rjbuf); } @@ -449,7 +448,10 @@ return true; } /* we aborted the other transaction without waiting, so - we can just continue */ + we can just break out of this loop on + modified_old_objects and continue with the next + segment */ + break; } })); } @@ -783,13 +785,13 @@ 
list_clear(STM_PSEGMENT->modified_old_objects_markers); } -static void _finish_transaction(int attribute_to) +static void _finish_transaction(enum stm_event_e event) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; /* marker_inev is not needed anymore */ - STM_PSEGMENT->marker_inev[1] = 0; + STM_PSEGMENT->marker_inev.object = NULL; /* reset these lists to NULL for the next transaction */ _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); @@ -797,24 +799,24 @@ list_clear(STM_PSEGMENT->old_objects_with_cards); LIST_FREE(STM_PSEGMENT->large_overflow_objects); - timing_end_transaction(attribute_to); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + timing_event(tl, event); - stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ } void stm_commit_transaction(void) { + restart_all: + exec_local_finalizers(); + assert(!_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); assert(STM_PSEGMENT->running_pthread == pthread_self()); minor_collection(/*commit=*/ true); - /* the call to minor_collection() above leaves us with - STM_TIME_BOOKKEEPING */ - /* synchronize overflow objects living in privatized pages */ push_overflow_objects_from_privatized_pages(); @@ -826,6 +828,11 @@ Important: we should not call cond_wait() in the meantime. 
*/ synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); + if (any_local_finalizers()) { + s_mutex_unlock(); + goto restart_all; + } + /* detect conflicts */ if (detect_write_read_conflicts()) goto restart; @@ -838,15 +845,17 @@ /* if a major collection is required, do it here */ if (is_major_collection_requested()) { - int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); major_collection_now_at_safe_point(); - change_timing_state(oldstate); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); } /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); + commit_finalizers(); + /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; @@ -867,10 +876,13 @@ } /* done */ - _finish_transaction(STM_TIME_RUN_COMMITTED); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + _finish_transaction(STM_TRANSACTION_COMMIT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); + + invoke_general_finalizers(tl); } void stm_abort_transaction(void) @@ -960,10 +972,6 @@ (int)pseg->transaction_state); } - /* if we don't have marker information already, look up and preserve - the marker information from the shadowstack as a string */ - marker_default_for_abort(pseg); - /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -1052,16 +1060,15 @@ /* invoke the callbacks */ invoke_and_clear_user_callbacks(1); /* for abort */ - int attribute_to = STM_TIME_RUN_ABORTED_OTHER; + abort_finalizers(); if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ - attribute_to = STM_SEGMENT->nursery_end; STM_SEGMENT->nursery_end = pause_signalled ? 
NSE_SIGPAUSE : NURSERY_END; } - _finish_transaction(attribute_to); + _finish_transaction(STM_TRANSACTION_ABORT); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ /* Broadcast C_ABORTED to wake up contention.c */ @@ -1103,8 +1110,8 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); - marker_fetch_inev(); - wait_for_end_of_inevitable_transaction(NULL); + timing_fetch_inev(); + wait_for_end_of_inevitable_transaction(); STM_PSEGMENT->transaction_state = TS_INEVITABLE; stm_rewind_jmp_forget(STM_SEGMENT->running_thread); invoke_and_clear_user_callbacks(0); /* for commit */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -138,7 +138,7 @@ /* Start time: to know approximately for how long a transaction has been running, in contention management */ - double start_time; + uint64_t start_time; /* This is the number stored in the overflowed objects (a multiple of GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the @@ -196,10 +196,15 @@ pthread_t running_pthread; #endif - /* Temporarily stores the marker information */ - char marker_self[_STM_MARKER_LEN]; - char marker_other[_STM_MARKER_LEN]; - uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ + /* marker where this thread became inevitable */ + stm_loc_marker_t marker_inev; + + /* light finalizers */ + struct list_s *young_objects_with_light_finalizers; + struct list_s *old_objects_with_light_finalizers; + + /* regular finalizers (objs from the current transaction only) */ + struct finalizers_s *finalizers; }; enum /* safe_point */ { diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c new file mode 100644 --- /dev/null +++ b/c7/stm/finalizer.c @@ -0,0 +1,404 @@ + + +/* callbacks */ +void (*stmcb_light_finalizer)(object_t *); +void (*stmcb_finalizer)(object_t *); + + +static void init_finalizers(struct finalizers_s *f) +{ + f->objects_with_finalizers = list_create(); + f->count_non_young = 0; + 
f->run_finalizers = NULL; + f->running_next = NULL; +} + +static void setup_finalizer(void) +{ + init_finalizers(&g_finalizers); +} + +static void teardown_finalizer(void) +{ + if (g_finalizers.run_finalizers != NULL) + list_free(g_finalizers.run_finalizers); + list_free(g_finalizers.objects_with_finalizers); + memset(&g_finalizers, 0, sizeof(g_finalizers)); +} + +static void _commit_finalizers(void) +{ + if (STM_PSEGMENT->finalizers->run_finalizers != NULL) { + /* copy 'STM_PSEGMENT->finalizers->run_finalizers' into + 'g_finalizers.run_finalizers', dropping any initial NULLs + (finalizers already called) */ + struct list_s *src = STM_PSEGMENT->finalizers->run_finalizers; + uintptr_t frm = 0; + if (STM_PSEGMENT->finalizers->running_next != NULL) { + frm = *STM_PSEGMENT->finalizers->running_next; + assert(frm <= list_count(src)); + *STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1; + } + if (frm < list_count(src)) { + g_finalizers.run_finalizers = list_extend( + g_finalizers.run_finalizers, + src, frm); + } + list_free(src); + } + + /* copy the whole 'STM_PSEGMENT->finalizers->objects_with_finalizers' + into 'g_finalizers.objects_with_finalizers' */ + g_finalizers.objects_with_finalizers = list_extend( + g_finalizers.objects_with_finalizers, + STM_PSEGMENT->finalizers->objects_with_finalizers, 0); + list_free(STM_PSEGMENT->finalizers->objects_with_finalizers); + + free(STM_PSEGMENT->finalizers); + STM_PSEGMENT->finalizers = NULL; +} + +static void _abort_finalizers(void) +{ + /* like _commit_finalizers(), but forget everything from the + current transaction */ + if (STM_PSEGMENT->finalizers->run_finalizers != NULL) { + if (STM_PSEGMENT->finalizers->running_next != NULL) { + *STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1; + } + list_free(STM_PSEGMENT->finalizers->run_finalizers); + } + list_free(STM_PSEGMENT->finalizers->objects_with_finalizers); + free(STM_PSEGMENT->finalizers); + STM_PSEGMENT->finalizers = NULL; +} + + +void 
stm_enable_light_finalizer(object_t *obj) +{ + if (_is_young(obj)) + LIST_APPEND(STM_PSEGMENT->young_objects_with_light_finalizers, obj); + else + LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj); +} + +object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up) +{ + object_t *obj = _stm_allocate_external(size_rounded_up); + + if (STM_PSEGMENT->finalizers == NULL) { + struct finalizers_s *f = malloc(sizeof(struct finalizers_s)); + if (f == NULL) + stm_fatalerror("out of memory in create_finalizers"); /* XXX */ + init_finalizers(f); + STM_PSEGMENT->finalizers = f; + } + LIST_APPEND(STM_PSEGMENT->finalizers->objects_with_finalizers, obj); + return obj; +} + + +/************************************************************/ +/* Light finalizers +*/ + +static void deal_with_young_objects_with_finalizers(void) +{ + /* for light finalizers */ + struct list_s *lst = STM_PSEGMENT->young_objects_with_light_finalizers; + long i, count = list_count(lst); + for (i = 0; i < count; i++) { + object_t* obj = (object_t *)list_item(lst, i); + assert(_is_young(obj)); + + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + if (pforwarded_array[0] != GCWORD_MOVED) { + /* not moved: the object dies */ + stmcb_light_finalizer(obj); + } + else { + obj = pforwarded_array[1]; /* moved location */ + assert(!_is_young(obj)); + LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj); + } + } + list_clear(lst); +} + +static void deal_with_old_objects_with_finalizers(void) +{ + /* for light finalizers */ + int old_gs_register = STM_SEGMENT->segment_num; + int current_gs_register = old_gs_register; + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + + struct list_s *lst = pseg->old_objects_with_light_finalizers; + long i, count = list_count(lst); + lst->count = 0; + for (i = 0; i < count; i++) { + object_t* obj = (object_t *)list_item(lst, i); + if (!mark_visited_test(obj)) { + /* not marked: 
object dies */ + /* we're calling the light finalizer in the same + segment as where it was originally registered. For + objects that existed since a long time, it doesn't + change anything: any thread should see the same old + content (because if it wasn't the case, the object + would be in a 'modified_old_objects' list + somewhere, and so it wouldn't be dead). But it's + important if the object was created by the same + transaction: then only that segment sees valid + content. + */ + if (j != current_gs_register) { + set_gs_register(get_segment_base(j)); + current_gs_register = j; + } + stmcb_light_finalizer(obj); + } + else { + /* object survives */ + list_set_item(lst, lst->count++, (uintptr_t)obj); + } + } + } + if (old_gs_register != current_gs_register) + set_gs_register(get_segment_base(old_gs_register)); +} + + +/************************************************************/ +/* Algorithm for regular (non-light) finalizers. + Follows closely pypy/doc/discussion/finalizer-order.rst + as well as rpython/memory/gc/minimark.py. +*/ + +static inline int _finalization_state(object_t *obj) +{ + /* Returns the state, "0", 1, 2 or 3, as per finalizer-order.rst. + One difference is that the official state 0 is returned here + as a number that is <= 0. 
*/ + uintptr_t lock_idx = mark_loc(obj); + return write_locks[lock_idx] - (WL_FINALIZ_ORDER_1 - 1); +} + +static void _bump_finalization_state_from_0_to_1(object_t *obj) +{ + uintptr_t lock_idx = mark_loc(obj); + assert(write_locks[lock_idx] < WL_FINALIZ_ORDER_1); + write_locks[lock_idx] = WL_FINALIZ_ORDER_1; +} + +static struct list_s *_finalizer_tmpstack; +static struct list_s *_finalizer_emptystack; +static struct list_s *_finalizer_pending; + +static inline void _append_to_finalizer_tmpstack(object_t **pobj) +{ + object_t *obj = *pobj; + if (obj != NULL) + LIST_APPEND(_finalizer_tmpstack, obj); +} + +static inline struct list_s *finalizer_trace(char *base, object_t *obj, + struct list_s *lst) +{ + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(base, obj); + _finalizer_tmpstack = lst; + stmcb_trace(realobj, &_append_to_finalizer_tmpstack); + return _finalizer_tmpstack; +} + +static void _recursively_bump_finalization_state(char *base, object_t *obj, + int to_state) +{ + struct list_s *tmpstack = _finalizer_emptystack; + assert(list_is_empty(tmpstack)); + + while (1) { + if (_finalization_state(obj) == to_state - 1) { + /* bump to the next state */ + write_locks[mark_loc(obj)]++; + + /* trace */ + tmpstack = finalizer_trace(base, obj, tmpstack); + } + + if (list_is_empty(tmpstack)) + break; + + obj = (object_t *)list_pop_item(tmpstack); + } + _finalizer_emptystack = tmpstack; +} + +static struct list_s *mark_finalize_step1(char *base, struct finalizers_s *f) +{ + if (f == NULL) + return NULL; + + struct list_s *marked = list_create(); + + struct list_s *lst = f->objects_with_finalizers; + long i, count = list_count(lst); + lst->count = 0; + for (i = 0; i < count; i++) { + object_t *x = (object_t *)list_item(lst, i); + + assert(_finalization_state(x) != 1); + if (_finalization_state(x) >= 2) { + list_set_item(lst, lst->count++, (uintptr_t)x); + continue; + } + LIST_APPEND(marked, x); + + struct list_s *pending = _finalizer_pending; + 
LIST_APPEND(pending, x); + while (!list_is_empty(pending)) { + object_t *y = (object_t *)list_pop_item(pending); + int state = _finalization_state(y); + if (state <= 0) { + _bump_finalization_state_from_0_to_1(y); + pending = finalizer_trace(base, y, pending); + } + else if (state == 2) { + _recursively_bump_finalization_state(base, y, 3); + } + } + _finalizer_pending = pending; + assert(_finalization_state(x) == 1); + _recursively_bump_finalization_state(base, x, 2); + } + return marked; +} + +static void mark_finalize_step2(char *base, struct finalizers_s *f, + struct list_s *marked) +{ + if (f == NULL) + return; + + struct list_s *run_finalizers = f->run_finalizers; + + long i, count = list_count(marked); + for (i = 0; i < count; i++) { + object_t *x = (object_t *)list_item(marked, i); + + int state = _finalization_state(x); + assert(state >= 2); + if (state == 2) { + if (run_finalizers == NULL) + run_finalizers = list_create(); + LIST_APPEND(run_finalizers, x); + _recursively_bump_finalization_state(base, x, 3); + } + else { + struct list_s *lst = f->objects_with_finalizers; + list_set_item(lst, lst->count++, (uintptr_t)x); + } + } + list_free(marked); + + f->run_finalizers = run_finalizers; +} + +static void deal_with_objects_with_finalizers(void) +{ + /* for non-light finalizers */ + + /* there is one 'objects_with_finalizers' list per segment. + Objects that die at a major collection running in the same + transaction as they were created will be put in the + 'run_finalizers' list of that segment. Objects that survive at + least one commit move to the global g_objects_with_finalizers, + and when they die they go to g_run_finalizers. The former kind + of dying object must have its finalizer called in the correct + thread; the latter kind can be called in any thread, through + any segment, because they all should see the same old content + anyway. 
(If the content was different between segments at this + point, the object would be in a 'modified_old_objects' list + somewhere, and so it wouldn't be dead). + */ + struct list_s *marked_seg[NB_SEGMENTS + 1]; + LIST_CREATE(_finalizer_emptystack); + LIST_CREATE(_finalizer_pending); + + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + marked_seg[j] = mark_finalize_step1(pseg->pub.segment_base, + pseg->finalizers); + } + marked_seg[0] = mark_finalize_step1(stm_object_pages, &g_finalizers); + + LIST_FREE(_finalizer_pending); + + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + mark_finalize_step2(pseg->pub.segment_base, pseg->finalizers, + marked_seg[j]); + } + mark_finalize_step2(stm_object_pages, &g_finalizers, marked_seg[0]); + + LIST_FREE(_finalizer_emptystack); +} + +static void _execute_finalizers(struct finalizers_s *f) +{ + if (f->run_finalizers == NULL) + return; /* nothing to do */ + + restart: + if (f->running_next != NULL) + return; /* in a nested invocation of execute_finalizers() */ + + uintptr_t next = 0, total = list_count(f->run_finalizers); + f->running_next = &next; + + while (next < total) { + object_t *obj = (object_t *)list_item(f->run_finalizers, next); + list_set_item(f->run_finalizers, next, 0); + next++; + + stmcb_finalizer(obj); + } + if (next == (uintptr_t)-1) { + /* transaction committed: the whole 'f' was freed */ + return; + } + f->running_next = NULL; + + if (f->run_finalizers->count > total) { + memmove(f->run_finalizers->items, + f->run_finalizers->items + total, + (f->run_finalizers->count - total) * sizeof(uintptr_t)); + goto restart; + } + + LIST_FREE(f->run_finalizers); +} + +static void _invoke_general_finalizers(stm_thread_local_t *tl) +{ + /* called between transactions */ + static int lock = 0; + + if (__sync_lock_test_and_set(&lock, 1) != 0) { + /* can't acquire the lock: someone else is likely already + running 
this function, so don't wait. */ + return; + } + + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + stm_start_transaction(tl); + + _execute_finalizers(&g_finalizers); + + stm_commit_transaction(); + stm_rewind_jmp_leaveframe(tl, &rjbuf); + + __sync_lock_release(&lock); +} diff --git a/c7/stm/finalizer.h b/c7/stm/finalizer.h new file mode 100644 --- /dev/null +++ b/c7/stm/finalizer.h @@ -0,0 +1,47 @@ + +struct finalizers_s { + struct list_s *objects_with_finalizers; + uintptr_t count_non_young; + struct list_s *run_finalizers; + uintptr_t *running_next; +}; + +static void deal_with_young_objects_with_finalizers(void); +static void deal_with_old_objects_with_finalizers(void); +static void deal_with_objects_with_finalizers(void); + +static void setup_finalizer(void); +static void teardown_finalizer(void); + +static void _commit_finalizers(void); +static void _abort_finalizers(void); + +#define commit_finalizers() do { \ + if (STM_PSEGMENT->finalizers != NULL) \ + _commit_finalizers(); \ +} while (0) + +#define abort_finalizers() do { \ + if (STM_PSEGMENT->finalizers != NULL) \ + _abort_finalizers(); \ +} while (0) + + +/* regular finalizers (objs from already-committed transactions) */ +static struct finalizers_s g_finalizers; + +static void _invoke_general_finalizers(stm_thread_local_t *tl); + +#define invoke_general_finalizers(tl) do { \ + if (g_finalizers.run_finalizers != NULL) \ + _invoke_general_finalizers(tl); \ +} while (0) + +static void _execute_finalizers(struct finalizers_s *f); + +#define any_local_finalizers() (STM_PSEGMENT->finalizers != NULL && \ + STM_PSEGMENT->finalizers->run_finalizers != NULL) +#define exec_local_finalizers() do { \ + if (any_local_finalizers()) \ + _execute_finalizers(STM_PSEGMENT->finalizers); \ +} while (0) diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -55,14 +55,12 @@ s_mutex_unlock(); bool was_in_transaction = _stm_in_transaction(this_tl); - if 
(was_in_transaction) { - stm_become_inevitable(this_tl, "fork"); - /* Note that the line above can still fail and abort, which should - be fine */ - } - else { - stm_start_inevitable_transaction(this_tl); - } + if (!was_in_transaction) + stm_start_transaction(this_tl); + + stm_become_inevitable(this_tl, "fork"); + /* Note that the line above can still fail and abort, which should + be fine */ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -187,7 +185,6 @@ #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif - strcpy(pr->marker_self, "fork"); tl->shadowstack = NULL; pr->shadowstack_at_start_of_transaction = NULL; stm_rewind_jmp_forget(tl); @@ -204,6 +201,9 @@ just release these locks early */ s_mutex_unlock(); + /* Open a new profiling file, if any */ + forksupport_open_new_profiling_file(); + /* Move the copy of the mmap over the old one, overwriting it and thus freeing the old mapping in this process */ diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -141,7 +141,7 @@ if (is_major_collection_requested()) { /* if still true */ - int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); @@ -149,10 +149,11 @@ major_collection_now_at_safe_point(); } - change_timing_state(oldstate); + timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); } s_mutex_unlock(); + exec_local_finalizers(); } @@ -161,7 +162,11 @@ static struct list_s *mark_objects_to_trace; -#define WL_VISITED 255 +#define WL_FINALIZ_ORDER_1 253 +#define WL_FINALIZ_ORDER_2 254 +#define WL_FINALIZ_ORDER_3 WL_VISITED + +#define WL_VISITED 255 static inline uintptr_t mark_loc(object_t *obj) @@ -446,9 +451,9 @@ for (i = list_count(lst); i > 0; i -= 2) { mark_visit_object((object_t *)list_item(lst, i - 1), base); } - if (get_priv_segment(j)->marker_inev[1]) { - uintptr_t marker_inev_obj = 
get_priv_segment(j)->marker_inev[1]; - mark_visit_object((object_t *)marker_inev_obj, base); + if (get_priv_segment(j)->marker_inev.segment_base) { + object_t *marker_inev_obj = get_priv_segment(j)->marker_inev.object; + mark_visit_object(marker_inev_obj, base); } } } @@ -626,8 +631,14 @@ mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); - /* weakrefs: */ + /* finalizer support: will mark as WL_VISITED all objects with a + finalizer and all objects reachable from there, and also moves + some objects from 'objects_with_finalizers' to 'run_finalizers'. */ + deal_with_objects_with_finalizers(); + + /* weakrefs and old light finalizers */ stm_visit_old_weakrefs(); + deal_with_old_objects_with_finalizers(); /* cleanup */ clean_up_segment_lists(); diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -30,6 +30,21 @@ return lst; } +static struct list_s *list_extend(struct list_s *lst, struct list_s *lst2, + uintptr_t slicestart) +{ + if (lst2->count <= slicestart) + return lst; + uintptr_t baseindex = lst->count; + lst->count = baseindex + lst2->count - slicestart; + uintptr_t lastindex = lst->count - 1; + if (lastindex > lst->last_allocated) + lst = _list_grow(lst, lastindex); + memcpy(lst->items + baseindex, lst2->items + slicestart, + (lst2->count - slicestart) * sizeof(uintptr_t)); + return lst; +} + /************************************************************/ diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -83,6 +83,9 @@ return &lst->items[index]; } +static struct list_s *list_extend(struct list_s *lst, struct list_s *lst2, + uintptr_t slicestart); + #define LIST_FOREACH_R(lst, TYPE, CODE) \ do { \ struct list_s *_lst = (lst); \ diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -3,18 +3,11 @@ #endif -void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize); - 
-void (*stmcb_debug_print)(const char *cause, double time, - const char *marker); - - -static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +static void marker_fetch(stm_loc_marker_t *out_marker) { - /* fetch the current marker from the tl's shadow stack, - and return it in 'marker[2]'. */ + /* Fetch the current marker from the 'out_marker->tl's shadow stack, + and return it in 'out_marker->odd_number' and 'out_marker->object'. */ + stm_thread_local_t *tl = out_marker->tl; struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; @@ -28,85 +21,31 @@ } if (current != base) { /* found the odd marker */ - marker[0] = (uintptr_t)current[0].ss; - marker[1] = (uintptr_t)current[1].ss; + out_marker->odd_number = (uintptr_t)current[0].ss; + out_marker->object = current[1].ss; } else { /* no marker found */ - marker[0] = 0; - marker[1] = 0; + out_marker->odd_number = 0; + out_marker->object = NULL; } } -static void marker_expand(uintptr_t marker[2], char *segment_base, - char *outmarker) +static void _timing_fetch_inev(void) { - /* Expand the marker given by 'marker[2]' into a full string. This - works assuming that the marker was produced inside the segment - given by 'segment_base'. If that's from a different thread, you - must first acquire the corresponding 'marker_lock'. 
*/ - assert(_has_mutex()); - outmarker[0] = 0; - if (marker[0] == 0) - return; /* no marker entry found */ - if (stmcb_expand_marker != NULL) { - stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], - outmarker, _STM_MARKER_LEN); - } + stm_loc_marker_t marker; + marker.tl = STM_SEGMENT->running_thread; + marker_fetch(&marker); + STM_PSEGMENT->marker_inev.odd_number = marker.odd_number; + STM_PSEGMENT->marker_inev.object = marker.object; } -static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) +static void marker_fetch_obj_write(object_t *obj, stm_loc_marker_t *out_marker) { - if (pseg->marker_self[0] != 0) - return; /* already collected an entry */ - - uintptr_t marker[2]; - marker_fetch(pseg->pub.running_thread, marker); - marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); - pseg->marker_other[0] = 0; -} - -char *_stm_expand_marker(void) -{ - /* for tests only! */ - static char _result[_STM_MARKER_LEN]; - uintptr_t marker[2]; - _result[0] = 0; - s_mutex_lock(); - marker_fetch(STM_SEGMENT->running_thread, marker); - marker_expand(marker, STM_SEGMENT->segment_base, _result); - s_mutex_unlock(); - return _result; -} - -static void marker_copy(stm_thread_local_t *tl, - struct stm_priv_segment_info_s *pseg, - enum stm_time_e attribute_to, double time) -{ - /* Copies the marker information from pseg to tl. This is called - indirectly from abort_with_mutex(), but only if the lost time is - greater than that of the previous recorded marker. By contrast, - pseg->marker_self has been filled already in all cases. The - reason for the two steps is that we must fill pseg->marker_self - earlier than now (some objects may be GCed), but we only know - here the total time it gets attributed. + /* From 'out_marker->tl', fill in 'out_marker->segment_base' and + 'out_marker->odd_number' and 'out_marker->object' from the + marker associated with writing the 'obj'. 
*/ - if (stmcb_debug_print) { - stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self); - } - if (time * 0.99 > tl->longest_marker_time) { - tl->longest_marker_state = attribute_to; - tl->longest_marker_time = time; - memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); - memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); - } - pseg->marker_self[0] = 0; - pseg->marker_other[0] = 0; -} - -static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, - uintptr_t marker[2]) -{ assert(_has_mutex()); /* here, we acquired the other thread's marker_lock, which means that: @@ -118,80 +57,86 @@ the global mutex_lock at this point too). */ long i; + int in_segment_num = out_marker->tl->associated_segment_num; struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); struct list_s *mlst = pseg->modified_old_objects; struct list_s *mlstm = pseg->modified_old_objects_markers; - for (i = list_count(mlst); --i >= 0; ) { + assert(list_count(mlstm) <= 2 * list_count(mlst)); + for (i = list_count(mlstm) / 2; --i >= 0; ) { if (list_item(mlst, i) == (uintptr_t)obj) { - assert(list_count(mlstm) == 2 * list_count(mlst)); - marker[0] = list_item(mlstm, i * 2 + 0); - marker[1] = list_item(mlstm, i * 2 + 1); + out_marker->odd_number = list_item(mlstm, i * 2 + 0); + out_marker->object = (object_t *)list_item(mlstm, i * 2 + 1); return; } } - marker[0] = 0; - marker[1] = 0; + out_marker->odd_number = 0; + out_marker->object = NULL; } -static void marker_contention(int kind, bool abort_other, - uint8_t other_segment_num, object_t *obj) +static void _timing_record_write(void) { - uintptr_t self_marker[2]; - uintptr_t other_marker[2]; - struct stm_priv_segment_info_s *my_pseg, *other_pseg; + stm_loc_marker_t marker; + marker.tl = STM_SEGMENT->running_thread; + marker_fetch(&marker); - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + long base_count = list_count(STM_PSEGMENT->modified_old_objects); + struct 
list_s *mlstm = STM_PSEGMENT->modified_old_objects_markers; + while (list_count(mlstm) < 2 * base_count) { + mlstm = list_append2(mlstm, 0, 0); + } + mlstm = list_append2(mlstm, marker.odd_number, (uintptr_t)marker.object); + STM_PSEGMENT->modified_old_objects_markers = mlstm; +} + +static void _timing_contention(enum stm_event_e kind, + uint8_t other_segment_num, object_t *obj) +{ + struct stm_priv_segment_info_s *other_pseg; other_pseg = get_priv_segment(other_segment_num); - char *my_segment_base = STM_SEGMENT->segment_base; - char *other_segment_base = get_segment_base(other_segment_num); + char *other_segment_base = other_pseg->pub.segment_base; + acquire_marker_lock(other_segment_base); - acquire_marker_lock(other_segment_base); + stm_loc_marker_t markers[2]; /* Collect the location for myself. It's usually the current location, except in a write-read abort, in which case it's the older location of the write. */ - if (kind == WRITE_READ_CONTENTION) - marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + markers[0].tl = STM_SEGMENT->running_thread; + markers[0].segment_base = STM_SEGMENT->segment_base; + + if (kind == STM_CONTENTION_WRITE_READ) + marker_fetch_obj_write(obj, &markers[0]); else - marker_fetch(my_pseg->pub.running_thread, self_marker); - - /* Expand this location into either my_pseg->marker_self or - other_pseg->marker_other, depending on who aborts. */ - marker_expand(self_marker, my_segment_base, - abort_other ? other_pseg->marker_other - : my_pseg->marker_self); + marker_fetch(&markers[0]); /* For some categories, we can also collect the relevant information for the other segment. */ - char *outmarker = abort_other ? 
other_pseg->marker_self - : my_pseg->marker_other; + markers[1].tl = other_pseg->pub.running_thread; + markers[1].segment_base = other_pseg->pub.segment_base; + switch (kind) { - case WRITE_WRITE_CONTENTION: - marker_fetch_obj_write(other_segment_num, obj, other_marker); - marker_expand(other_marker, other_segment_base, outmarker); + case STM_CONTENTION_WRITE_WRITE: + marker_fetch_obj_write(obj, &markers[1]); break; - case INEVITABLE_CONTENTION: - assert(abort_other == false); - other_marker[0] = other_pseg->marker_inev[0]; - other_marker[1] = other_pseg->marker_inev[1]; - marker_expand(other_marker, other_segment_base, outmarker); - break; - case WRITE_READ_CONTENTION: - strcpy(outmarker, ""); + case STM_CONTENTION_INEVITABLE: + markers[1].odd_number = other_pseg->marker_inev.odd_number; + markers[1].object = other_pseg->marker_inev.object; break; default: - outmarker[0] = 0; + markers[1].odd_number = 0; + markers[1].object = NULL; break; } + stmcb_timing_event(markers[0].tl, kind, markers); + + /* only release the lock after stmcb_timing_event(), otherwise it could + run into race conditions trying to interpret 'markers[1].object' */ release_marker_lock(other_segment_base); } -static void marker_fetch_inev(void) -{ - uintptr_t marker[2]; - marker_fetch(STM_SEGMENT->running_thread, marker); - STM_PSEGMENT->marker_inev[0] = marker[0]; - STM_PSEGMENT->marker_inev[1] = marker[1]; -} + +void (*stmcb_timing_event)(stm_thread_local_t *tl, /* the local thread */ + enum stm_event_e event, + stm_loc_marker_t *markers); diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -1,12 +1,19 @@ -static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); -static void marker_fetch_inev(void); -static void marker_expand(uintptr_t marker[2], char *segment_base, - char *outmarker); -static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); -static void marker_copy(stm_thread_local_t *tl, - struct 
stm_priv_segment_info_s *pseg, - enum stm_time_e attribute_to, double time); +static void _timing_record_write(void); +static void _timing_fetch_inev(void); +static void _timing_contention(enum stm_event_e kind, + uint8_t other_segment_num, object_t *obj); -static void marker_contention(int kind, bool abort_other, - uint8_t other_segment_num, object_t *obj); + +#define timing_event(tl, event) \ + (stmcb_timing_event != NULL ? stmcb_timing_event(tl, event, NULL) : (void)0) + +#define timing_record_write() \ + (stmcb_timing_event != NULL ? _timing_record_write() : (void)0) + +#define timing_fetch_inev() \ + (stmcb_timing_event != NULL ? _timing_fetch_inev() : (void)0) + +#define timing_contention(kind, other_segnum, obj) \ + (stmcb_timing_event != NULL ? \ + _timing_contention(kind, other_segnum, obj) : (void)0) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -425,14 +425,32 @@ for (i = num_old + 1; i < total; i += 2) { minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); } - if (STM_PSEGMENT->marker_inev[1]) { - uintptr_t *pmarker_inev_obj = (uintptr_t *) + if (STM_PSEGMENT->marker_inev.segment_base) { + assert(STM_PSEGMENT->marker_inev.segment_base == + STM_SEGMENT->segment_base); + object_t **pmarker_inev_obj = (object_t **) REAL_ADDRESS(STM_SEGMENT->segment_base, - &STM_PSEGMENT->marker_inev[1]); - minor_trace_if_young((object_t **)pmarker_inev_obj); + &STM_PSEGMENT->marker_inev.object); + minor_trace_if_young(pmarker_inev_obj); } } +static void collect_objs_still_young_but_with_finalizers(void) +{ + struct list_s *lst = STM_PSEGMENT->finalizers->objects_with_finalizers; + uintptr_t i, total = list_count(lst); + + for (i = STM_PSEGMENT->finalizers->count_non_young; i < total; i++) { + + object_t *o = (object_t *)list_item(lst, i); + minor_trace_if_young(&o); + + /* was not actually movable */ + assert(o == (object_t *)list_item(lst, i)); + } + STM_PSEGMENT->finalizers->count_non_young = total; +} + static 
size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { #pragma push_macro("STM_PSEGMENT") @@ -552,11 +570,15 @@ collect_roots_in_nursery(); + if (STM_PSEGMENT->finalizers != NULL) + collect_objs_still_young_but_with_finalizers(); + collect_oldrefs_to_nursery(); assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); /* now all surviving nursery objects have been moved out */ stm_move_young_weakrefs(); + deal_with_young_objects_with_finalizers(); throw_away_nursery(get_priv_segment(STM_SEGMENT->segment_num)); @@ -572,11 +594,11 @@ stm_safe_point(); - change_timing_state(STM_TIME_MINOR_GC); + timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_START); _do_minor_collection(commit); - change_timing_state(commit ? STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT); + timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_DONE); } void stm_collect(long level) diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,7 +1,13 @@ -/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ -#define NSE_SIGPAUSE STM_TIME_WAIT_OTHER -#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON +/* 'nursery_end' is either NURSERY_END or one of NSE_SIGxxx */ +#define NSE_SIGABORT 1 +#define NSE_SIGPAUSE 2 +#define NSE_SIGCOMMITSOON 3 +#define _NSE_NUM_SIGNALS 4 + +#if _NSE_NUM_SIGNALS >= _STM_NSE_SIGNAL_MAX +# error "increase _STM_NSE_SIGNAL_MAX" +#endif static uint32_t highest_overflow_number; diff --git a/c7/stm/prof.c b/c7/stm/prof.c new file mode 100644 --- /dev/null +++ b/c7/stm/prof.c @@ -0,0 +1,106 @@ +#include + + +static FILE *profiling_file; +static char *profiling_basefn = NULL; +static int (*profiling_expand_marker)(stm_loc_marker_t *, char *, int); + + +static void _stm_profiling_event(stm_thread_local_t *tl, + enum stm_event_e event, + stm_loc_marker_t *markers) +{ + struct buf_s { + uint32_t tv_sec; + uint32_t tv_nsec; + uint32_t thread_num; + uint32_t other_thread_num; + uint8_t event; + uint8_t 
marker_length[2]; + char extra[256]; + } __attribute__((packed)); + + struct buf_s buf; + struct timespec t; + clock_gettime(CLOCK_MONOTONIC, &t); + buf.tv_sec = t.tv_sec; + buf.tv_nsec = t.tv_nsec; + buf.thread_num = tl->thread_local_counter; + buf.other_thread_num = 0; + buf.event = event; + + int len0 = 0; + int len1 = 0; + if (markers != NULL) { + if (markers[1].tl != NULL) + buf.other_thread_num = markers[1].tl->thread_local_counter; + if (markers[0].odd_number != 0) + len0 = profiling_expand_marker(&markers[0], buf.extra, 128); + if (markers[1].odd_number != 0) + len1 = profiling_expand_marker(&markers[1], buf.extra + len0, 128); + } + buf.marker_length[0] = len0; + buf.marker_length[1] = len1; + + fwrite(&buf, offsetof(struct buf_s, extra) + len0 + len1, + 1, profiling_file); +} + +static int default_expand_marker(stm_loc_marker_t *m, char *p, int s) +{ + *(uintptr_t *)p = m->odd_number; + return sizeof(uintptr_t); +} + +static bool open_timing_log(const char *filename) +{ + profiling_file = fopen(filename, "w"); + if (profiling_file == NULL) + return false; + + fwrite("STMGC-C7-PROF01\n", 16, 1, profiling_file); + stmcb_timing_event = _stm_profiling_event; + return true; +} + +static bool close_timing_log(void) +{ + if (stmcb_timing_event == &_stm_profiling_event) { + stmcb_timing_event = NULL; + fclose(profiling_file); + profiling_file = NULL; + return true; + } + return false; +} + +static void forksupport_open_new_profiling_file(void) +{ + if (close_timing_log() && profiling_basefn != NULL) { + char filename[1024]; + snprintf(filename, sizeof(filename), + "%s.fork%ld", profiling_basefn, (long)getpid()); + open_timing_log(filename); + } +} + +int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)) +{ + close_timing_log(); + free(profiling_basefn); + profiling_basefn = NULL; + + if (profiling_file_name == NULL) + return 0; + + if (!expand_marker) + expand_marker = default_expand_marker; + 
profiling_expand_marker = expand_marker; + + if (!open_timing_log(profiling_file_name)) + return -1; + + profiling_basefn = strdup(profiling_file_name); + return 0; +} diff --git a/c7/stm/prof.h b/c7/stm/prof.h new file mode 100644 --- /dev/null +++ b/c7/stm/prof.h @@ -0,0 +1,2 @@ + +static void forksupport_open_new_profiling_file(void); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -22,8 +22,8 @@ static char *setup_mmap(char *reason, int *map_fd) { char name[128]; - sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e", - (long)getpid(), get_stm_time()); + sprintf(name, "/stmgc-c7-bigmem-%ld", + (long)getpid()); /* Create the big shared memory object, and immediately unlink it. There is a small window where if this process is killed the @@ -113,7 +113,7 @@ /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ + assert(1 <= i && i < 253); /* 253 is WL_FINALIZ_ORDER_1 in gcpage.c */ pr->write_lock_num = i; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; @@ -128,6 +128,8 @@ pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_commit_and_abort[0] = tree_create(); pr->callbacks_on_commit_and_abort[1] = tree_create(); + pr->young_objects_with_light_finalizers = list_create(); + pr->old_objects_with_light_finalizers = list_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; @@ -147,6 +149,7 @@ setup_gcpage(); setup_pages(); setup_forksupport(); + setup_finalizer(); } void stm_teardown(void) @@ -169,12 +172,15 @@ tree_free(pr->nursery_objects_shadows); tree_free(pr->callbacks_on_commit_and_abort[0]); tree_free(pr->callbacks_on_commit_and_abort[1]); + list_free(pr->young_objects_with_light_finalizers); + list_free(pr->old_objects_with_light_finalizers); } munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; 
close_fd_mmap(stm_object_pages_fd); + teardown_finalizer(); teardown_core(); teardown_sync(); teardown_gcpage(); @@ -225,6 +231,8 @@ return (pthread_t *)(tl->creating_pthread); } +static int thread_local_counters = 0; + void stm_register_thread_local(stm_thread_local_t *tl) { int num; @@ -241,14 +249,13 @@ num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; - tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION; - tl->_timing_cur_start = get_stm_time(); /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own numbers automatically. */ num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; + tl->thread_local_counter = ++thread_local_counters; *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -123,32 +123,19 @@ /************************************************************/ -static void wait_for_end_of_inevitable_transaction( - stm_thread_local_t *tl_or_null_if_can_abort) +static void wait_for_end_of_inevitable_transaction(void) { long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); if (other_pseg->transaction_state == TS_INEVITABLE) { - if (tl_or_null_if_can_abort == NULL) { - /* handle this case like a contention: it will either - abort us (not the other thread, which is inevitable), - or wait for a while. If we go past this call, then we - waited; in this case we have to re-check if no other - thread is inevitable. 
*/ - inevitable_contention_management(i); - } - else { - /* wait for stm_commit_transaction() to finish this - inevitable transaction */ - signal_other_to_commit_soon(other_pseg); - change_timing_state_tl(tl_or_null_if_can_abort, - STM_TIME_WAIT_INEVITABLE); - cond_wait(C_INEVITABLE); - /* don't bother changing the timing state again: the caller - will very soon go to STM_TIME_RUN_CURRENT */ - } + /* handle this case like a contention: it will either + abort us (not the other thread, which is inevitable), + or wait for a while. If we go past this call, then we + waited; in this case we have to re-check if no other + thread is inevitable. */ + inevitable_contention_management(i); goto restart; } } @@ -188,8 +175,9 @@ } /* No segment available. Wait until release_thread_segment() signals that one segment has been freed. */ - change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT); + timing_event(tl, STM_WAIT_FREE_SEGMENT); cond_wait(C_SEGMENT_FREE); + timing_event(tl, STM_WAIT_DONE); /* Return false to the caller, which will call us again */ return false; @@ -240,6 +228,7 @@ assert(_stm_in_transaction(tl)); set_gs_register(get_segment_base(tl->associated_segment_num)); assert(STM_SEGMENT->running_thread == tl); + exec_local_finalizers(); } #if STM_TESTS @@ -331,7 +320,6 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) return; /* fast path: no safe point requested */ - int previous_state = -1; assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { @@ -342,10 +330,6 @@ break; /* no safe point requested */ if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { - if (previous_state == -1) { - previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); - } - STM_PSEGMENT->signalled_to_commit_soon = true; stmcb_commit_soon(); if (!pause_signalled) { @@ -362,17 +346,12 @@ #ifdef STM_TESTS abort_with_mutex(); #endif - if (previous_state == -1) { - previous_state = change_timing_state(STM_TIME_SYNC_PAUSE); - } + 
timing_event(STM_SEGMENT->running_thread, STM_WAIT_SYNC_PAUSE); cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); STM_PSEGMENT->safe_point = SP_RUNNING; - } - - if (previous_state != -1) { - change_timing_state(previous_state); + timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE); } } diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -28,7 +28,7 @@ static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); -static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *); +static void wait_for_end_of_inevitable_transaction(void); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, diff --git a/c7/stm/timing.c b/c7/stm/timing.c deleted file mode 100644 --- a/c7/stm/timing.c +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef _STM_CORE_H_ -# error "must be compiled via stmgc.c" -#endif - - -static inline void add_timing(stm_thread_local_t *tl, enum stm_time_e category, - double elapsed) -{ - tl->timing[category] += elapsed; - tl->events[category] += 1; -} - -#define TIMING_CHANGE(tl, newstate) \ - double curtime = get_stm_time(); \ - double elasped = curtime - tl->_timing_cur_start; \ - enum stm_time_e oldstate = tl->_timing_cur_state; \ - add_timing(tl, oldstate, elasped); \ - tl->_timing_cur_state = newstate; \ - tl->_timing_cur_start = curtime - -static enum stm_time_e change_timing_state(enum stm_time_e newstate) -{ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - TIMING_CHANGE(tl, newstate); - return oldstate; -} - -static double change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate) -{ - TIMING_CHANGE(tl, newstate); - return elasped; -} - -static void timing_end_transaction(enum stm_time_e attribute_to) -{ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); - double time_this_transaction = 
tl->timing[STM_TIME_RUN_CURRENT]; - add_timing(tl, attribute_to, time_this_transaction); - tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; - - if (attribute_to != STM_TIME_RUN_COMMITTED) { - struct stm_priv_segment_info_s *pseg = - get_priv_segment(STM_SEGMENT->segment_num); - marker_copy(tl, pseg, attribute_to, time_this_transaction); - } -} - -static const char *timer_names[] = { - "outside transaction", - "run current", - "run committed", - "run aborted write write", - "run aborted write read", - "run aborted inevitable", - "run aborted other", - "wait free segment", - "wait write read", - "wait inevitable", - "wait other", - "sync commit soon", - "bookkeeping", - "minor gc", - "major gc", - "sync pause", -}; - -void stm_flush_timing(stm_thread_local_t *tl, int verbose) -{ - enum stm_time_e category = tl->_timing_cur_state; - uint64_t oldevents = tl->events[category]; - TIMING_CHANGE(tl, category); - tl->events[category] = oldevents; - - assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N); - if (verbose > 0) { - int i; - s_mutex_lock(); - fprintf(stderr, "thread %p:\n", tl); - for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %9u %8.3f s\n", - timer_names[i], tl->events[i], (double)tl->timing[i]); - } - fprintf(stderr, " %-24s %6s %11.6f s\n", - "longest recorded marker", "", tl->longest_marker_time); - fprintf(stderr, " \"%.*s\"\n", - (int)_STM_MARKER_LEN, tl->longest_marker_self); - s_mutex_unlock(); - } -} diff --git a/c7/stm/timing.h b/c7/stm/timing.h deleted file mode 100644 --- a/c7/stm/timing.h +++ /dev/null @@ -1,14 +0,0 @@ -#include - -static inline double get_stm_time(void) -{ - struct timespec tp; - clock_gettime(CLOCK_MONOTONIC, &tp); - return tp.tv_sec + tp.tv_nsec * 0.000000001; -} - -static enum stm_time_e change_timing_state(enum stm_time_e newstate); -static double change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate); - -static void timing_end_transaction(enum stm_time_e attribute_to); diff --git 
a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -14,8 +14,9 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" -#include "stm/timing.h" #include "stm/marker.h" +#include "stm/prof.h" +#include "stm/finalizer.h" #include "stm/misc.c" #include "stm/list.c" @@ -34,6 +35,7 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" -#include "stm/timing.c" #include "stm/marker.c" +#include "stm/prof.c" #include "stm/rewind_setjmp.c" +#include "stm/finalizer.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -54,28 +54,6 @@ From noreply at buildbot.pypy.org Mon Jan 19 16:10:00 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 16:10:00 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix duhton c7 Message-ID: <20150119151000.253AF1C0041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1547:5b9a7b8de4af Date: 2015-01-19 15:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/5b9a7b8de4af/ Log: fix duhton c7 diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -6,7 +6,7 @@ COMMON = -pthread -lrt -g -Wall -all: duhton_debug duhton +all: duhton_debug duhton duhton_release duhton: *.c *.h $(C7SOURCES) $(C7HEADERS) clang $(COMMON) -O2 -o duhton *.c ../c7/stmgc.c diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -45,7 +45,7 @@ abort(); } void stmcb_commit_soon(void) { } - +long stmcb_obj_supports_cards(struct object_s *obj) {return 0;} diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -187,7 +187,6 @@ } - stm_flush_timing(&stm_thread_local, 1); stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); stm_unregister_thread_local(&stm_thread_local); From noreply at buildbot.pypy.org Mon Jan 19 16:10:01 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 16:10:01 
+0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: add thread_local_obj support Message-ID: <20150119151001.3F4931C0041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1548:86a5eee853db Date: 2015-01-19 16:03 +0100 http://bitbucket.org/pypy/stmgc/changeset/86a5eee853db/ Log: add thread_local_obj support diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -650,6 +650,7 @@ STM_PSEGMENT->running_pthread = pthread_self(); #endif STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; + STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; enter_safe_point_if_requested(); dprintf(("> start_transaction\n")); @@ -849,6 +850,7 @@ stm_rewind_jmp_restore_shadowstack(tl); assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); #endif + tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; list_clear(pseg->objects_pointing_to_nursery); diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -84,6 +84,7 @@ struct stm_commit_log_entry_s *last_commit_log_entry; struct stm_shadowentry_s *shadowstack_at_start_of_transaction; + object_t *threadlocal_at_start_of_transaction; /* For debugging */ #ifndef NDEBUG diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -326,6 +326,7 @@ if ((((uintptr_t)current->ss) & 3) == 0) mark_visit_object(current->ss, stm_object_pages); } + mark_visit_object(tl->thread_local_obj, stm_object_pages); tl = tl->next; } while (tl != stm_all_thread_locals); @@ -334,6 +335,9 @@ long i; for (i = 1; i < NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state != TS_NONE) { + mark_visit_object( + get_priv_segment(i)->threadlocal_at_start_of_transaction, + stm_object_pages); stm_rewind_jmp_enum_shadowstack( get_segment(i)->running_thread, mark_visit_objects_from_ss); diff --git a/c8/stm/nursery.c 
b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -156,6 +156,8 @@ /* it is an odd-valued marker, ignore */ } } + + minor_trace_if_young(&tl->thread_local_obj); } diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -210,6 +210,7 @@ stm_all_thread_locals->prev = tl; num = (tl->prev->associated_segment_num) % (NB_SEGMENTS-1); } + tl->thread_local_obj = NULL; /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -48,6 +48,9 @@ rewind_jmp_thread rjthread; struct stm_shadowentry_s *shadowstack, *shadowstack_base; + /* a generic optional thread-local object */ + object_t *thread_local_obj; + char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; long last_abort__bytes_in_nursery; diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -23,6 +23,7 @@ typedef struct { rewind_jmp_thread rjthread; struct stm_shadowentry_s *shadowstack, *shadowstack_base; + object_t *thread_local_obj; char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; long last_abort__bytes_in_nursery; diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -241,7 +241,6 @@ self.test_reshare_if_no_longer_modified_0(invert=1) def test_threadlocal_at_start_of_transaction(self): - py.test.skip("no threadlocal right now") self.start_transaction() x = stm_allocate(16) stm_set_char(x, 'L') @@ -262,6 +261,7 @@ self.start_transaction() assert stm_get_char(self.get_thread_local_obj()) == 'L' + self.commit_transaction() def test_marker_1(self): self.start_transaction() From noreply at buildbot.pypy.org Mon Jan 19 16:10:02 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Jan 2015 16:10:02 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: add c8 version of 
duhton Message-ID: <20150119151002.573031C0041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1549:81b95867774a Date: 2015-01-19 16:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/81b95867774a/ Log: add c8 version of duhton diff too long, truncating to 2000 out of 3255 lines diff --git a/duhton-c8/Makefile b/duhton-c8/Makefile new file mode 100644 --- /dev/null +++ b/duhton-c8/Makefile @@ -0,0 +1,23 @@ + +C8SOURCES = ../c8/stmgc.c ../c8/stm/*.c + +C8HEADERS = ../c8/stmgc.h ../c8/stm/*.h + +COMMON = -pthread -lrt -g -Wall + + +all: duhton_debug duhton duhton_release + +duhton: *.c *.h $(C8SOURCES) $(C8HEADERS) + clang $(COMMON) -O2 -o duhton *.c ../c8/stmgc.c + +duhton_release: *.c *.h $(C8SOURCES) $(C8HEADERS) + clang $(COMMON) -DNDEBUG -O2 -o duhton_release *.c ../c8/stmgc.c + + +duhton_debug: *.c *.h $(C8SOURCES) $(C8HEADERS) + clang -DSTM_DEBUGPRINT $(COMMON) -DDu_DEBUG -o duhton_debug *.c ../c8/stmgc.c + + +clean: + rm -f duhton duhton_debug duhton_release diff --git a/duhton-c8/ceval.c b/duhton-c8/ceval.c new file mode 100644 --- /dev/null +++ b/duhton-c8/ceval.c @@ -0,0 +1,13 @@ +#include "duhton.h" + + +DuObject *Du_Eval(DuObject *ob, DuObject *locals) +{ + eval_fn fn = Du_TYPE(ob)->dt_eval; + if (fn) { + return fn(ob, locals); + } + else { + return ob; + } +} diff --git a/duhton-c8/compile.c b/duhton-c8/compile.c new file mode 100644 --- /dev/null +++ b/duhton-c8/compile.c @@ -0,0 +1,114 @@ +#include +#include +#include +#include "duhton.h" + + +DuObject *_Du_Parse(FILE *f, int level, int stop_after_newline) +{ + DuObject *cons, *list; + int c, i; + + list = DuList_New(); + if (level == 0) { + _du_save1(list); + DuObject *item = DuSymbol_FromString("progn"); + _du_restore1(list); + _du_save1(list); + DuList_Append(list, item); + _du_restore1(list); + } + c = fgetc(f); + while (1) { + DuObject *item; + + switch (c) { + + case EOF: + if (level > 0) + Du_FatalError("more '(' than ')'"); + if (stop_after_newline) { + 
return NULL; + } + goto done; + + case '(': + _du_save1(list); + item = _Du_Parse(f, level + 1, 0); + _du_restore1(list); + c = fgetc(f); + break; + + case ')': + if (level == 0) + Du_FatalError("more ')' than '('"); + goto done; + + case '\n': + if (stop_after_newline) + goto done; + c = fgetc(f); + continue; + + case ';': + while (c != '\n' && c != EOF) + c = fgetc(f); + continue; + + default: + if (isspace(c)) { + c = fgetc(f); + continue; + } + else { + char token[201]; + char *p = token; + char *end; + int number; + do { + *p++ = c; + c = fgetc(f); + } while (!(isspace(c) || c == '(' || c == ')' || c == EOF)); + *p = '\0'; + number = strtol(token, &end, 0); + _du_save1(list); + if (*end == '\0') { + item = DuInt_FromInt(number); + } + else { + item = DuSymbol_FromString(token); + } + _du_restore1(list); + break; + } + } + _du_save1(list); + DuList_Append(list, item); + _du_restore1(list); + } + + done: + cons = Du_None; + for (i = DuList_Size(list) - 1; i >= 0; i--) { + DuObject *item = DuList_GetItem(list, i); + _du_save1(list); + cons = DuCons_New(item, cons); + _du_restore1(list); + } + return cons; +} + + +DuObject *Du_Compile(char *filename, int stop_after_newline) +{ + FILE *f; + if (strcmp(filename, "-") == 0) + f = stdin; + else + f = fopen(filename, "r"); + if (!f) Du_FatalError("cannot open '%s'", filename); + DuObject *cons = _Du_Parse(f, 0, stop_after_newline); + if (f != stdin) + fclose(f); + return cons; +} diff --git a/duhton-c8/consobject.c b/duhton-c8/consobject.c new file mode 100644 --- /dev/null +++ b/duhton-c8/consobject.c @@ -0,0 +1,93 @@ +#include "duhton.h" + + +void cons_trace(struct DuConsObject_s *ob, void visit(object_t **)) +{ + visit((object_t **)&ob->car); + visit((object_t **)&ob->cdr); +} + +void cons_print(DuConsObject *ob) +{ + DuObject *p; + printf("( "); + while (1) { + /* _du_read1(ob); IMMUTABLE */ + _du_save1(ob); + Du_Print(ob->car, 0); + _du_restore1(ob); + p = ob->cdr; + if (!DuCons_Check(p)) + break; + ob = 
(DuConsObject *)p; + printf(" "); + } + if (p != Du_None) { + printf(" . "); + Du_Print(p, 0); + } + printf(" )"); +} + + + +DuObject *cons_eval(DuConsObject *ob, DuObject *locals) +{ + /* _du_read1(ob); IMMUTABLE */ + return _DuFrame_EvalCall(locals, ob->car, ob->cdr, 1); +} + +DuType DuCons_Type = { + "cons", + DUTYPE_CONS, + sizeof(DuConsObject), + (trace_fn)cons_trace, + (print_fn)cons_print, + (eval_fn)cons_eval, +}; + +DuObject *DuCons_New(DuObject *car, DuObject *cdr) +{ + _du_save2(car, cdr); + DuConsObject *ob = (DuConsObject *)DuObject_New(&DuCons_Type); + _du_restore2(car, cdr); + ob->car = car; + ob->cdr = cdr; + return (DuObject *)ob; +} + +DuObject *DuCons_Car(DuObject *cons) +{ + DuCons_Ensure("DuCons_Car", cons); + /* _du_read1(cons); IMMUTABLE */ + return ((DuConsObject *)cons)->car; +} + +DuObject *DuCons_Cdr(DuObject *cons) +{ + DuCons_Ensure("DuCons_Cdr", cons); + /* _du_read1(cons); IMMUTABLE */ + return ((DuConsObject *)cons)->cdr; +} + +DuObject *_DuCons_CAR(DuObject *cons) +{ + assert(DuCons_Check(cons)); + return ((DuConsObject *)cons)->car; +} + +DuObject *_DuCons_NEXT(DuObject *cons) +{ + assert(DuCons_Check(cons)); + DuObject *result = ((DuConsObject *)cons)->cdr; + if (result != Du_None && !DuCons_Check(cons)) + Du_FatalError("_DuCons_NEXT: not a well-formed cons list"); + return result; +} + +void DuCons_Ensure(char *where, DuObject *ob) +{ + if (!DuCons_Check(ob)) + Du_FatalError("%s: expected 'cons' argument, got '%s'", + where, Du_TYPE(ob)->dt_name); +} diff --git a/duhton-c8/containerobject.c b/duhton-c8/containerobject.c new file mode 100644 --- /dev/null +++ b/duhton-c8/containerobject.c @@ -0,0 +1,61 @@ +#include "duhton.h" + +typedef TLPREFIX struct DuContainerObject_s { + DuOBJECT_HEAD1 + DuObject *ob_reference; +} DuContainerObject; + + +void container_trace(struct DuContainerObject_s *ob, void visit(object_t **)) +{ + visit((object_t **)&ob->ob_reference); +} + +void container_print(DuContainerObject *ob) +{ + 
printf("ob_reference, 0); + printf(">"); +} + +DuObject *DuContainer_GetRef(DuObject *ob) +{ + DuContainer_Ensure("DuContainer_GetRef", ob); + + _du_read1(ob); + return ((DuContainerObject *)ob)->ob_reference; +} + +void DuContainer_SetRef(DuObject *ob, DuObject *x) +{ + DuContainer_Ensure("DuContainer_SetRef", ob); + + _du_write1(ob); + ((DuContainerObject *)ob)->ob_reference = x; +} + +DuType DuContainer_Type = { + "container", + DUTYPE_CONTAINER, + sizeof(DuContainerObject), + (trace_fn)container_trace, + (print_fn)container_print, +}; + +DuObject *DuContainer_New(DuObject *x) +{ + _du_save1(x); + DuContainerObject *ob = \ + (DuContainerObject *)DuObject_New(&DuContainer_Type); + _du_restore1(x); + + ob->ob_reference = x; + return (DuObject *)ob; +} + +void DuContainer_Ensure(char *where, DuObject *ob) +{ + if (!DuContainer_Check(ob)) + Du_FatalError("%s: expected 'container' argument, got '%s'", + where, Du_TYPE(ob)->dt_name); +} diff --git a/duhton-c8/demo/container_transaction.duh b/duhton-c8/demo/container_transaction.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/container_transaction.duh @@ -0,0 +1,20 @@ + +(setq c (container 0)) + +(defun g (thread n) + (set c (+ (get c) 1)) + (if (> (get c) 20000) + (print (quote overflow) (get c)) + (if (< n 10000) + (transaction f thread (+ n 1)) + (if (< (get c) 20000) + (print (quote not-enough)) + (print (quote ok)))))) + +(defun f (thread n) + (print (quote <) thread n (quote >)) + (g thread n)) + +(transaction f (quote t1) 1) +(transaction f (quote t2) 1) +(transaction f (quote t3) 1) diff --git a/duhton-c8/demo/list_transaction.duh b/duhton-c8/demo/list_transaction.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/list_transaction.duh @@ -0,0 +1,17 @@ + +(setq lst (list 0)) +(defun g (n) + (set lst 0 (+ (get lst 0) 1)) + (if (< n 10) + (transaction f (+ n 1)) + (sleepms 20) + (if (< (get lst 0) 20) + (print (quote not-enough)) + (print (quote ok))))) +(defun f (n) + (if (defined? 
marker) + (print (quote marker-already-defined))) + (setq marker 0) + (g n)) +(transaction f 1) +(transaction f 1) diff --git a/duhton-c8/demo/many_square_roots.duh b/duhton-c8/demo/many_square_roots.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/many_square_roots.duh @@ -0,0 +1,17 @@ + + +(defun square-root (n) + (setq i 0) + (while (< (* i i) n) + (setq i (+ i 1))) + i) + +(defun show-square-root (n) + (setq s (square-root n)) + (print (quote square-root-of) n (quote is) s)) + + +(setq n 0) +(while (< n 200) + (transaction show-square-root (+ 1000000000 (* n 100000))) + (setq n (+ n 1))) diff --git a/duhton-c8/demo/micro_transactions.duh b/duhton-c8/demo/micro_transactions.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/micro_transactions.duh @@ -0,0 +1,27 @@ + + + +;;(setq c (container 0)) + + +(defun increment () + ) + + +(defun big_transactions () + (setq n 0) + (while (< n 20000) + (transaction increment) + (setq n (+ n 1)) + ) + ) + +(setq n 0) +(while (< n 800) + (transaction big_transactions) + (setq n (+ n 1)) + ) + +(setq timer (time)) +(run-transactions) +(print (quote TIME_IN_PARALLEL:) (- (time) timer)) diff --git a/duhton-c8/demo/minimal.duh b/duhton-c8/demo/minimal.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/minimal.duh @@ -0,0 +1,3 @@ + +(print (+ 40 2)) +(print (- 45 3)) diff --git a/duhton-c8/demo/nqueens.duh b/duhton-c8/demo/nqueens.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/nqueens.duh @@ -0,0 +1,148 @@ + + + + + + +(defun abs (i) + (if (<= 0 i) + i + (- 0 i))) + +(defun clean_list (n) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res 0) + (setq i (- i 1)) + ) + res + ) + +(defun copy_list (xs) + (setq res (list)) + (setq idx 0) + (while (< idx (len xs)) + (append res (get xs idx)) + (setq idx (+ idx 1)) + ) + res + ) + + +(defun attacks (hist col i j) + (|| (== (get hist j) i) + (== (abs (- (get hist j) i)) + (- col j))) + ) + +(defun print_solution (hist n) + (print 
(quote solution) n) + (setq i 0) + (while (< i n) + (setq line (list)) + (setq j 0) + (while (< j n) + (if (== j (get hist i)) + (append line (quote Q)) + (if (== 0 (% (+ i j) 2)) + (append line (quote .)) + (append line (quote ,)) + ) + ) + (setq j (+ j 1)) + ) + + (print line) + (setq i (+ i 1)) + ) + ) + +(defun solve (n col hist count) + (if (== col n) + (progn + (set count (+ (get count) 1)) + ;; (print_solution hist n) + ) + + ;; else + (setq i 0) + (while (< i n) + (setq j 0) + (while (&& (< j col) + (not (attacks hist col i j))) + (setq j (+ j 1)) + ) + + (if (>= j col) + (progn + (set hist col i) + (solve n (+ col 1) hist count) + )) + + (setq i (+ i 1)) + ) + ) + ) + + +(defun solve_parallel (n col hist count) + (if (== col n) + (progn + (set count (+ (get count) 1)) + ;; (print_solution hist n) + ) + + ;; else + (setq i 0) + (setq transaction-limit 1) + (if (== col transaction-limit) + (setq counts (list))) + + (while (< i n) + (setq j 0) + (while (&& (< j col) + (not (attacks hist col i j))) + (setq j (+ j 1)) + ) + + (if (>= j col) + (progn + (set hist col i) + (if (== col transaction-limit) + (progn + (setq new_cont (container 0)) + (append counts new_cont) + (transaction solve n (+ col 1) (copy_list hist) new_cont) + ) + (solve_parallel n (+ col 1) hist count) + ) + ) + ) + ;; iterator + (setq i (+ i 1)) + ) + + (if (== col transaction-limit) + (progn + (run-transactions) + (setq i 0) + (while (< i (len counts)) + (set count (+ (get count) (get (get counts i)))) + (setq i (+ i 1)) + ) + ) + ) + ) + ) + + + + + +(setq count (container 0)) + +(setq n 11) +(solve_parallel n 0 (clean_list n) count) +(print (quote solutions:) (get count)) + diff --git a/duhton-c8/demo/run_transactions.duh b/duhton-c8/demo/run_transactions.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/run_transactions.duh @@ -0,0 +1,25 @@ + +(setq c (container 0)) + +(defun g (n) + (setq i n) + (while (< 0 i) + (set c (+ (get c) 1)) + (setq i (- i 1)) + ) + ) + +(defun f 
(thread n) + (g n) + ) + +(transaction f (quote t1) 10000) +(transaction f (quote t2) 20000) +(transaction f (quote t3) 10002) +(run-transactions) +(transaction f (quote t1) 15) +(transaction f (quote t2) 15) +(run-transactions) +(print (quote result) (get c)) +(print (quote finished)) + diff --git a/duhton-c8/demo/simple_transaction.duh b/duhton-c8/demo/simple_transaction.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/simple_transaction.duh @@ -0,0 +1,10 @@ + + +(defun do_stuff (i) + (if (> (* i i) 10) + (print i))) + +(setq i 0) +(while (< i 10) + (transaction do_stuff i) + (setq i (+ i 1))) diff --git a/duhton-c8/demo/sort.duh b/duhton-c8/demo/sort.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/sort.duh @@ -0,0 +1,197 @@ + + + + +(setq c (container (list 1 2 3 4))) + + +(setq _rand (container (list 133542157 362436069 521288629 88675123))) +(defun xor128 () + (setq lst (get _rand)) + (setq x (get lst 0)) + (setq y (get lst 1)) + (setq z (get lst 2)) + (setq w (get lst 3)) + + (setq t (^ x (<< x 11))) + (setq x y) + (setq y z) + (setq z w) + + (setq w (^ w (^ (>> w 19) (^ t (>> t 8))))) + (set lst 0 x) + (set lst 1 y) + (set lst 2 z) + (set lst 3 w) + w + ) + + +(defun random_list (n) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res (% (xor128) 10)) + (setq i (- i 1)) + ) + res + ) + + + +(defun merge_lists (as bs) + ;; merges the two lists and returns a new one + (setq res (list)) + (setq idxa 0) + (setq idxb 0) + (while (&& (< idxa (len as)) + (< idxb (len bs))) + (if (> (get as idxa) (get bs idxb)) + (progn + (append res (get bs idxb)) + (setq idxb (+ idxb 1)) + ) + (append res (get as idxa)) + (setq idxa (+ idxa 1)) + ) + ) + + (if (< idxa (len as)) + (progn + (setq xs as) + (setq idxx idxa) + ) + (setq xs bs) + (setq idxx idxb)) + + (while (< idxx (len xs)) + (append res (get xs idxx)) + (setq idxx (+ idxx 1))) + + res + ) + + +(defun split_list (xs) + ;; empties xs and fills 2 new lists to be returned + (setq 
half_len (/ (len xs) 2)) + (setq first (list)) + (setq second (list)) + (setq xidx 0) + + (while (< xidx (len xs)) + (if (< xidx half_len) + (append first (get xs xidx)) + (append second (get xs xidx)) + ) + (setq xidx (+ xidx 1)) + ) + + (list first second) + ) + + + +(defun merge_sort (xs) + (if (<= (len xs) 1) ; 1 elem + xs + (progn ; many elems + (setq lists (split_list xs)) + + (setq left (merge_sort (get lists 0))) + (setq right (merge_sort (get lists 1))) + ;; (print left right) + (merge_lists left right) + ) + ) + ) + +(defun merge_sort_transaction (xs res-cont) + (set res-cont (merge_sort xs)) + ) + +(defun merge_sort_parallel (xs) + (if (<= (len xs) 1) ; 1 elem + xs + (progn ; many elems + (setq lists (split_list xs)) + (setq left-c (container None)) + (setq right-c (container None)) + + (transaction merge_sort_transaction + (get lists 0) left-c) + (transaction merge_sort_transaction + (get lists 1) right-c) + + (setq current (time)) + (print (quote before-parallel)) + (run-transactions) + (print (quote time-parallel:) (- (time) current)) + + (setq left (get left-c)) + (setq right (get right-c)) + (assert (<= (len left) (+ (len right) 2))) + (assert (<= (len right) (+ (len left) 2))) + ;; (print left right) + (merge_lists left right) + ) + ) + ) + + +(defun copy_list (xs) + (setq res (list)) + (setq idx 0) + (while (< idx (len xs)) + (append res (get xs idx)) + (setq idx (+ idx 1)) + ) + res + ) + +(defun print_list (xs) + (print (quote len:) (len xs) (quote ->) xs) + ) + +(defun is_sorted (xs) + (setq idx 0) + (while (< idx (- (len xs) 1)) + (assert (<= + (get xs idx) + (get xs (+ idx 1)))) + (setq idx (+ idx 1)) + ) + (quote true) + ) + + +;; (setq as (random_list 20)) +;; (setq bs (random_list 20)) +;; (print as) +;; (print bs) +;; (print (split_list as)) + +(setq current (time)) +(print (quote before-random)) +(setq cs (random_list 300000)) +(print (quote time-random:) (- (time) current)) + +;; (print_list cs) + +;; (setq res (container None)) +;; 
(transaction merge_sort_transaction cs res) +;; (run-transactions) +;; (print (is_sorted (get res))) + +(setq current (time)) +(print (quote before-sorting)) +(setq sorted (merge_sort_parallel cs)) +(print (quote time-sorting:) (- (time) current)) + + +(setq current (time)) +(print (quote before-check)) +(print (quote sorted:) (is_sorted sorted)) +(print (quote time-check:) (- (time) current)) + + diff --git a/duhton-c8/demo/square_root.duh b/duhton-c8/demo/square_root.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/square_root.duh @@ -0,0 +1,9 @@ + + +(defun square_root (n) + (setq i 0) + (while (< (* i i) n) + (setq i (+ i 1))) + i) + +(print (square_root 1000000000)) diff --git a/duhton-c8/demo/synth.duh b/duhton-c8/demo/synth.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/synth.duh @@ -0,0 +1,98 @@ + + + +(defun clean_list (n) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res 0) + (setq i (- i 1)) + ) + res + ) + + +(setq _rand (container (list 133542157 362436069 521288629 88675123))) +(defun xor128 () + (setq lst (get _rand)) + (setq x (get lst 0)) + (setq y (get lst 1)) + (setq z (get lst 2)) + (setq w (get lst 3)) + + (setq t (^ x (<< x 11))) + (setq x y) + (setq y z) + (setq z w) + + (setq w (^ w (^ (>> w 19) (^ t (>> t 8))))) + (set lst 0 x) + (set lst 1 y) + (set lst 2 z) + (set lst 3 w) + w + ) + + +(defun random_list (n max) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res (% (xor128) max)) + (setq i (- i 1)) + ) + res + ) + + + + +(defun worker (shared private) + (setq i 1) + (while (< i 10000) + ;; every 200th modification is on 'shared' + (if (== (% i 200) 0) + (set shared (+ (get shared) 1)) + (set private (+ (get private) 1)) + ) + + (setq i (+ i 1)) + ) + ) + + + +(setq N 1000) +;; CONFL_IF_BELOW / RAND_MAX == ratio of conflicting transactions +;; to non conflicting ones +(setq RAND_MAX 8) +(setq CONFL_IF_BELOW 1) + +(print (quote N:) N) +(print (quote RAND_MAX:) RAND_MAX) +(print (quote 
CONFL_IF_BELOW:) CONFL_IF_BELOW) + +(setq timer (time)) +(print (quote setup-transactions:) timer) + +(setq shared (container 0)) +(setq rand-list (random_list N RAND_MAX)) +(setq i 0) +(while (< i N) + (setq private (container 0)) + (if (< (get rand-list i) CONFL_IF_BELOW) + ;; conflicting transaction + (transaction worker shared private) + ;; else non-conflicting + (transaction worker private private) + ) + + (setq i (+ i 1)) + ) + +(print (quote setup-time-diff:) (- (time) timer)) +(setq timer (time)) +(run-transactions) +(print (quote run-time-diff:) (- (time) timer)) +(print (quote shared) (get shared)) + diff --git a/duhton-c8/demo/trees.duh b/duhton-c8/demo/trees.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/trees.duh @@ -0,0 +1,18 @@ + +(defun create-tree (n) + (if (== n 0) 1 (cons (create-tree (- n 1)) (create-tree (- n 1)))) +) + +(defun walk-tree (tree) + (if (pair? tree) + (+ (walk-tree (car tree)) (walk-tree (cdr tree))) + 1 + ) +) + +(setq tree (create-tree 10)) +(print (walk-tree tree)) +(setq n 0) +(while (< n 1000) + (transaction walk-tree tree) + (setq n (+ n 1))) diff --git a/duhton-c8/demo/trees2.duh b/duhton-c8/demo/trees2.duh new file mode 100644 --- /dev/null +++ b/duhton-c8/demo/trees2.duh @@ -0,0 +1,21 @@ + + +(defun create-tree (n) + (if (== n 0) 1 (cons (create-tree (- n 1)) (create-tree (- n 1)))) +) + +(defun walk-tree (tree) + (if (pair? 
tree) + (+ (walk-tree (car tree)) (walk-tree (cdr tree))) + 1 + ) +) + +(defun lookup-tree () + (walk-tree (create-tree 10)) +) + +(setq n 0) +(while (< n 1000) + (transaction lookup-tree) + (setq n (+ n 1))) diff --git a/duhton-c8/duhton.c b/duhton-c8/duhton.c new file mode 100644 --- /dev/null +++ b/duhton-c8/duhton.c @@ -0,0 +1,70 @@ +#include +#include "duhton.h" + + +int main(int argc, char **argv) +{ + char *filename = NULL; + int interactive = 1; + int i; + int num_threads = STM_NB_SEGMENTS; + + for (i = 1; i < argc; ++i) { + if (strcmp(argv[i], "--help") == 0) { + printf("Duhton: a simple lisp-like language with STM support\n\n"); + printf("Usage: duhton [--help] [--num-threads no] [filename]\n"); + printf(" --help: this help\n"); + printf(" --num-threads : number of threads (default 4)\n\n"); + exit(0); + } else if (strcmp(argv[i], "--num-threads") == 0) { + if (i == argc - 1) { + printf("ERROR: --num-threads requires a parameter\n"); + exit(1); + } + num_threads = atoi(argv[i + 1]); + i++; + } else if (strncmp(argv[i], "--", 2) == 0) { + printf("ERROR: unrecognized parameter %s\n", argv[i]); + } else { + filename = argv[i]; + interactive = 0; + } + } + if (!filename) { + filename = "-"; /* stdin */ + } + + Du_Initialize(num_threads); + + while (1) { + if (interactive) { + printf("))) "); + fflush(stdout); + } + stm_start_inevitable_transaction(&stm_thread_local); + DuObject *code = Du_Compile(filename, interactive); + + if (code == NULL) { + printf("\n"); + break; + } + + DuObject *res = Du_Eval(code, Du_Globals); + if (interactive) { + Du_Print(res, 1); + } + + //_du_save1(stm_thread_local_obj); + //stm_collect(0); /* hack... 
*/ + //_du_restore1(stm_thread_local_obj); + + stm_commit_transaction(); + + Du_TransactionRun(); + if (!interactive) + break; + } + + Du_Finalize(); + return 0; +} diff --git a/duhton-c8/duhton.h b/duhton-c8/duhton.h new file mode 100644 --- /dev/null +++ b/duhton-c8/duhton.h @@ -0,0 +1,213 @@ +#ifndef _DUHTON_H_ +#define _DUHTON_H_ + +/* #undef USE_GIL */ /* forces "gil-c7" instead of "c7" */ + +#include +#include +#include + +#include "../c8/stmgc.h" + + + +extern __thread stm_thread_local_t stm_thread_local; + +struct DuObject_s { + struct object_s header; + uint32_t type_id; +}; +typedef TLPREFIX struct DuObject_s DuObject; + + +#define DuOBJECT_HEAD1 struct DuObject_s ob_base; + + +#ifdef __GNUC__ +# define NORETURN __attribute__((noreturn)) +#else +# define NORETURN /* nothing */ +#endif + + +typedef void(*trace_fn)(struct DuObject_s *, void visit(object_t **)); +typedef size_t(*bytesize_fn)(struct DuObject_s *); +typedef void(*print_fn)(DuObject *); +typedef DuObject *(*eval_fn)(DuObject *, DuObject *); +typedef int(*len_fn)(DuObject *); + +typedef struct { + const char *dt_name; + int dt_typeindex; + int dt_size; + trace_fn dt_trace; + print_fn dt_print; + eval_fn dt_eval; + len_fn dt_is_true; + len_fn dt_length; + bytesize_fn dt_bytesize; +} DuType; + +/* keep this list in sync with object.c's Du_Types[] */ +#define DUTYPE_INVALID 0 +#define DUTYPE_NONE 1 +#define DUTYPE_INT 2 +#define DUTYPE_SYMBOL 3 +#define DUTYPE_CONS 4 +#define DUTYPE_LIST 5 +#define DUTYPE_TUPLE 6 +#define DUTYPE_FRAME 7 +#define DUTYPE_FRAMENODE 8 +#define DUTYPE_CONTAINER 9 +#define _DUTYPE_TOTAL 10 + +extern DuType DuNone_Type; +extern DuType DuInt_Type; +extern DuType DuSymbol_Type; +extern DuType DuCons_Type; +extern DuType DuList_Type; +extern DuType DuTuple_Type; +extern DuType DuFrame_Type; +extern DuType DuFrameNode_Type; +extern DuType DuContainer_Type; + +extern DuType *Du_Types[_DUTYPE_TOTAL]; + +#define ROUND_UP(size) ((size) < 16 ? 
16 : ((size) + 7) & ~7) + + +DuObject *DuObject_New(DuType *tp); +int DuObject_IsTrue(DuObject *ob); +int DuObject_Length(DuObject *ob); + + +extern DuObject *Du_None; + +#define _DuObject_TypeNum(ob) (((DuObject*)(ob))->type_id) +#define Du_TYPE(ob) (Du_Types[_DuObject_TypeNum(ob)]) +#define DuInt_Check(ob) (_DuObject_TypeNum(ob) == DUTYPE_INT) +#define DuSymbol_Check(ob) (_DuObject_TypeNum(ob) == DUTYPE_SYMBOL) +#define DuCons_Check(ob) (_DuObject_TypeNum(ob) == DUTYPE_CONS) +#define DuList_Check(ob) (_DuObject_TypeNum(ob) == DUTYPE_LIST) +#define DuFrame_Check(ob) (_DuObject_TypeNum(ob) == DUTYPE_FRAME) +#define DuContainer_Check(ob) (_DuObject_TypeNum(ob) == DUTYPE_CONTAINER) + +void DuType_Ensure(char *where, DuObject *ob); +void DuInt_Ensure(char *where, DuObject *ob); +void DuList_Ensure(char *where, DuObject *ob); +void DuContainer_Ensure(char *where, DuObject *ob); +void DuCons_Ensure(char *where, DuObject *ob); +void DuSymbol_Ensure(char *where, DuObject *ob); +void DuFrame_Ensure(char *where, DuObject *ob); + +DuObject *DuInt_FromInt(int value); +int DuInt_AsInt(DuObject *ob); + +DuObject *DuList_New(void); +void DuList_Append(DuObject *list, DuObject *item); +int DuList_Size(DuObject *list); +DuObject *DuList_GetItem(DuObject *list, int index); +void DuList_SetItem(DuObject *list, int index, DuObject *newobj); +DuObject *DuList_Pop(DuObject *list, int index); + +DuObject *DuContainer_New(DuObject *obj); +DuObject *DuContainer_GetRef(DuObject *container); +void DuContainer_SetRef(DuObject *container, DuObject *newobj); + +DuObject *DuSymbol_FromString(const char *name); +char *DuSymbol_AsString(DuObject *ob); +int DuSymbol_Id(DuObject *ob); + +typedef TLPREFIX struct DuConsObject_s { + DuOBJECT_HEAD1 + DuObject *car, *cdr; +} DuConsObject; + +DuObject *DuCons_New(DuObject *car, DuObject *cdr); +DuObject *DuCons_Car(DuObject *cons); +DuObject *DuCons_Cdr(DuObject *cons); +DuObject *_DuCons_CAR(DuObject *cons); +DuObject *_DuCons_NEXT(DuObject *cons); + 
+void Du_FatalError(char *msg, ...) NORETURN; +DuObject *Du_Compile(char *filename, int stop_after_newline); +void Du_Print(DuObject *ob, int newline); + +DuObject *Du_Eval(DuObject *ob, DuObject *locals); +DuObject *Du_Progn(DuObject *cons, DuObject *locals); + +DuObject *DuFrame_New(); +DuObject *DuFrame_Copy(DuObject *frame); +DuObject *DuFrame_GetSymbol(DuObject *frame, DuObject *symbol); +void DuFrame_SetSymbol(DuObject *frame, DuObject *symbol, DuObject *value); +void DuFrame_SetSymbolStr(DuObject *frame, char *name, DuObject *value); +void DuFrame_SetBuiltinMacro(DuObject *frame, char *name, eval_fn func); +void DuFrame_SetUserFunction(DuObject *frame, DuObject *symbol, + DuObject *arglist, DuObject *progn); +DuObject *_DuFrame_EvalCall(DuObject *frame, DuObject *symbol, + DuObject *rest, int execute_now); + +void Du_Initialize(int); +void Du_Finalize(void); +extern DuObject *Du_Globals; + +void Du_TransactionAdd(DuObject *code, DuObject *frame); +void Du_TransactionRun(void); + + +#define _du_save1(p1) (_push_root((DuObject *)(p1))) +#define _du_save2(p1,p2) (_push_root((DuObject *)(p1)), \ + _push_root((DuObject *)(p2))) +#define _du_save3(p1,p2,p3) (_push_root((DuObject *)(p1)), \ + _push_root((DuObject *)(p2)), \ + _push_root((DuObject *)(p3))) +#define _du_save4(p1,p2,p3,p4) (_push_root((DuObject *)(p1)), \ + _push_root((DuObject *)(p2)), \ + _push_root((DuObject *)(p3)), \ + _push_root((DuObject *)(p4))) + + +#define _du_restore1(p1) (p1 = (typeof(p1))_pop_root()) +#define _du_restore2(p1,p2) (p2 = (typeof(p2))_pop_root(), \ + p1 = (typeof(p1))_pop_root()) +#define _du_restore3(p1,p2,p3) (p3 = (typeof(p3))_pop_root(), \ + p2 = (typeof(p2))_pop_root(), \ + p1 = (typeof(p1))_pop_root()) +#define _du_restore4(p1,p2,p3,p4)(p4 = (typeof(p4))_pop_root(), \ + p3 = (typeof(p3))_pop_root(), \ + p2 = (typeof(p2))_pop_root(), \ + p1 = (typeof(p1))_pop_root()) + + +#define _du_read1(p1) stm_read((object_t *)(p1)) +#define _du_write1(p1) stm_write((object_t *)(p1)) 
+ +#define INIT_PREBUILT(p) ((typeof(p))stm_setup_prebuilt((object_t *)(p))) + + +#ifndef NDEBUG +# define _check_not_free(ob) \ + assert(_DuObject_TypeNum(ob) > DUTYPE_INVALID && \ + _DuObject_TypeNum(ob) < _DUTYPE_TOTAL) +#endif + +static inline void _push_root(DuObject *ob) { + #ifndef NDEBUG + if (ob) _check_not_free(ob); + #endif + STM_PUSH_ROOT(stm_thread_local, ob); +} +static inline object_t *_pop_root(void) { + object_t *ob; + STM_POP_ROOT(stm_thread_local, ob); + #ifndef NDEBUG + if (ob) _check_not_free(ob); + #endif + return ob; +} + +extern pthread_t *all_threads; +extern int all_threads_count; + +//extern __thread DuObject *stm_thread_local_obj; /* XXX temp */ +#endif /* _DUHTON_H_ */ diff --git a/duhton-c8/frame.c b/duhton-c8/frame.c new file mode 100644 --- /dev/null +++ b/duhton-c8/frame.c @@ -0,0 +1,366 @@ +#include "duhton.h" +#include +#include + +typedef TLPREFIX struct dictentry_s { + int symbol_id; + DuObject *symbol; + DuObject *value; + eval_fn builtin_macro; + DuObject *func_arglist; + DuObject *func_progn; +} dictentry_t; + +typedef TLPREFIX struct DuFrameNodeObject_s { + DuOBJECT_HEAD1 + int ob_count; + struct dictentry_s ob_items[1]; +} DuFrameNodeObject; + + +void framenode_trace(struct DuFrameNodeObject_s *ob, void visit(object_t **)) +{ + int i; + for (i=ob->ob_count-1; i>=0; i--) { + struct dictentry_s *e = &ob->ob_items[i]; + visit((object_t **)&e->symbol); + visit((object_t **)&e->value); + visit((object_t **)&e->func_arglist); + visit((object_t **)&e->func_progn); + } +} + +size_t framenode_bytesize(struct DuFrameNodeObject_s *ob) +{ + return (sizeof(DuFrameNodeObject) + + (ob->ob_count - 1) * sizeof(struct dictentry_s)); +} + + +typedef TLPREFIX struct DuFrameObject_s { + DuOBJECT_HEAD1 + DuFrameNodeObject *ob_nodes; +} DuFrameObject; + +DuObject *Du_Globals; +static DuFrameNodeObject *du_empty_framenode; + +void init_prebuilt_frame_objects(void) +{ + static DuFrameNodeObject empty_framenode = { {.type_id=DUTYPE_FRAMENODE} }; + 
static DuFrameObject g = { {.type_id=DUTYPE_FRAME}, + .ob_nodes=&empty_framenode }; + + du_empty_framenode = INIT_PREBUILT(&empty_framenode); + Du_Globals = (DuObject *)INIT_PREBUILT(&g); +} + +DuObject *DuFrame_New() +{ + DuFrameObject *ob = (DuFrameObject *)DuObject_New(&DuFrame_Type); + ob->ob_nodes = du_empty_framenode; + return (DuObject *)ob; +} + +#if 0 +DuObject *DuFrame_Copy(DuObject *frame) +{ + XXX fix or kill + DuFrame_Ensure("DuFrame_Copy", frame); + int i; + DuFrameObject *src = (DuFrameObject *)frame; + DuFrameObject *dst = (DuFrameObject *)DuFrame_New(); + dst->entry_count = src->entry_count; + dst->entries = malloc(sizeof(struct dictentry) * src->entry_count); + assert(dst->entries); + for (i=0; ientry_count; i++) { + struct dictentry *e = &src->entries[i]; + Du_INCREF(e->symbol); + if (e->value != NULL) Du_INCREF(e->value ); + if (e->func_arglist != NULL) Du_INCREF(e->func_arglist); + if (e->func_progn != NULL) Du_INCREF(e->func_progn ); + dst->entries[i] = *e; + } + return (DuObject *)dst; +} +#endif + +void frame_trace(struct DuFrameObject_s *ob, void visit(object_t **)) +{ + visit((object_t **)&ob->ob_nodes); +} + +void frame_print(DuFrameObject *ob) +{ + printf(""); +} + +static void _copy(dictentry_t *dst, dictentry_t *src) +{ + /* workaround for a bug in clang-3.4: cannot do "*dst = *src;" */ + memcpy(_stm_real_address((object_t *)dst), + _stm_real_address((object_t *)src), + sizeof(dictentry_t)); +} + +static void _clear(dictentry_t *dst) +{ + /* workaround for a bug in clang-3.4: many "dst->field = NULL;" + turn into a single memset() call */ + memset(_stm_real_address((object_t *)dst), 0, + sizeof(dictentry_t)); +} + +static dictentry_t * +find_entry(DuFrameObject *frame, DuObject *symbol, int write_mode) +{ + /* only allocates if write_mode = 1 */ + _du_read1(frame); + DuFrameNodeObject *ob = frame->ob_nodes; + + _du_read1(ob); + int left = 0; + int right = ob->ob_count; + dictentry_t *entries = ob->ob_items; + int search_id = 
DuSymbol_Id(symbol); + +#if 0 +#ifdef _GC_DEBUG + int j; + for (j = 0; j < right; j++) { + dprintf(("\t%d\n", entries[j].symbol_id)); + } +#endif +#endif + + while (right > left) { + int middle = (left + right) / 2; + int found_id = entries[middle].symbol_id; + if (search_id < found_id) + right = middle; + else if (search_id == found_id) { + if (write_mode) { + _du_write1(ob); + entries = ob->ob_items; + } + return entries + middle; + } + else + left = middle + 1; + } + + if (!write_mode) { + return NULL; + } + else { + int i; + size_t size = (sizeof(DuFrameNodeObject) + + (ob->ob_count + 1 - 1)*sizeof(dictentry_t)); + DuFrameNodeObject *newob; + + _du_save3(ob, symbol, frame); + newob = (DuFrameNodeObject *)stm_allocate(size); + newob->ob_base.type_id = DUTYPE_FRAMENODE; + _du_restore3(ob, symbol, frame); + + newob->ob_count = ob->ob_count + 1; + dictentry_t *newentries = newob->ob_items; + entries = ob->ob_items; + + for (i=0; iob_count; i++) + _copy(&newentries[i], &entries[i-1]); + + _du_write1(frame); + frame->ob_nodes = newob; + + return newentries + left; + } +} + +void DuFrame_SetBuiltinMacro(DuObject *frame, char *name, eval_fn func) +{ + DuFrame_Ensure("DuFrame_SetBuiltinMacro", frame); + + _du_save1(frame); + DuObject *sym = DuSymbol_FromString(name); + _du_restore1(frame); + + _du_save1(frame); + dictentry_t *e = find_entry((DuFrameObject *)frame, sym, 1); + _du_restore1(frame); + + _du_write1(frame); /* e is part of frame or a new object */ + e->builtin_macro = func; +} + +static void +_parse_arguments(DuObject *symbol, DuObject *arguments, + DuObject *formallist, DuObject *caller, DuObject *callee) +{ + while (DuCons_Check(formallist)) { + if (!DuCons_Check(arguments)) + Du_FatalError("call to '%s': not enough arguments", + DuSymbol_AsString(symbol)); + + /* _du_read1(arguments); IMMUTABLE */ + DuObject *arg = _DuCons_CAR(arguments); + DuObject *argumentsnext = _DuCons_NEXT(arguments); + + _du_save3(symbol, argumentsnext, caller); + 
_du_save2(formallist, callee); + DuObject *obj = Du_Eval(arg, caller); + _du_restore2(formallist, callee); + + /* _du_read1(formallist); IMMUTABLE */ + DuObject *sym = _DuCons_CAR(formallist); + DuObject *formallistnext = _DuCons_NEXT(formallist); + + _du_save2(formallistnext, callee); + DuFrame_SetSymbol(callee, sym, obj); + _du_restore2(formallistnext, callee); + _du_restore3(symbol, argumentsnext, caller); + + formallist = formallistnext; + arguments = argumentsnext; + } + if (arguments != Du_None) + Du_FatalError("call to '%s': too many arguments", + DuSymbol_AsString(symbol)); +} + +DuObject *_DuFrame_EvalCall(DuObject *frame, DuObject *symbol, + DuObject *rest, int execute_now) +{ + dictentry_t *e; + DuFrame_Ensure("_DuFrame_EvalCall", frame); + + /* find_entry not in write_mode will not collect */ + e = find_entry((DuFrameObject *)frame, symbol, 0); + if (!e) { + e = find_entry((DuFrameObject *)Du_Globals, symbol, 0); + if (!e) { + if (!DuSymbol_Check(symbol)) { + printf("_DuFrame_EvalCall: "); + Du_Print(symbol, 1); + Du_FatalError("expected a symbol to execute"); + } + else + goto not_defined; + } + } + if (e->func_progn) { + DuObject *func = e->func_progn; + DuObject *func_arglist = e->func_arglist; + _du_save1(func); + _du_save4(frame, symbol, rest, func_arglist); + DuObject *callee_frame = DuFrame_New(); + _du_restore4(frame, symbol, rest, func_arglist); + + _du_save1(callee_frame); + _parse_arguments(symbol, rest, func_arglist, frame, callee_frame); + _du_restore1(callee_frame); + _du_restore1(func); + + if (execute_now) { + return Du_Progn(func, callee_frame); + } + else { + Du_TransactionAdd(func, callee_frame); + return NULL; + } + } + if (e->builtin_macro) { + if (!execute_now) + Du_FatalError("symbol refers to a macro: '%s'", + DuSymbol_AsString(symbol)); + return e->builtin_macro(rest, frame); + } + not_defined: + Du_FatalError("symbol not defined as a function: '%s'", + DuSymbol_AsString(symbol)); +} + +DuObject *DuFrame_GetSymbol(DuObject 
*frame, DuObject *symbol) +{ + dictentry_t *e; + DuFrame_Ensure("DuFrame_GetSymbol", frame); + + e = find_entry((DuFrameObject *)frame, symbol, 0); + /* find_entry does the read_barrier */ + return e ? e->value : NULL; +} + +void DuFrame_SetSymbol(DuObject *frame, DuObject *symbol, DuObject *value) +{ + dictentry_t *e; + DuFrame_Ensure("DuFrame_SetSymbol", frame); + + _du_save2(value, frame); + e = find_entry((DuFrameObject *)frame, symbol, 1); + _du_restore2(value, frame); + + _du_write1(frame); /* e is new or part of frame */ + e->value = value; +} + +void DuFrame_SetSymbolStr(DuObject *frame, char *name, DuObject *value) +{ + _du_save2(frame, value); + DuObject *sym = DuSymbol_FromString(name); + _du_restore2(frame, value); + + DuFrame_SetSymbol(frame, sym, value); +} + +void DuFrame_SetUserFunction(DuObject *frame, DuObject *symbol, + DuObject *arglist, DuObject *progn) +{ + dictentry_t *e; + DuFrame_Ensure("DuFrame_SetUserFunction", frame); + + _du_save3(arglist, progn, frame); + e = find_entry((DuFrameObject *)frame, symbol, 1); + _du_restore3(arglist, progn, frame); + + _du_write1(frame); /* e is part of frame or new */ + e->func_arglist = arglist; + e->func_progn = progn; +} + +void DuFrame_Ensure(char *where, DuObject *ob) +{ + if (!DuFrame_Check(ob)) + Du_FatalError("%s: expected 'frame' argument, got '%s'", + where, Du_TYPE(ob)->dt_name); +} + +DuType DuFrameNode_Type = { /* internal type */ + "framenode", + DUTYPE_FRAMENODE, + 0, /* dt_size */ + (trace_fn)framenode_trace, + (print_fn)NULL, + (eval_fn)NULL, + (len_fn)NULL, + (len_fn)NULL, + (bytesize_fn)framenode_bytesize, +}; + +DuType DuFrame_Type = { + "frame", + DUTYPE_FRAME, + sizeof(DuFrameObject), + (trace_fn)frame_trace, + (print_fn)frame_print, +}; diff --git a/duhton-c8/glob.c b/duhton-c8/glob.c new file mode 100644 --- /dev/null +++ b/duhton-c8/glob.c @@ -0,0 +1,867 @@ +#include "duhton.h" +#include +#include + +pthread_t *all_threads; +int all_threads_count; + +static void _du_getargs1(const 
char *name, DuObject *cons, DuObject *locals, + DuObject **a) +{ + DuObject *expr1, *obj1; + + if (cons == Du_None) goto error; + + /* _du_read1(cons); IMMUTABLE */ + expr1 = _DuCons_CAR(cons); + cons = _DuCons_NEXT(cons); + if (cons != Du_None) goto error; + + obj1 = Du_Eval(expr1, locals); + *a = obj1; + return; + + error: + Du_FatalError("%s: expected one argument", name); +} + +static void _du_getargs2(const char *name, DuObject *cons, DuObject *locals, + DuObject **a, DuObject **b) +{ + DuObject *expr1, *expr2, *obj1, *obj2; + + if (cons == Du_None) goto error; + + /* _du_read1(cons); IMMUTABLE */ + expr1 = _DuCons_CAR(cons); + cons = _DuCons_NEXT(cons); + if (cons == Du_None) goto error; + + /* _du_read1(cons); IMMUTABLE */ + expr2 = _DuCons_CAR(cons); + cons = _DuCons_NEXT(cons); + if (cons != Du_None) goto error; + + _du_save2(expr2, locals); + obj1 = Du_Eval(expr1, locals); + _du_restore2(expr2, locals); + + _du_save1(obj1); + obj2 = Du_Eval(expr2, locals); + _du_restore1(obj1); + + *a = obj1; + *b = obj2; + return; + + error: + Du_FatalError("%s: expected two arguments", name); +} + +/************************************************************/ + + +DuObject *Du_Progn(DuObject *cons, DuObject *locals) +{ + DuObject *result = Du_None; + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + _du_save2(next, locals); + result = Du_Eval(expr, locals); + _du_restore2(next, locals); + cons = next; + } + return result; +} + +DuObject *du_setq(DuObject *cons, DuObject *locals) +{ + DuObject *result = Du_None; + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *symbol = _DuCons_CAR(cons); + cons = _DuCons_NEXT(cons); + if (cons == Du_None) + Du_FatalError("setq: number of arguments is odd"); + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save3(symbol, next, locals); + DuObject *obj = 
Du_Eval(expr, locals); + _du_restore3(symbol, next, locals); + + _du_save3(next, locals, obj); + DuFrame_SetSymbol(locals, symbol, obj); + _du_restore3(next, locals, obj); + + result = obj; + cons = next; + } + return result; +} + +DuObject *du_print(DuObject *cons, DuObject *locals) +{ + _du_save2(cons, locals); + DuObject *lst = DuList_New(); + _du_restore2(cons, locals); + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save3(lst, next, locals); + DuObject *obj = Du_Eval(expr, locals); + _du_restore3(lst, next, locals); + + _du_save3(lst, next, locals); + DuList_Append(lst, obj); + _du_restore3(lst, next, locals); + + cons = next; + } + + _du_save1(lst); + stm_become_inevitable(&stm_thread_local, "print"); + _du_restore1(lst); + + int i; + for (i=0; i 0) printf(" "); + _du_save1(lst); + Du_Print(DuList_GetItem(lst, i), 0); + _du_restore1(lst); + } + + printf("\n"); + return Du_None; +} + +DuObject *du_xor(DuObject *cons, DuObject *locals) +{ + int result = 0; + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result ^= DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + } + + return DuInt_FromInt(result); +} + +DuObject *du_lshift(DuObject *cons, DuObject *locals) +{ + int result = 0; + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result = DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + expr = _DuCons_CAR(cons); + next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + obj = Du_Eval(expr, locals); + result <<= DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = 
next; + } + + return DuInt_FromInt(result); +} + +DuObject *du_rshift(DuObject *cons, DuObject *locals) +{ + int result = 0; + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result = DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + expr = _DuCons_CAR(cons); + next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + obj = Du_Eval(expr, locals); + result >>= DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + } + + return DuInt_FromInt(result); +} + +DuObject *du_add(DuObject *cons, DuObject *locals) +{ + int result = 0; + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result += DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + } + return DuInt_FromInt(result); +} + +DuObject *du_sub(DuObject *cons, DuObject *locals) +{ + int result = 0; + int sign = 1; + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result += sign * DuInt_AsInt(obj); + _du_restore2(next, locals); + + sign = -1; + cons = next; + } + return DuInt_FromInt(result); +} + +DuObject *du_mul(DuObject *cons, DuObject *locals) +{ + int result = 1; + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result *= DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + } + return DuInt_FromInt(result); +} + +DuObject *du_div(DuObject *cons, DuObject *locals) +{ + int result = 0; + 
int first = 1; + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + if (first) { + result = DuInt_AsInt(obj); + first = 0; + } else { + result /= DuInt_AsInt(obj); + } + _du_restore2(next, locals); + + cons = next; + } + return DuInt_FromInt(result); +} + +DuObject *du_mod(DuObject *cons, DuObject *locals) +{ + int result = 0; + int first = 1; + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + if (first) { + result = DuInt_AsInt(obj); + first = 0; + } else { + result %= DuInt_AsInt(obj); + } + _du_restore2(next, locals); + + cons = next; + } + return DuInt_FromInt(result); +} From noreply at buildbot.pypy.org Mon Jan 19 18:35:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 18:35:43 +0100 (CET) Subject: [pypy-commit] stmgc hashtable: update comments Message-ID: <20150119173543.5460C1C0271@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: hashtable Changeset: r1550:073ff03f2316 Date: 2015-01-19 18:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/073ff03f2316/ Log: update comments diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c --- a/c7/stm/hashtable.c +++ b/c7/stm/hashtable.c @@ -6,8 +6,14 @@ length 2**64. Initially it is full of NULLs. It's obviously implemented as a dictionary in which NULL objects are not needed. -The only operations on a hashtable are reading or writing an object at -a given index. +A real dictionary can be implemented on top of it, by using the index +`hash(key)` in the hashtable, and storing a list of `(key, value)` +pairs at that index (usually only one, unless there is a hash +collision). + +The main operations on a hashtable are reading or writing an object at a +given index. 
It might support in the future enumerating the indexes of +non-NULL objects. There are two markers for every index (a read and a write marker). This is unlike regular arrays, which have only two markers in total. @@ -18,8 +24,8 @@ First idea: have the hashtable in raw memory, pointing to "entry" objects. The entry objects themselves point to the user-specified -objects, and they have the read/write markers. Every entry object -itself, once created, stays around. It is only removed by the next +objects. The entry objects have the read/write markers. Every entry +object, once created, stays around. It is only removed by the next major GC if it points to NULL and its read/write markers are not set in any currently-running transaction. From noreply at buildbot.pypy.org Mon Jan 19 18:35:44 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 18:35:44 +0100 (CET) Subject: [pypy-commit] stmgc hashtable: Close to-be-merged branch Message-ID: <20150119173544.6F7971C0271@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: hashtable Changeset: r1551:177e21cbd78f Date: 2015-01-19 18:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/177e21cbd78f/ Log: Close to-be-merged branch From noreply at buildbot.pypy.org Mon Jan 19 18:35:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 18:35:45 +0100 (CET) Subject: [pypy-commit] stmgc default: hg merge hashtable Message-ID: <20150119173545.99A0E1C0271@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1552:957947bc7ad9 Date: 2015-01-19 18:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/957947bc7ad9/ Log: hg merge hashtable diff --git a/c7/demo/demo_hashtable1.c b/c7/demo/demo_hashtable1.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo_hashtable1.c @@ -0,0 +1,217 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmgc.h" + +#define NUMTHREADS 4 + + +typedef TLPREFIX struct node_s node_t; +typedef TLPREFIX struct dict_s 
dict_t; + + +struct node_s { + struct object_s header; + int typeid; + intptr_t freevalue; +}; + +struct dict_s { + struct node_s hdr; + stm_hashtable_t *hashtable; +}; + +#define TID_NODE 0x01234567 +#define TID_DICT 0x56789ABC +#define TID_DICTENTRY 0x6789ABCD + + +static sem_t done; +__thread stm_thread_local_t stm_thread_local; + +// global and per-thread-data +time_t default_seed; +dict_t *global_dict; + +struct thread_data { + unsigned int thread_seed; +}; +__thread struct thread_data td; + + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + if (((struct node_s*)ob)->typeid == TID_NODE) + return sizeof(struct node_s); + if (((struct node_s*)ob)->typeid == TID_DICT) + return sizeof(struct dict_s); + if (((struct node_s*)ob)->typeid == TID_DICTENTRY) + return sizeof(struct stm_hashtable_entry_s); + abort(); +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + struct node_s *n; + n = (struct node_s*)obj; + if (n->typeid == TID_NODE) { + return; + } + if (n->typeid == TID_DICT) { + stm_hashtable_tracefn(((struct dict_s *)n)->hashtable, visit); + return; + } + if (n->typeid == TID_DICTENTRY) { + object_t **ref = &((struct stm_hashtable_entry_s *)obj)->object; + visit(ref); + return; + } + abort(); +} + +void stmcb_commit_soon() {} +long stmcb_obj_supports_cards(struct object_s *obj) +{ + return 0; +} +void stmcb_trace_cards(struct object_s *obj, void cb(object_t **), + uintptr_t start, uintptr_t stop) { + abort(); +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) { + abort(); +} + +int get_rand(int max) +{ + if (max == 0) + return 0; + return (int)(rand_r(&td.thread_seed) % (unsigned int)max); +} + + +void populate_hashtable(int keymin, int keymax) +{ + int i; + int diff = get_rand(keymax - keymin); + for (i = 0; i < keymax - keymin; i++) { + int key = keymin + i + diff; + if (key >= keymax) + key -= (keymax - keymin); + object_t *o = stm_allocate(sizeof(struct node_s)); + ((node_t *)o)->typeid 
= TID_NODE; + ((node_t *)o)->freevalue = key; + assert(global_dict->hdr.freevalue == 42); + stm_hashtable_write((object_t *)global_dict, global_dict->hashtable, + key, o, &stm_thread_local); + } +} + +void setup_thread(void) +{ + memset(&td, 0, sizeof(struct thread_data)); + td.thread_seed = default_seed++; +} + +void *demo_random(void *arg) +{ + int threadnum = (uintptr_t)arg; + int status; + rewind_jmp_buf rjbuf; + stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); + + setup_thread(); + + volatile int start_count = 0; + + stm_start_transaction(&stm_thread_local); + ++start_count; + assert(start_count == 1); // all the writes that follow must not conflict + populate_hashtable(1291 * threadnum, 1291 * (threadnum + 1)); + stm_commit_transaction(); + + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); + stm_unregister_thread_local(&stm_thread_local); + + status = sem_post(&done); assert(status == 0); + return NULL; +} + +void newthread(void*(*func)(void*), void *arg) +{ + pthread_t th; + int status = pthread_create(&th, NULL, func, arg); + if (status != 0) + abort(); + pthread_detach(th); + printf("started new thread\n"); +} + +void setup_globals(void) +{ + stm_hashtable_t *my_hashtable = stm_hashtable_create(); + struct dict_s new_templ = { + .hdr = { + .typeid = TID_DICT, + .freevalue = 42, + }, + .hashtable = my_hashtable, + }; + + stm_start_inevitable_transaction(&stm_thread_local); + global_dict = (dict_t *)stm_setup_prebuilt( + (object_t* )(uintptr_t)&new_templ); + assert(global_dict->hashtable); + stm_commit_transaction(); +} + + +int main(void) +{ + int i, status; + rewind_jmp_buf rjbuf; + + stm_hashtable_entry_userdata = TID_DICTENTRY; + + /* pick a random seed from the time in seconds. + A bit pointless for now... because the interleaving of the + threads is really random. 
*/ + default_seed = time(NULL); + printf("running with seed=%lld\n", (long long)default_seed); + + status = sem_init(&done, 0, 0); + assert(status == 0); + + + stm_setup(); + stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); + + setup_globals(); + + for (i = 0; i < NUMTHREADS; i++) { + newthread(demo_random, (void *)(uintptr_t)i); + } + + for (i=0; i < NUMTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + printf("thread finished\n"); + } + + printf("Test OK!\n"); + + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); + stm_unregister_thread_local(&stm_thread_local); + stm_teardown(); + + return 0; +} diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -372,6 +372,7 @@ assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0])); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1])); + assert(list_is_empty(STM_PSEGMENT->young_objects_with_light_finalizers)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); assert(STM_PSEGMENT->finalizers == NULL); @@ -972,6 +973,8 @@ (int)pseg->transaction_state); } + abort_finalizers(pseg); + /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -1060,8 +1063,6 @@ /* invoke the callbacks */ invoke_and_clear_user_callbacks(1); /* for abort */ - abort_finalizers(); - if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ STM_SEGMENT->nursery_end = pause_signalled ? 
NSE_SIGPAUSE diff --git a/c7/stm/finalizer.c b/c7/stm/finalizer.c --- a/c7/stm/finalizer.c +++ b/c7/stm/finalizer.c @@ -58,28 +58,73 @@ STM_PSEGMENT->finalizers = NULL; } -static void _abort_finalizers(void) +static void abort_finalizers(struct stm_priv_segment_info_s *pseg) { /* like _commit_finalizers(), but forget everything from the current transaction */ - if (STM_PSEGMENT->finalizers->run_finalizers != NULL) { - if (STM_PSEGMENT->finalizers->running_next != NULL) { - *STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1; + if (pseg->finalizers != NULL) { + if (pseg->finalizers->run_finalizers != NULL) { + if (pseg->finalizers->running_next != NULL) { + *pseg->finalizers->running_next = (uintptr_t)-1; + } + list_free(pseg->finalizers->run_finalizers); } - list_free(STM_PSEGMENT->finalizers->run_finalizers); + list_free(pseg->finalizers->objects_with_finalizers); + free(pseg->finalizers); + pseg->finalizers = NULL; } - list_free(STM_PSEGMENT->finalizers->objects_with_finalizers); - free(STM_PSEGMENT->finalizers); - STM_PSEGMENT->finalizers = NULL; + + /* call the light finalizers for objects that are about to + be forgotten from the current transaction */ + char *old_gs_register = STM_SEGMENT->segment_base; + bool must_fix_gs = old_gs_register != pseg->pub.segment_base; + + struct list_s *lst = pseg->young_objects_with_light_finalizers; + long i, count = list_count(lst); + if (lst > 0) { + for (i = 0; i < count; i++) { + object_t *obj = (object_t *)list_item(lst, i); + assert(_is_young(obj)); + if (must_fix_gs) { + set_gs_register(pseg->pub.segment_base); + must_fix_gs = false; + } + stmcb_light_finalizer(obj); + } + list_clear(lst); + } + + /* also deals with overflow objects: they are at the tail of + old_objects_with_light_finalizers (this list is kept in order + and we cannot add any already-committed object) */ + lst = pseg->old_objects_with_light_finalizers; + count = list_count(lst); + while (count > 0) { + object_t *obj = (object_t *)list_item(lst, 
--count); + if (!IS_OVERFLOW_OBJ(pseg, obj)) + break; + lst->count = count; + if (must_fix_gs) { + set_gs_register(pseg->pub.segment_base); + must_fix_gs = false; + } + stmcb_light_finalizer(obj); + } + + if (STM_SEGMENT->segment_base != old_gs_register) + set_gs_register(old_gs_register); } void stm_enable_light_finalizer(object_t *obj) { - if (_is_young(obj)) + if (_is_young(obj)) { LIST_APPEND(STM_PSEGMENT->young_objects_with_light_finalizers, obj); - else + } + else { + assert(_is_from_same_transaction(obj)); LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj); + } } object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up) @@ -108,7 +153,7 @@ struct list_s *lst = STM_PSEGMENT->young_objects_with_light_finalizers; long i, count = list_count(lst); for (i = 0; i < count; i++) { - object_t* obj = (object_t *)list_item(lst, i); + object_t *obj = (object_t *)list_item(lst, i); assert(_is_young(obj)); object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; @@ -138,7 +183,7 @@ long i, count = list_count(lst); lst->count = 0; for (i = 0; i < count; i++) { - object_t* obj = (object_t *)list_item(lst, i); + object_t *obj = (object_t *)list_item(lst, i); if (!mark_visited_test(obj)) { /* not marked: object dies */ /* we're calling the light finalizer in the same @@ -345,6 +390,24 @@ LIST_FREE(_finalizer_emptystack); } +static void mark_visit_from_finalizer1(char *base, struct finalizers_s *f) +{ + if (f != NULL && f->run_finalizers != NULL) { + LIST_FOREACH_R(f->run_finalizers, object_t * /*item*/, + mark_visit_object(item, base)); + } +} + +static void mark_visit_from_finalizer_pending(void) +{ + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(j); + mark_visit_from_finalizer1(pseg->pub.segment_base, pseg->finalizers); + } + mark_visit_from_finalizer1(stm_object_pages, &g_finalizers); +} + static void _execute_finalizers(struct finalizers_s *f) { if (f->run_finalizers == NULL) diff --git 
a/c7/stm/finalizer.h b/c7/stm/finalizer.h --- a/c7/stm/finalizer.h +++ b/c7/stm/finalizer.h @@ -6,6 +6,7 @@ uintptr_t *running_next; }; +static void mark_visit_from_finalizer_pending(void); static void deal_with_young_objects_with_finalizers(void); static void deal_with_old_objects_with_finalizers(void); static void deal_with_objects_with_finalizers(void); @@ -14,18 +15,13 @@ static void teardown_finalizer(void); static void _commit_finalizers(void); -static void _abort_finalizers(void); +static void abort_finalizers(struct stm_priv_segment_info_s *); #define commit_finalizers() do { \ if (STM_PSEGMENT->finalizers != NULL) \ _commit_finalizers(); \ } while (0) -#define abort_finalizers() do { \ - if (STM_PSEGMENT->finalizers != NULL) \ - _abort_finalizers(); \ -} while (0) - /* regular finalizers (objs from already-committed transactions) */ static struct finalizers_s g_finalizers; diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -201,9 +201,6 @@ just release these locks early */ s_mutex_unlock(); - /* Open a new profiling file, if any */ - forksupport_open_new_profiling_file(); - /* Move the copy of the mmap over the old one, overwriting it and thus freeing the old mapping in this process */ diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -344,6 +344,8 @@ LIST_APPEND(mark_objects_to_trace, obj); } +#define TRACE_FOR_MAJOR_COLLECTION (&mark_record_trace) + static void mark_trace(object_t *obj, char *segment_base) { assert(list_is_empty(mark_objects_to_trace)); @@ -352,7 +354,7 @@ /* trace into the object (the version from 'segment_base') */ struct object_s *realobj = (struct object_s *)REAL_ADDRESS(segment_base, obj); - stmcb_trace(realobj, &mark_record_trace); + stmcb_trace(realobj, TRACE_FOR_MAJOR_COLLECTION); if (list_is_empty(mark_objects_to_trace)) break; @@ -629,6 +631,7 @@ mark_visit_from_modified_objects(); mark_visit_from_markers(); 
mark_visit_from_roots(); + mark_visit_from_finalizer_pending(); LIST_FREE(mark_objects_to_trace); /* finalizer support: will mark as WL_VISITED all objects with a diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c new file mode 100644 --- /dev/null +++ b/c7/stm/hashtable.c @@ -0,0 +1,390 @@ +/* +Design of stmgc's "hashtable" objects +===================================== + +A "hashtable" is theoretically a lazily-filled array of objects of +length 2**64. Initially it is full of NULLs. It's obviously +implemented as a dictionary in which NULL objects are not needed. + +A real dictionary can be implemented on top of it, by using the index +`hash(key)` in the hashtable, and storing a list of `(key, value)` +pairs at that index (usually only one, unless there is a hash +collision). + +The main operations on a hashtable are reading or writing an object at a +given index. It might support in the future enumerating the indexes of +non-NULL objects. + +There are two markers for every index (a read and a write marker). +This is unlike regular arrays, which have only two markers in total. + + +Implementation +-------------- + +First idea: have the hashtable in raw memory, pointing to "entry" +objects. The entry objects themselves point to the user-specified +objects. The entry objects have the read/write markers. Every entry +object, once created, stays around. It is only removed by the next +major GC if it points to NULL and its read/write markers are not set +in any currently-running transaction. + +References +---------- + +Inspired by: http://ppl.stanford.edu/papers/podc011-bronson.pdf +*/ + + +uint32_t stm_hashtable_entry_userdata; + + +#define INITIAL_HASHTABLE_SIZE 8 +#define PERTURB_SHIFT 5 +#define RESIZING_LOCK 0 + +typedef struct { + uintptr_t mask; + + /* 'resize_counter' start at an odd value, and is decremented (by + 6) for every new item put in 'items'. 
When it crosses 0, we + instead allocate a bigger table and change 'resize_counter' to + be a regular pointer to it (which is then even). The whole + structure is immutable then. + + The field 'resize_counter' also works as a write lock: changes + go via the intermediate value RESIZING_LOCK (0). + */ + uintptr_t resize_counter; + + stm_hashtable_entry_t *items[INITIAL_HASHTABLE_SIZE]; +} stm_hashtable_table_t; + +#define IS_EVEN(p) (((p) & 1) == 0) + +struct stm_hashtable_s { + stm_hashtable_table_t *table; + stm_hashtable_table_t initial_table; + uint64_t additions; +}; + + +static inline void init_table(stm_hashtable_table_t *table, uintptr_t itemcount) +{ + table->mask = itemcount - 1; + table->resize_counter = itemcount * 4 + 1; + memset(table->items, 0, itemcount * sizeof(stm_hashtable_entry_t *)); +} + +stm_hashtable_t *stm_hashtable_create(void) +{ + stm_hashtable_t *hashtable = malloc(sizeof(stm_hashtable_t)); + assert(hashtable); + hashtable->table = &hashtable->initial_table; + hashtable->additions = 0; + init_table(&hashtable->initial_table, INITIAL_HASHTABLE_SIZE); + return hashtable; +} + +void stm_hashtable_free(stm_hashtable_t *hashtable) +{ + uintptr_t rc = hashtable->initial_table.resize_counter; + free(hashtable); + while (IS_EVEN(rc)) { + assert(rc != RESIZING_LOCK); + + stm_hashtable_table_t *table = (stm_hashtable_table_t *)rc; + rc = table->resize_counter; + free(table); + } +} + +static bool _stm_was_read_by_anybody(object_t *obj) +{ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *remote_base = get_segment_base(i); + uint8_t remote_version = get_segment(i)->transaction_read_version; + if (was_read_remote(remote_base, obj, remote_version)) + return true; + } + return false; +} + +#define VOLATILE_HASHTABLE(p) ((volatile stm_hashtable_t *)(p)) +#define VOLATILE_TABLE(p) ((volatile stm_hashtable_table_t *)(p)) + +static void _insert_clean(stm_hashtable_table_t *table, + stm_hashtable_entry_t *entry) +{ + uintptr_t mask = table->mask; + 
uintptr_t i = entry->index & mask; + if (table->items[i] == NULL) { + table->items[i] = entry; + return; + } + + uintptr_t perturb = entry->index; + while (1) { + i = (i << 2) + i + perturb + 1; + i &= mask; + if (table->items[i] == NULL) { + table->items[i] = entry; + return; + } + + perturb >>= PERTURB_SHIFT; + } +} + +static void _stm_rehash_hashtable(stm_hashtable_t *hashtable, + uintptr_t biggercount, + bool remove_unread) +{ + dprintf(("rehash %p to %ld, remove_unread=%d\n", + hashtable, biggercount, (int)remove_unread)); + + size_t size = (offsetof(stm_hashtable_table_t, items) + + biggercount * sizeof(stm_hashtable_entry_t *)); + stm_hashtable_table_t *biggertable = malloc(size); + assert(biggertable); // XXX + + stm_hashtable_table_t *table = hashtable->table; + table->resize_counter = (uintptr_t)biggertable; + /* ^^^ this unlocks the table by writing a non-zero value to + table->resize_counter, but the new value is a pointer to the + new bigger table, so IS_EVEN() is still true */ + + init_table(biggertable, biggercount); + + uintptr_t j, mask = table->mask; + uintptr_t rc = biggertable->resize_counter; + for (j = 0; j <= mask; j++) { + stm_hashtable_entry_t *entry = table->items[j]; + if (entry == NULL) + continue; + if (remove_unread) { + if (entry->object == NULL && + !_stm_was_read_by_anybody((object_t *)entry)) + continue; + } + _insert_clean(biggertable, entry); + rc -= 6; + } + biggertable->resize_counter = rc; + + write_fence(); /* make sure that 'biggertable' is valid here, + and make sure 'table->resize_counter' is updated + ('table' must be immutable from now on). 
*/ + VOLATILE_HASHTABLE(hashtable)->table = biggertable; +} + +stm_hashtable_entry_t *stm_hashtable_lookup(object_t *hashtableobj, + stm_hashtable_t *hashtable, + uintptr_t index) +{ + stm_hashtable_table_t *table; + uintptr_t mask; + uintptr_t i; + stm_hashtable_entry_t *entry; + + restart: + /* classical dict lookup logic */ + table = VOLATILE_HASHTABLE(hashtable)->table; + mask = table->mask; /* read-only field */ + i = index & mask; + entry = VOLATILE_TABLE(table)->items[i]; + if (entry != NULL) { + if (entry->index == index) + return entry; /* found at the first try */ + + uintptr_t perturb = index; + while (1) { + i = (i << 2) + i + perturb + 1; + i &= mask; + entry = VOLATILE_TABLE(table)->items[i]; + if (entry != NULL) { + if (entry->index == index) + return entry; /* found */ + } + else + break; + perturb >>= PERTURB_SHIFT; + } + } + /* here, we didn't find the 'entry' with the correct index. */ + + uintptr_t rc = VOLATILE_TABLE(table)->resize_counter; + + /* if rc is RESIZING_LOCK (which is 0, so even), a concurrent thread + is writing to the hashtable. Or, if rc is another even number, it is + actually a pointer to the next version of the table, installed + just now. In both cases, this thread must simply spin loop. + */ + if (IS_EVEN(rc)) { + spin_loop(); + goto restart; + } + /* in the other cases, we need to grab the RESIZING_LOCK. + */ + if (!__sync_bool_compare_and_swap(&table->resize_counter, + rc, RESIZING_LOCK)) { + goto restart; + } + /* we now have the lock. The only table with a non-even value of + 'resize_counter' should be the last one in the chain, so if we + succeeded in locking it, check this. */ + assert(table == hashtable->table); + + /* Check that 'table->items[i]' is still NULL, + i.e. hasn't been populated under our feet. + */ + if (table->items[i] != NULL) { + table->resize_counter = rc; /* unlock */ + goto restart; + } + /* if rc is greater than 6, there is enough room for a new + item in the current table. 
+ */ + if (rc > 6) { + /* we can only enter here once! If we allocate stuff, we may + run the GC, and so 'hashtableobj' might move afterwards. */ + if (_is_in_nursery(hashtableobj)) { + entry = (stm_hashtable_entry_t *) + stm_allocate(sizeof(stm_hashtable_entry_t)); + entry->userdata = stm_hashtable_entry_userdata; + entry->index = index; + entry->object = NULL; + } + else { + /* for a non-nursery 'hashtableobj', we pretend that the + 'entry' object we're about to return was already + existing all along, with NULL in all segments. If the + caller of this function is going to modify the 'object' + field, it will call stm_write(entry) first, which will + correctly schedule 'entry' for write propagation. We + do that even if 'hashtableobj' was created by the + running transaction: the new 'entry' object is created + as if it was older than the transaction. + + Note the following difference: if 'hashtableobj' is + still in the nursery (case above), the 'entry' object + is also allocated from the nursery, and after a minor + collection it ages as an old-but-created-by-the- + current-transaction object. We could try to emulate + this here, or to create young 'entry' objects, but + doing either of these would require careful + synchronization with other pieces of the code that may + change. 
+ */ + acquire_privatization_lock(); + char *p = allocate_outside_nursery_large( + sizeof(stm_hashtable_entry_t)); + entry = (stm_hashtable_entry_t *)(p - stm_object_pages); + + long j; + for (j = 0; j <= NB_SEGMENTS; j++) { + struct stm_hashtable_entry_s *e; + e = (struct stm_hashtable_entry_s *) + REAL_ADDRESS(get_segment_base(j), entry); + e->header.stm_flags = GCFLAG_WRITE_BARRIER; + e->userdata = stm_hashtable_entry_userdata; + e->index = index; + e->object = NULL; + } + release_privatization_lock(); + } + write_fence(); /* make sure 'entry' is fully initialized here */ + table->items[i] = entry; + hashtable->additions += 1; + write_fence(); /* make sure 'table->items' is written here */ + VOLATILE_TABLE(table)->resize_counter = rc - 6; /* unlock */ + return entry; + } + else { + /* if rc is smaller than 6, we must allocate a new bigger table. + */ + uintptr_t biggercount = table->mask + 1; + if (biggercount < 50000) + biggercount *= 4; + else + biggercount *= 2; + _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/false); + goto restart; + } +} + +object_t *stm_hashtable_read(object_t *hobj, stm_hashtable_t *hashtable, + uintptr_t key) +{ + stm_hashtable_entry_t *e = stm_hashtable_lookup(hobj, hashtable, key); + stm_read((object_t *)e); + return e->object; +} + +void stm_hashtable_write(object_t *hobj, stm_hashtable_t *hashtable, + uintptr_t key, object_t *nvalue, + stm_thread_local_t *tl) +{ + STM_PUSH_ROOT(*tl, nvalue); + stm_hashtable_entry_t *e = stm_hashtable_lookup(hobj, hashtable, key); + stm_write((object_t *)e); + STM_POP_ROOT(*tl, nvalue); + e->object = nvalue; +} + +static void _stm_compact_hashtable(stm_hashtable_t *hashtable) +{ + stm_hashtable_table_t *table = hashtable->table; + assert(!IS_EVEN(table->resize_counter)); + + if (hashtable->additions * 4 > table->mask) { + hashtable->additions = 0; + uintptr_t initial_rc = (table->mask + 1) * 4 + 1; + uintptr_t num_entries_times_6 = initial_rc - table->resize_counter; + uintptr_t 
count = INITIAL_HASHTABLE_SIZE; + while (count * 4 < num_entries_times_6) + count *= 2; + /* sanity-check: 'num_entries_times_6 < initial_rc', and so 'count' + can never grow larger than the current table size. */ + assert(count <= table->mask + 1); + + _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/true); + } + + table = hashtable->table; + assert(!IS_EVEN(table->resize_counter)); + + if (table != &hashtable->initial_table) { + uintptr_t rc = hashtable->initial_table.resize_counter; + while (1) { + assert(IS_EVEN(rc)); + assert(rc != RESIZING_LOCK); + + stm_hashtable_table_t *old_table = (stm_hashtable_table_t *)rc; + if (old_table == table) + break; + rc = old_table->resize_counter; + free(old_table); + } + hashtable->initial_table.resize_counter = (uintptr_t)table; + } +} + +void stm_hashtable_tracefn(stm_hashtable_t *hashtable, void trace(object_t **)) +{ + if (trace == TRACE_FOR_MAJOR_COLLECTION) + _stm_compact_hashtable(hashtable); + + stm_hashtable_table_t *table; + table = VOLATILE_HASHTABLE(hashtable)->table; + + uintptr_t j, mask = table->mask; + for (j = 0; j <= mask; j++) { + stm_hashtable_entry_t *volatile *pentry; + pentry = &VOLATILE_TABLE(table)->items[j]; + if (*pentry != NULL) { + trace((object_t **)pentry); + } + } +} diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -44,6 +44,10 @@ tree_contains(STM_PSEGMENT->young_outside_nursery, (uintptr_t)obj)); } +static inline bool _is_from_same_transaction(object_t *obj) { + return _is_young(obj) || IS_OVERFLOW_OBJ(STM_PSEGMENT, obj); +} + long stm_can_move(object_t *obj) { /* 'long' return value to avoid using 'bool' in the public interface */ @@ -329,6 +333,7 @@ } +#define TRACE_FOR_MINOR_COLLECTION (&minor_trace_if_young) static inline void _collect_now(object_t *obj) { @@ -342,7 +347,7 @@ outside the nursery, possibly forcing nursery objects out and adding them to 'objects_pointing_to_nursery' as well. 
*/ char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + stmcb_trace((struct object_s *)realobj, TRACE_FOR_MINOR_COLLECTION); obj->stm_flags |= GCFLAG_WRITE_BARRIER; } diff --git a/c7/stm/prof.c b/c7/stm/prof.c --- a/c7/stm/prof.c +++ b/c7/stm/prof.c @@ -74,7 +74,13 @@ return false; } -static void forksupport_open_new_profiling_file(void) +static void prof_forksupport_prepare(void) +{ + if (profiling_file != NULL) + fflush(profiling_file); +} + +static void prof_forksupport_child(void) { if (close_timing_log() && profiling_basefn != NULL) { char filename[1024]; @@ -98,6 +104,15 @@ expand_marker = default_expand_marker; profiling_expand_marker = expand_marker; + static bool fork_support_ready = false; + if (!fork_support_ready) { + int res = pthread_atfork(prof_forksupport_prepare, + NULL, prof_forksupport_child); + if (res != 0) + stm_fatalerror("pthread_atfork() failed: %m"); + fork_support_ready = true; + } + if (!open_timing_log(profiling_file_name)) return -1; diff --git a/c7/stm/prof.h b/c7/stm/prof.h deleted file mode 100644 --- a/c7/stm/prof.h +++ /dev/null @@ -1,2 +0,0 @@ - -static void forksupport_open_new_profiling_file(void); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -15,7 +15,6 @@ #include "stm/fprintcolor.h" #include "stm/weakref.h" #include "stm/marker.h" -#include "stm/prof.h" #include "stm/finalizer.h" #include "stm/misc.c" @@ -39,3 +38,4 @@ #include "stm/prof.c" #include "stm/rewind_setjmp.c" #include "stm/finalizer.c" +#include "stm/hashtable.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -508,7 +508,7 @@ /* Support for light finalizers. This is a simple version of finalizers that guarantees not to do anything fancy, like not resurrecting objects. 
*/ -void (*stmcb_light_finalizer)(object_t *); +extern void (*stmcb_light_finalizer)(object_t *); void stm_enable_light_finalizer(object_t *); /* Support for regular finalizers. Unreachable objects with @@ -525,9 +525,34 @@ transaction. For older objects, the finalizer is called from a random thread between regular transactions, in a new custom transaction. */ -void (*stmcb_finalizer)(object_t *); +extern void (*stmcb_finalizer)(object_t *); object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up); +/* Hashtables. Keys are 64-bit unsigned integers, values are + 'object_t *'. Note that the type 'stm_hashtable_t' is not an + object type at all; you need to allocate and free it explicitly. + If you want to embed the hashtable inside an 'object_t' you + probably need a light finalizer to do the freeing. */ +typedef struct stm_hashtable_s stm_hashtable_t; +typedef TLPREFIX struct stm_hashtable_entry_s stm_hashtable_entry_t; + +stm_hashtable_t *stm_hashtable_create(void); +void stm_hashtable_free(stm_hashtable_t *); +stm_hashtable_entry_t *stm_hashtable_lookup(object_t *, stm_hashtable_t *, + uintptr_t key); +object_t *stm_hashtable_read(object_t *, stm_hashtable_t *, uintptr_t key); +void stm_hashtable_write(object_t *, stm_hashtable_t *, uintptr_t key, + object_t *nvalue, stm_thread_local_t *); +extern uint32_t stm_hashtable_entry_userdata; +void stm_hashtable_tracefn(stm_hashtable_t *, void (object_t **)); + +struct stm_hashtable_entry_s { + struct object_s header; + uint32_t userdata; + uintptr_t index; + object_t *object; +}; + /* ==================== END ==================== */ #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -165,6 +165,19 @@ void stm_enable_light_finalizer(object_t *); void (*stmcb_finalizer)(object_t *); + +typedef struct stm_hashtable_s stm_hashtable_t; +stm_hashtable_t *stm_hashtable_create(void); +void stm_hashtable_free(stm_hashtable_t *); +bool 
_check_hashtable_read(object_t *, stm_hashtable_t *, uintptr_t key); +object_t *hashtable_read_result; +bool _check_hashtable_write(object_t *, stm_hashtable_t *, uintptr_t key, + object_t *nvalue, stm_thread_local_t *tl); +uint32_t stm_hashtable_entry_userdata; +void stm_hashtable_tracefn(stm_hashtable_t *, void (object_t **)); + +void _set_hashtable(object_t *obj, stm_hashtable_t *h); +stm_hashtable_t *_get_hashtable(object_t *obj); """) @@ -240,6 +253,19 @@ CHECKED(stm_become_globally_unique_transaction(tl, "TESTGUT")); } +object_t *hashtable_read_result; + +bool _check_hashtable_read(object_t *hobj, stm_hashtable_t *h, uintptr_t key) +{ + CHECKED(hashtable_read_result = stm_hashtable_read(hobj, h, key)); +} + +bool _check_hashtable_write(object_t *hobj, stm_hashtable_t *h, uintptr_t key, + object_t *nvalue, stm_thread_local_t *tl) +{ + CHECKED(stm_hashtable_write(hobj, h, key, nvalue, tl)); +} + #undef CHECKED @@ -268,6 +294,20 @@ return *WEAKREF_PTR(obj, size); } +void _set_hashtable(object_t *obj, stm_hashtable_t *h) +{ + stm_char *field_addr = ((stm_char*)obj); + field_addr += SIZEOF_MYOBJ; /* header */ + *(stm_hashtable_t *TLPREFIX *)field_addr = h; +} + +stm_hashtable_t *_get_hashtable(object_t *obj) +{ + stm_char *field_addr = ((stm_char*)obj); + field_addr += SIZEOF_MYOBJ; /* header */ + return *(stm_hashtable_t *TLPREFIX *)field_addr; +} + void _set_ptr(object_t *obj, int n, object_t *v) { long nrefs = (long)((myobj_t*)obj)->type_id - 421420; @@ -296,7 +336,14 @@ ssize_t stmcb_size_rounded_up(struct object_s *obj) { struct myobj_s *myobj = (struct myobj_s*)obj; + assert(myobj->type_id != 0); if (myobj->type_id < 421420) { + if (myobj->type_id == 421419) { /* hashtable */ + return sizeof(struct myobj_s) + 1 * sizeof(void*); + } + if (myobj->type_id == 421418) { /* hashtable entry */ + return sizeof(struct stm_hashtable_entry_s); + } /* basic case: tid equals 42 plus the size of the object */ assert(myobj->type_id >= 42 + sizeof(struct myobj_s)); 
assert((myobj->type_id - 42) >= 16); @@ -316,6 +363,17 @@ { int i; struct myobj_s *myobj = (struct myobj_s*)obj; + if (myobj->type_id == 421419) { + /* hashtable */ + stm_hashtable_t *h = *((stm_hashtable_t **)(myobj + 1)); + stm_hashtable_tracefn(h, visit); + return; + } + if (myobj->type_id == 421418) { + /* hashtable entry */ + object_t **ref = &((struct stm_hashtable_entry_s *)myobj)->object; + visit(ref); + } if (myobj->type_id < 421420) { /* basic case: no references */ return; @@ -334,6 +392,8 @@ { int i; struct myobj_s *myobj = (struct myobj_s*)obj; + assert(myobj->type_id != 421419); + assert(myobj->type_id != 421418); if (myobj->type_id < 421420) { /* basic case: no references */ return; @@ -404,6 +464,7 @@ CARD_SIZE = lib._STM_CARD_SIZE # 16b at least NB_SEGMENTS = lib.STM_NB_SEGMENTS FAST_ALLOC = lib._STM_FAST_ALLOC +lib.stm_hashtable_entry_userdata = 421418 class Conflict(Exception): pass @@ -441,6 +502,18 @@ lib._set_weakref(o, point_to_obj) return o +def stm_allocate_hashtable(): + o = lib.stm_allocate(16) + tid = 421419 + lib._set_type_id(o, tid) + h = lib.stm_hashtable_create() + lib._set_hashtable(o, h) + return o + +def get_hashtable(o): + assert lib._get_type_id(o) == 421419 + return lib._get_hashtable(o) + def stm_get_weakref(o): return lib._get_weakref(o) @@ -558,7 +631,6 @@ - SHADOWSTACK_LENGTH = 1000 _keepalive = weakref.WeakKeyDictionary() diff --git a/c7/test/test_finalizer.py b/c7/test/test_finalizer.py --- a/c7/test/test_finalizer.py +++ b/c7/test/test_finalizer.py @@ -9,6 +9,7 @@ # @ffi.callback("void(object_t *)") def light_finalizer(obj): + assert stm_get_obj_size(obj) == 48 segnum = lib.current_segment_num() tlnum = '?' 
for n, tl in enumerate(self.tls): @@ -20,6 +21,10 @@ lib.stmcb_light_finalizer = light_finalizer self._light_finalizer_keepalive = light_finalizer + def teardown_method(self, meth): + lib.stmcb_light_finalizer = ffi.NULL + BaseTest.teardown_method(self, meth) + def expect_finalized(self, objs, from_tlnum=None): assert [obj for (obj, tlnum) in self.light_finalizers_called] == objs if from_tlnum is not None: @@ -49,6 +54,15 @@ self.commit_transaction() self.expect_finalized([]) + def test_young_light_finalizer_aborts(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.expect_finalized([]) + self.abort_transaction() + self.start_transaction() + self.expect_finalized([lp1], from_tlnum=0) + def test_old_light_finalizer(self): self.start_transaction() lp1 = stm_allocate(48) @@ -99,15 +113,47 @@ stm_major_collect() self.expect_finalized([lp1], from_tlnum=1) + def test_old_light_finalizer_aborts(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) + self.commit_transaction() + # + self.start_transaction() + self.expect_finalized([]) + self.abort_transaction() + self.expect_finalized([]) + + def test_overflow_light_finalizer_aborts(self): + self.start_transaction() + lp1 = stm_allocate(48) + lib.stm_enable_light_finalizer(lp1) + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + self.push_root(lp1) + self.expect_finalized([]) + self.abort_transaction() + self.expect_finalized([lp1], from_tlnum=0) + class TestRegularFinalizer(BaseTest): + expect_content_character = None + run_major_collect_in_finalizer = False def setup_method(self, meth): BaseTest.setup_method(self, meth) # @ffi.callback("void(object_t *)") def finalizer(obj): + print "finalizing!", obj + assert stm_get_obj_size(obj) in [16, 32, 48, 56] + if self.expect_content_character is not None: + assert stm_get_char(obj) == self.expect_content_character 
self.finalizers_called.append(obj) + if self.run_major_collect_in_finalizer: + stm_major_collect() self.finalizers_called = [] lib.stmcb_finalizer = finalizer self._finalizer_keepalive = finalizer @@ -137,6 +183,21 @@ stm_major_collect() self.expect_finalized([lp1, lp2, lp3]) + def test_finalizer_from_other_thread(self): + self.start_transaction() + lp1 = stm_allocate_with_finalizer(48) + stm_set_char(lp1, 'H') + self.expect_content_character = 'H' + print lp1 + # + self.switch(1) + self.start_transaction() + stm_major_collect() + self.expect_finalized([]) # marked as dead, but wrong thread + # + self.switch(0) + self.expect_finalized([lp1]) # now it has been finalized + def test_finalizer_ordering(self): self.start_transaction() lp1 = stm_allocate_with_finalizer_refs(1) @@ -148,7 +209,7 @@ stm_major_collect() self.expect_finalized([lp3]) - def test_finalizer_extra_transation(self): + def test_finalizer_extra_transaction(self): self.start_transaction() lp1 = stm_allocate_with_finalizer(32) print lp1 @@ -182,3 +243,12 @@ stm_major_collect() self.switch(0) self.expect_finalized([lp2, lp1]) + + def test_run_major_collect_in_finalizer(self): + self.run_major_collect_in_finalizer = True + self.start_transaction() + lp1 = stm_allocate_with_finalizer(32) + lp2 = stm_allocate_with_finalizer(32) + lp3 = stm_allocate_with_finalizer(32) + print lp1, lp2, lp3 + stm_major_collect() diff --git a/c7/test/test_hashtable.py b/c7/test/test_hashtable.py new file mode 100644 --- /dev/null +++ b/c7/test/test_hashtable.py @@ -0,0 +1,414 @@ +from support import * +import random +import py, sys + + +def htget(o, key): + h = get_hashtable(o) + res = lib._check_hashtable_read(o, h, key) + if res: + raise Conflict + return lib.hashtable_read_result + +def htset(o, key, nvalue, tl): + h = get_hashtable(o) + res = lib._check_hashtable_write(o, h, key, nvalue, tl) + if res: + raise Conflict + + +class BaseTestHashtable(BaseTest): + + def setup_method(self, meth): + BaseTest.setup_method(self, 
meth) + # + @ffi.callback("void(object_t *)") + def light_finalizer(obj): + print 'light_finalizer:', obj + try: + assert lib._get_type_id(obj) == 421419 + self.seen_hashtables -= 1 + except: + self.errors.append(sys.exc_info()[2]) + raise + + lib.stmcb_light_finalizer = light_finalizer + self._light_finalizer_keepalive = light_finalizer + self.seen_hashtables = 0 + self.errors = [] + + def teardown_method(self, meth): + BaseTest.teardown_method(self, meth) + lib.stmcb_light_finalizer = ffi.NULL + assert self.errors == [] + assert self.seen_hashtables == 0 + + def allocate_hashtable(self): + h = stm_allocate_hashtable() + lib.stm_enable_light_finalizer(h) + self.seen_hashtables += 1 + return h + + +class TestHashtable(BaseTestHashtable): + + def test_empty(self): + self.start_transaction() + h = self.allocate_hashtable() + for i in range(100): + index = random.randrange(0, 1<<64) + got = htget(h, index) + assert got == ffi.NULL + + def test_set_value(self): + self.start_transaction() + tl0 = self.tls[self.current_thread] + h = self.allocate_hashtable() + lp1 = stm_allocate(16) + htset(h, 12345678901, lp1, tl0) + assert htget(h, 12345678901) == lp1 + for i in range(64): + index = 12345678901 ^ (1 << i) + assert htget(h, index) == ffi.NULL + assert htget(h, 12345678901) == lp1 + + def test_no_conflict(self): + lp1 = stm_allocate_old(16) + lp2 = stm_allocate_old(16) + # + self.start_transaction() + tl0 = self.tls[self.current_thread] + h = self.allocate_hashtable() + self.push_root(h) + stm_set_char(lp1, 'A') + htset(h, 1234, lp1, tl0) + self.commit_transaction() + # + self.start_transaction() + h = self.pop_root() + stm_set_char(lp2, 'B') + htset(h, 9991234, lp2, tl0) + # + self.switch(1) + self.start_transaction() + lp1b = htget(h, 1234) + assert lp1b != ffi.NULL + assert stm_get_char(lp1b) == 'A' + assert lp1b == lp1 + self.commit_transaction() + # + self.switch(0) + assert htget(h, 9991234) == lp2 + assert stm_get_char(lp2) == 'B' + assert htget(h, 1234) == lp1 + 
htset(h, 1234, ffi.NULL, tl0) + self.commit_transaction() + # + self.start_transaction() + stm_major_collect() # to get rid of the hashtable object + + def test_conflict(self): + lp1 = stm_allocate_old(16) + lp2 = stm_allocate_old(16) + # + self.start_transaction() + h = self.allocate_hashtable() + self.push_root(h) + self.commit_transaction() + # + self.start_transaction() + h = self.pop_root() + self.push_root(h) + tl0 = self.tls[self.current_thread] + htset(h, 1234, lp1, tl0) + # + self.switch(1) + self.start_transaction() + tl1 = self.tls[self.current_thread] + py.test.raises(Conflict, "htset(h, 1234, lp2, tl1)") + # + self.switch(0) + self.pop_root() + stm_major_collect() # to get rid of the hashtable object + self.commit_transaction() + + def test_keepalive_minor(self): + self.start_transaction() + h = self.allocate_hashtable() + self.push_root(h) + lp1 = stm_allocate(16) + stm_set_char(lp1, 'N') + tl0 = self.tls[self.current_thread] + htset(h, 1234, lp1, tl0) + stm_minor_collect() + h = self.pop_root() + lp1b = htget(h, 1234) + assert lp1b != ffi.NULL + assert stm_get_char(lp1b) == 'N' + assert lp1b != lp1 + + def test_keepalive_major(self): + lp1 = stm_allocate_old(16) + # + self.start_transaction() + h = self.allocate_hashtable() + self.push_root(h) + stm_set_char(lp1, 'N') + tl0 = self.tls[self.current_thread] + htset(h, 1234, lp1, tl0) + self.commit_transaction() + # + self.start_transaction() + stm_major_collect() + h = self.pop_root() + lp1b = htget(h, 1234) + assert lp1b == lp1 + assert stm_get_char(lp1b) == 'N' + # + stm_major_collect() # to get rid of the hashtable object + self.commit_transaction() + + def test_minor_collect_bug1(self): + self.start_transaction() + lp1 = stm_allocate(32) + self.push_root(lp1) + h = self.allocate_hashtable() + self.push_root(h) + stm_minor_collect() + h = self.pop_root() + lp1 = self.pop_root() + print 'h', h # 0xa040010 + print 'lp1', lp1 # 0xa040040 + tl0 = self.tls[self.current_thread] + htset(h, 1, lp1, tl0) + 
self.commit_transaction() + # + self.start_transaction() + assert htget(h, 1) == lp1 + stm_major_collect() # to get rid of the hashtable object + + def test_minor_collect_bug1_different_thread(self): + self.start_transaction() + lp1 = stm_allocate(32) + self.push_root(lp1) + h = self.allocate_hashtable() + self.push_root(h) + stm_minor_collect() + h = self.pop_root() + lp1 = self.pop_root() + print 'h', h # 0xa040010 + print 'lp1', lp1 # 0xa040040 + tl0 = self.tls[self.current_thread] + htset(h, 1, lp1, tl0) + self.commit_transaction() + # + self.switch(1) # in a different thread + self.start_transaction() + assert htget(h, 1) == lp1 + stm_major_collect() # to get rid of the hashtable object + + +class TestRandomHashtable(BaseTestHashtable): + + def setup_method(self, meth): + BaseTestHashtable.setup_method(self, meth) + self.values = [] + self.mirror = None + self.roots = [] + self.other_thread = ([], []) + + def push_roots(self): + assert self.roots is None + self.roots = [] + for k, hitems in self.mirror.items(): + assert lib._get_type_id(k) == 421419 + for key, value in hitems.items(): + assert lib._get_type_id(value) < 1000 + self.push_root(value) + self.roots.append(key) + self.push_root(k) + self.roots.append(None) + for v in self.values: + self.push_root(v) + self.mirror = None + + def pop_roots(self): + assert self.mirror is None + for i in reversed(range(len(self.values))): + self.values[i] = self.pop_root() + assert stm_get_char(self.values[i]) == chr((i + 1) & 255) + self.mirror = {} + for r in reversed(self.roots): + obj = self.pop_root() + if r is None: + assert lib._get_type_id(obj) == 421419 + self.mirror[obj] = curhitems = {} + else: + assert lib._get_type_id(obj) < 1000 + curhitems[r] = obj + self.roots = None + + def exchange_threads(self): + old_thread = (self.values, self.roots) + self.switch(1 - self.current_thread) + (self.values, self.roots) = self.other_thread + self.mirror = None + self.other_thread = old_thread + + def 
test_random_single_thread(self): + import random + # + for i in range(100): + print "start_transaction" + self.start_transaction() + self.pop_roots() + for j in range(10): + r = random.random() + if r < 0.05: + h = self.allocate_hashtable() + print "allocate_hashtable ->", h + self.mirror[h] = {} + elif r < 0.10: + print "stm_minor_collect" + self.push_roots() + stm_minor_collect() + self.pop_roots() + elif r < 0.11: + print "stm_major_collect" + self.push_roots() + stm_major_collect() + self.pop_roots() + elif r < 0.5: + if not self.mirror: continue + h = random.choice(self.mirror.keys()) + if not self.mirror[h]: continue + key = random.choice(self.mirror[h].keys()) + value = self.mirror[h][key] + print "htget(%r, %r) == %r" % (h, key, value) + self.push_roots() + self.push_root(value) + result = htget(h, key) + value = self.pop_root() + assert result == value + self.pop_roots() + elif r < 0.6: + if not self.mirror: continue + h = random.choice(self.mirror.keys()) + key = random.randrange(0, 40) + if key in self.mirror[h]: continue + print "htget(%r, %r) == NULL" % (h, key) + self.push_roots() + assert htget(h, key) == ffi.NULL + self.pop_roots() + elif r < 0.63: + if not self.mirror: continue + h, _ = self.mirror.popitem() + print "popped", h + elif r < 0.75: + obj = stm_allocate(32) + self.values.append(obj) + stm_set_char(obj, chr(len(self.values) & 255)) + else: + if not self.mirror or not self.values: continue + h = random.choice(self.mirror.keys()) + key = random.randrange(0, 32) + value = random.choice(self.values) + print "htset(%r, %r, %r)" % (h, key, value) + self.push_roots() + tl = self.tls[self.current_thread] + htset(h, key, value, tl) + self.pop_roots() + self.mirror[h][key] = value + self.push_roots() + print "commit_transaction" + self.commit_transaction() + # + self.start_transaction() + self.become_inevitable() + self.pop_roots() + stm_major_collect() # to get rid of the hashtable objects + + def test_random_multiple_threads(self): + import 
random + self.start_transaction() + self.exchange_threads() + self.start_transaction() + self.pop_roots() + # + for j in range(1000): + r = random.random() + if r > 0.9: + if r > 0.95: + self.push_roots() + self.commit_transaction() + self.start_transaction() + self.pop_roots() + else: + self.push_roots() + self.exchange_threads() + self.pop_roots() + continue + + if r < 0.05: + h = self.allocate_hashtable() + print "allocate_hashtable ->", h + self.mirror[h] = {} + elif r < 0.10: + print "stm_minor_collect" + self.push_roots() + stm_minor_collect() + self.pop_roots() + elif r < 0.11: + print "stm_major_collect" + self.push_roots() + stm_major_collect() + self.pop_roots() + elif r < 0.5: + if not self.mirror: continue + h = random.choice(self.mirror.keys()) + if not self.mirror[h]: continue + key = random.choice(self.mirror[h].keys()) + value = self.mirror[h][key] + print "htget(%r, %r) == %r" % (h, key, value) + self.push_roots() + self.push_root(value) + result = htget(h, key) + value = self.pop_root() + assert result == value + self.pop_roots() + elif r < 0.6: + if not self.mirror: continue + h = random.choice(self.mirror.keys()) + key = random.randrange(0, 40) + if key in self.mirror[h]: continue + print "htget(%r, %r) == NULL" % (h, key) + self.push_roots() + assert htget(h, key) == ffi.NULL + self.pop_roots() + elif r < 0.63: + if not self.mirror: continue + h, _ = self.mirror.popitem() + print "popped", h + elif r < 0.75: + obj = stm_allocate(32) + self.values.append(obj) + stm_set_char(obj, chr(len(self.values) & 255)) + else: + if not self.mirror or not self.values: continue + h = random.choice(self.mirror.keys()) + key = random.randrange(0, 32) + value = random.choice(self.values) + print "htset(%r, %r, %r)" % (h, key, value) + self.push_roots() + tl = self.tls[self.current_thread] + htset(h, key, value, tl) + self.pop_roots() + self.mirror[h][key] = value + # + print 'closing down...' 
+ self.become_inevitable() + self.commit_transaction() + self.exchange_threads() + self.pop_roots() + self.become_inevitable() + stm_major_collect() # to get rid of the hashtable objects From noreply at buildbot.pypy.org Mon Jan 19 19:35:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 19:35:25 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: update to stmgc/957947bc7ad9 Message-ID: <20150119183525.87AB31C102D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75441:7ae5e18184da Date: 2015-01-19 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/7ae5e18184da/ Log: update to stmgc/957947bc7ad9 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -194265597fad +957947bc7ad9 diff --git a/rpython/translator/stm/src_stm/stm/hashtable.c b/rpython/translator/stm/src_stm/stm/hashtable.c --- a/rpython/translator/stm/src_stm/stm/hashtable.c +++ b/rpython/translator/stm/src_stm/stm/hashtable.c @@ -7,8 +7,14 @@ length 2**64. Initially it is full of NULLs. It's obviously implemented as a dictionary in which NULL objects are not needed. -The only operations on a hashtable are reading or writing an object at -a given index. +A real dictionary can be implemented on top of it, by using the index +`hash(key)` in the hashtable, and storing a list of `(key, value)` +pairs at that index (usually only one, unless there is a hash +collision). + +The main operations on a hashtable are reading or writing an object at a +given index. It might support in the future enumerating the indexes of +non-NULL objects. There are two markers for every index (a read and a write marker). This is unlike regular arrays, which have only two markers in total. @@ -19,10 +25,15 @@ First idea: have the hashtable in raw memory, pointing to "entry" objects. 
The entry objects themselves point to the user-specified -objects, and they have the read/write markers. Every entry object -itself, once created, stays around. It is only removed by the next +objects. The entry objects have the read/write markers. Every entry +object, once created, stays around. It is only removed by the next major GC if it points to NULL and its read/write markers are not set in any currently-running transaction. + +References +---------- + +Inspired by: http://ppl.stanford.edu/papers/podc011-bronson.pdf */ From noreply at buildbot.pypy.org Mon Jan 19 21:01:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 21:01:55 +0100 (CET) Subject: [pypy-commit] cffi win32-ownlib: Re-skip some errno tests on Win32, where errno is a joke Message-ID: <20150119200155.320781C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-ownlib Changeset: r1638:6453fd3ed4b3 Date: 2015-01-19 21:02 +0100 http://bitbucket.org/cffi/cffi/changeset/6453fd3ed4b3/ Log: Re-skip some errno tests on Win32, where errno is a joke diff --git a/testing/test_ownlib.py b/testing/test_ownlib.py --- a/testing/test_ownlib.py +++ b/testing/test_ownlib.py @@ -132,6 +132,8 @@ def test_getting_errno(self): if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") + if sys.platform == 'win32': + py.test.skip("fails, errno at multiple addresses") ffi = FFI(backend=self.Backend()) ffi.cdef(""" int test_getting_errno(void); @@ -144,6 +146,8 @@ def test_setting_errno(self): if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") + if sys.platform == 'win32': + py.test.skip("fails, errno at multiple addresses") if self.Backend is CTypesBackend and '__pypy__' in sys.modules: py.test.skip("XXX errno issue with ctypes on pypy?") ffi = FFI(backend=self.Backend()) @@ -231,7 +235,8 @@ assert ownlib_r() is not None # kept alive by ffi res = func() assert res == -1 - assert ffi.errno == 123 + if sys.platform != 
'win32': # else, errno at multiple addresses + assert ffi.errno == 123 def test_struct_by_value(self): if self.module is None: From noreply at buildbot.pypy.org Mon Jan 19 21:13:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 21:13:23 +0100 (CET) Subject: [pypy-commit] cffi win32-ownlib: ready to be merged Message-ID: <20150119201323.E9B1D1C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-ownlib Changeset: r1639:47b5310faa2d Date: 2015-01-19 21:09 +0100 http://bitbucket.org/cffi/cffi/changeset/47b5310faa2d/ Log: ready to be merged From noreply at buildbot.pypy.org Mon Jan 19 21:13:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 21:13:25 +0100 (CET) Subject: [pypy-commit] cffi default: hg merge win32-ownlib: enable test_ownlib to run on Windows, and Message-ID: <20150119201325.2866C1C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1640:b1c3d82a14a8 Date: 2015-01-19 21:10 +0100 http://bitbucket.org/cffi/cffi/changeset/b1c3d82a14a8/ Log: hg merge win32-ownlib: enable test_ownlib to run on Windows, and add a test with many arguments that may still fail on some platforms with a broken libffi diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ demo/__pycache__ __pycache__ _cffi_backend*.so +_cffi_backend.pyd doc/build build dist diff --git a/testing/test_ownlib.py b/testing/test_ownlib.py --- a/testing/test_ownlib.py +++ b/testing/test_ownlib.py @@ -7,34 +7,133 @@ SOURCE = """\ #include -int test_getting_errno(void) { +#ifdef _WIN32 +#define EXPORT __declspec(dllexport) +#else +#define EXPORT +#endif + +EXPORT int test_getting_errno(void) { errno = 123; return -1; } -int test_setting_errno(void) { +EXPORT int test_setting_errno(void) { return errno; +}; + +typedef struct { + long x; + long y; +} POINT; + +typedef struct { + long left; + long top; + long right; + long bottom; +} RECT; + + +EXPORT int PointInRect(RECT *prc, POINT pt) +{ + if (pt.x < 
prc->left) + return 0; + if (pt.x > prc->right) + return 0; + if (pt.y < prc->top) + return 0; + if (pt.y > prc->bottom) + return 0; + return 1; +}; + +EXPORT long left = 10; +EXPORT long top = 20; +EXPORT long right = 30; +EXPORT long bottom = 40; + +EXPORT RECT ReturnRect(int i, RECT ar, RECT* br, POINT cp, RECT dr, + RECT *er, POINT fp, RECT gr) +{ + /*Check input */ + if (ar.left + br->left + dr.left + er->left + gr.left != left * 5) + { + ar.left = 100; + return ar; + } + if (ar.right + br->right + dr.right + er->right + gr.right != right * 5) + { + ar.right = 100; + return ar; + } + if (cp.x != fp.x) + { + ar.left = -100; + } + if (cp.y != fp.y) + { + ar.left = -200; + } + switch(i) + { + case 0: + return ar; + break; + case 1: + return dr; + break; + case 2: + return gr; + break; + + } + return ar; } -int my_array[7] = {0, 1, 2, 3, 4, 5, 6}; +EXPORT int my_array[7] = {0, 1, 2, 3, 4, 5, 6}; """ class TestOwnLib(object): Backend = CTypesBackend def setup_class(cls): - if sys.platform == 'win32': - return + cls.module = None from testing.udir import udir udir.join('testownlib.c').write(SOURCE) - subprocess.check_call( - 'gcc testownlib.c -shared -fPIC -o testownlib.so', - cwd=str(udir), shell=True) - cls.module = str(udir.join('testownlib.so')) + if sys.platform == 'win32': + import os + # did we already build it? 
+ if os.path.exists(str(udir.join('testownlib.dll'))): + cls.module = str(udir.join('testownlib.dll')) + return + # try (not too hard) to find the version used to compile this python + # no mingw + from distutils.msvc9compiler import get_build_version + version = get_build_version() + toolskey = "VS%0.f0COMNTOOLS" % version + toolsdir = os.environ.get(toolskey, None) + if toolsdir is None: + return + productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC") + productdir = os.path.abspath(productdir) + vcvarsall = os.path.join(productdir, "vcvarsall.bat") + if os.path.isfile(vcvarsall): + cmd = '"%s"' % vcvarsall + ' & cl.exe testownlib.c ' \ + ' /LD /Fetestownlib.dll' + subprocess.check_call(cmd, cwd = str(udir), shell=True) + cls.module = str(udir.join('testownlib.dll')) + else: + subprocess.check_call( + 'gcc testownlib.c -shared -fPIC -o testownlib.so', + cwd=str(udir), shell=True) + cls.module = str(udir.join('testownlib.so')) def test_getting_errno(self): + if self.module is None: + py.test.skip("fix the auto-generation of the tiny test lib") if sys.platform == 'win32': - py.test.skip("fix the auto-generation of the tiny test lib") + py.test.skip("fails, errno at multiple addresses") ffi = FFI(backend=self.Backend()) ffi.cdef(""" int test_getting_errno(void); @@ -45,8 +144,10 @@ assert ffi.errno == 123 def test_setting_errno(self): + if self.module is None: + py.test.skip("fix the auto-generation of the tiny test lib") if sys.platform == 'win32': - py.test.skip("fix the auto-generation of the tiny test lib") + py.test.skip("fails, errno at multiple addresses") if self.Backend is CTypesBackend and '__pypy__' in sys.modules: py.test.skip("XXX errno issue with ctypes on pypy?") ffi = FFI(backend=self.Backend()) @@ -60,7 +161,7 @@ assert ffi.errno == 42 def test_my_array_7(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -80,7 +181,7 
@@ assert ownlib.my_array[i] == i def test_my_array_no_length(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") if self.Backend is CTypesBackend: py.test.skip("not supported by the ctypes backend") @@ -100,7 +201,7 @@ assert ownlib.my_array[i] == i def test_keepalive_lib(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -118,7 +219,7 @@ assert res == -1 def test_keepalive_ffi(self): - if sys.platform == 'win32': + if self.module is None: py.test.skip("fix the auto-generation of the tiny test lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -134,4 +235,46 @@ assert ownlib_r() is not None # kept alive by ffi res = func() assert res == -1 - assert ffi.errno == 123 + if sys.platform != 'win32': # else, errno at multiple addresses + assert ffi.errno == 123 + + def test_struct_by_value(self): + if self.module is None: + py.test.skip("fix the auto-generation of the tiny test lib") + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + typedef struct { + long x; + long y; + } POINT; + + typedef struct { + long left; + long top; + long right; + long bottom; + } RECT; + + long left, top, right, bottom; + + RECT ReturnRect(int i, RECT ar, RECT* br, POINT cp, RECT dr, + RECT *er, POINT fp, RECT gr); + """) + ownlib = ffi.dlopen(self.module) + + rect = ffi.new('RECT[1]') + pt = ffi.new('POINT[1]') + pt[0].x = 15 + pt[0].y = 25 + rect[0].left = ownlib.left + rect[0].right = ownlib.right + rect[0].top = ownlib.top + rect[0].bottom = ownlib.bottom + + for i in range(4): + ret = ownlib.ReturnRect(i, rect[0], rect, pt[0], rect[0], + rect, pt[0], rect[0]) + assert ret.left == ownlib.left + assert ret.right == ownlib.right + assert ret.top == ownlib.top + assert ret.bottom == ownlib.bottom From noreply at buildbot.pypy.org Mon Jan 19 21:56:10 2015 From: noreply at buildbot.pypy.org 
(mattip) Date: Mon, 19 Jan 2015 21:56:10 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: merge default into branch Message-ID: <20150119205610.1CCF21C0098@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: stdlib-2.7.9 Changeset: r75442:31e630ae6cd7 Date: 2015-01-19 22:56 +0200 http://bitbucket.org/pypy/pypy/changeset/31e630ae6cd7/ Log: merge default into branch diff too long, truncating to 2000 out of 6229 lines diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -17,6 +17,10 @@ except ImportError: assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} +try: + from __pypy__ import reversed_dict +except ImportError: + reversed_dict = lambda d: reversed(d.keys()) try: from thread import get_ident as _get_ident @@ -29,142 +33,35 @@ ################################################################################ class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as regular dictionaries. + '''Dictionary that remembers insertion order. - # The internal self.__map dict maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + In PyPy all dicts are ordered anyway. This is mostly useful as a + placeholder to mean "this dict must be ordered even on CPython". - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. The signature is the same as - regular dictionaries, but keyword arguments are not recommended because - their insertion order is arbitrary. 
- - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link at the end of the linked list, - # and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - return dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which gets - # removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link_prev, link_next, _ = self.__map.pop(key) - link_prev[1] = link_next # update link_prev[NEXT] - link_next[0] = link_prev # update link_next[PREV] - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - # Traverse the linked list in order. - root = self.__root - curr = root[1] # start at the first node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[1] # move to next node + Known difference: iterating over an OrderedDict which is being + concurrently modified raises RuntimeError in PyPy. In CPython + instead we get some behavior that appears reasonable in some + cases but is nonsensical in other cases. This is officially + forbidden by the CPython docs, so we forbid it explicitly for now. + ''' def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - # Traverse the linked list in reverse order. 
- root = self.__root - curr = root[0] # start at the last node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[0] # move to previous node - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - dict.clear(self) - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) pairs in od' - for k in self: - yield (k, self[k]) - - update = MutableMapping.update - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding - value. If key is not found, d is returned if given, otherwise KeyError - is raised. - - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default + return reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' - if not self: - raise KeyError('dictionary is empty') - key = next(reversed(self) if last else iter(self)) - value = self.pop(key) - return key, value + if last: + return dict.popitem(self) + else: + it = dict.__iter__(self) + try: + k = it.next() + except StopIteration: + raise KeyError('dictionary is empty') + return (k, self.pop(k)) def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' @@ -183,8 +80,6 @@ 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) @@ -193,17 +88,6 @@ 'od.copy() -> a shallow copy of od' return self.__class__(self) - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. - If not specified, the value defaults to None. - - ''' - self = cls() - for key in iterable: - self[key] = value - return self - def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. diff --git a/lib-python/2.7/test/test_collections.py b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -579,7 +579,12 @@ def __repr__(self): return "MySet(%s)" % repr(list(self)) s = MySet([5,43,2,1]) - self.assertEqual(s.pop(), 1) + # changed from CPython 2.7: it was "s.pop() == 1" but I see + # nothing that guarantees a particular order here. In the + # 'all_ordered_dicts' branch of PyPy (or with OrderedDict + # instead of sets), it consistently returns 5, but this test + # should not rely on this or any other order. 
+ self.assert_(s.pop() in [5,43,2,1]) def test_issue8750(self): empty = WithSet() @@ -1019,8 +1024,9 @@ c=3, e=5).items()), pairs) # mixed input # make sure no positional args conflict with possible kwdargs - self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, - ['self']) + if '__init__' in OrderedDict.__dict__: # absent in PyPy + self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, + ['self']) # Make sure that direct calls to __init__ do not clear previous contents d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)]) diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -47,6 +47,11 @@ Install build-time dependencies ------------------------------- +(**Note**: for some hints on how to translate the Python interpreter under +Windows, see the `windows document`_) + +.. _`windows document`: windows.html + To build PyPy on Unix using the C translation backend, you need at least a C compiler and ``make`` installed. Further, some optional modules have additional diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,12 +30,10 @@ Initialize threads. Only need to be called if there are any threads involved -.. function:: long pypy_setup_home(char* home, int verbose); +.. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given - "PyPy home directory". It is not strictly necessary to execute it before - running Python code, but without it you will not be able to import any - non-builtin module from the standard library. The arguments are: + "PyPy home directory". 
The arguments are: * ``home``: NULL terminated path to an executable inside the pypy directory (can be a .so name, can be made up) @@ -84,17 +82,22 @@ const char source[] = "print 'hello from pypy'"; - int main() + int main(void) { - int res; + int res; - rpython_startup_code(); - // pypy_setup_home() is not needed in this trivial example - res = pypy_execute_source((char*)source); - if (res) { - printf("Error calling pypy_execute_source!\n"); - } - return res; + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; } If we save it as ``x.c`` now, compile it and run it (on linux) with:: diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py --- a/pypy/doc/test/test_whatsnew.py +++ b/pypy/doc/test/test_whatsnew.py @@ -78,9 +78,10 @@ def test_whatsnew(): doc = ROOT.join('pypy', 'doc') - whatsnew_list = doc.listdir('whatsnew-*.rst') - whatsnew_list.sort() - last_whatsnew = whatsnew_list[-1].read() + #whatsnew_list = doc.listdir('whatsnew-*.rst') + #whatsnew_list.sort() + #last_whatsnew = whatsnew_list[-1].read() + last_whatsnew = doc.join('whatsnew-head.rst').read() startrev, documented = parse_doc(last_whatsnew) merged, branch = get_merged_branches(ROOT, startrev, '') merged.discard('default') diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -51,3 +51,72 @@ .. branch: ssa-flow Use SSA form for flow graphs inside build_flow() and part of simplify_graph() + +.. branch: ufuncapi + +Implement most of the GenericUfunc api to support numpy linalg. The strategy is +to encourage use of pure python or cffi ufuncs by extending frompyfunc(). +See the docstring of frompyfunc for more details. 
This dovetails with a branch +of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in +python, calling lapack from cffi. The branch also support traditional use of +cpyext GenericUfunc definitions in c. + +.. branch: all_ordered_dicts + +This makes ordered dicts the default dictionary implementation in +RPython and in PyPy. It polishes the basic idea of rordereddict.py +and then fixes various things, up to simplifying +collections.OrderedDict. + +Note: Python programs can rely on the guaranteed dict order in PyPy +now, but for compatibility with other Python implementations they +should still use collections.OrderedDict where that really matters. +Also, support for reversed() was *not* added to the 'dict' class; +use OrderedDict. + +Benchmark results: in the noise. A few benchmarks see good speed +improvements but the average is very close to parity. + +.. branch: berkerpeksag/fix-broken-link-in-readmerst-1415127402066 +.. branch: bigint-with-int-ops +.. branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 +.. branch: float-opt +.. branch: gc-incminimark-pinning + +This branch adds an interface rgc.pin which would (very temporarily) +make object non-movable. That's used by rffi.alloc_buffer and +rffi.get_nonmovable_buffer and improves performance considerably for +IO operations. + +.. branch: gc_no_cleanup_nursery + +A branch started by Wenzhu Man (SoC'14) and then done by fijal. It +removes the clearing of the nursery. The drawback is that new objects +are not automatically filled with zeros any longer, which needs some +care, mostly for GC references (which the GC tries to follow, so they +must not contain garbage). The benefit is a quite large speed-up. + +.. branch: improve-gc-tracing-hooks +.. branch: improve-ptr-conv-error +.. branch: intern-not-immortal + +Fix intern() to return mortal strings, like in CPython. + +.. branch: issue1922-take2 +.. branch: kill-exported-symbols-list +.. branch: kill-rctime +.. 
branch: kill_ll_termios +.. branch: look-into-all-modules +.. branch: nditer-external_loop +.. branch: numpy-generic-item +.. branch: osx-shared + +``--shared`` support on OS/X (thanks wouter) + +.. branch: portable-threadlocal +.. branch: pypy-dont-copy-ops +.. branch: recursion_and_inlining +.. branch: slim-down-resumedescr +.. branch: squeaky/use-cflags-for-compiling-asm +.. branch: unicode-fix +.. branch: zlib_zdict diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -78,6 +78,7 @@ Then you need to execute:: + \vc\vcvars.bat editbin /largeaddressaware translator.exe where ``translator.exe`` is the pypy.exe or cpython.exe you will use to @@ -96,7 +97,7 @@ Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------------------------------- Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local_2.4.zip @@ -110,7 +111,13 @@ set INCLUDE=\include;\tcltk\include;%INCLUDE% set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. If you choose this method, you do not need +to download/build anything else. + +Nonabrided method (building from scratch) +----------------------------------------- + +If you want to, you can rebuild everything from scratch by continuing. 
The Boehm garbage collector diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -101,7 +101,7 @@ if space.is_none(w_path): if verbose: debug("Failed to find library based on pypy_find_stdlib") - return 1 + return rffi.cast(rffi.INT, 1) space.startup() space.call_function(w_pathsetter, w_path) # import site @@ -109,13 +109,13 @@ import_ = space.getattr(space.getbuiltinmodule('__builtin__'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) - return 0 + return rffi.cast(rffi.INT, 0) except OperationError, e: if verbose: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 + return rffi.cast(rffi.INT, -1) @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -254,11 +254,15 @@ return rep def visit_Name(self, name): - # Turn loading None into a constant lookup. Eventaully, we can do this - # for True and False, too. + # Turn loading None into a constant lookup. We cannot do this + # for True and False, because rebinding them is allowed (2.7). if name.id == "None": - assert name.ctx == ast.Load - return ast.Const(self.space.w_None, name.lineno, name.col_offset) + # The compiler refuses to parse "None = ...", but "del None" + # is allowed (if pointless). Check anyway: custom asts that + # correspond to "None = ..." can be made by hand. 
+ if name.ctx == ast.Load: + return ast.Const(self.space.w_None, name.lineno, + name.col_offset) return name def visit_Tuple(self, tup): diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -655,6 +655,18 @@ assert ex.match(space, space.w_SyntaxError) assert 'hello_world' in space.str_w(space.str(ex.get_w_value(space))) + def test_del_None(self): + snippet = '''if 1: + try: + del None + except NameError: + pass + ''' + code = self.compiler.compile(snippet, '', 'exec', 0) + space = self.space + w_d = space.newdict() + space.exec_(code, w_d, w_d) + class TestPythonAstCompiler_25_grammar(BaseTestCompiler): def setup_method(self, method): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -27,6 +27,6 @@ pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) - assert lltype.typeOf(res) == rffi.LONG + assert lltype.typeOf(res) == rffi.INT assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -78,6 +78,7 @@ 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', + 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -30,3 +30,17 @@ return space.newdict(strdict=True) 
else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) + +def reversed_dict(space, w_obj): + """Enumerate the keys in a dictionary object in reversed order. + + This is a __pypy__ function instead of being simply done by calling + reversed(), for CPython compatibility: dictionaries are only ordered + on PyPy. You should use the collections.OrderedDict class for cases + where ordering is important. That class implements __reversed__ by + calling __pypy__.reversed_dict(). + """ + from pypy.objspace.std.dictmultiobject import W_DictMultiObject + if not isinstance(w_obj, W_DictMultiObject): + raise OperationError(space.w_TypeError, space.w_None) + return w_obj.nondescr_reversed_dict(space) diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -99,7 +99,7 @@ def print_error(self, operr, extra_line): space = self.space - operr.write_unraisable(space, "callback ", self.w_callable, + operr.write_unraisable(space, "cffi callback ", self.w_callable, with_traceback=True, extra_line=extra_line) def write_error_return_value(self, ll_res): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1175,7 +1175,7 @@ assert sys.stderr.getvalue() == '' assert f(10000) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Traceback (most recent call last): File "$", line $, in Zcb1 $ @@ -1187,7 +1187,7 @@ bigvalue = 20000 assert f(bigvalue) == -42 assert matches(sys.stderr.getvalue(), """\ -From callback : +From cffi callback : Trying to convert the result back to C: OverflowError: integer 60000 does not fit 'short' """) diff --git a/pypy/module/_multibytecodec/test/test_translation.py 
b/pypy/module/_multibytecodec/test/test_translation.py --- a/pypy/module/_multibytecodec/test/test_translation.py +++ b/pypy/module/_multibytecodec/test/test_translation.py @@ -1,8 +1,11 @@ from pypy.module._multibytecodec import c_codecs from rpython.translator.c.test import test_standalone +from rpython.config.translationoption import get_combined_translation_config class TestTranslation(test_standalone.StandaloneTests): + config = get_combined_translation_config(translating=True) + config.translation.gc = 'boehm' def test_translation(self): # diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,10 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +#define import_array() +#define PyArray_New _PyArray_New + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -11,6 +11,8 @@ #endif #include "old_defines.h" +#include "npy_common.h" +#include "__multiarray_api.h" #define NPY_UNUSED(x) x #define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) @@ -22,23 +24,10 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -typedef unsigned char npy_bool; -typedef unsigned char npy_uint8; -typedef unsigned short npy_uint16; -typedef signed short npy_int16; -typedef signed char npy_int8; -typedef int npy_int; - -typedef long npy_intp; -#ifndef NPY_INTP_FMT -#define NPY_INTP_FMT "ld" -#endif -#ifndef import_array -#define import_array() -#endif #define NPY_MAXDIMS 32 +#ifndef NDARRAYTYPES_H typedef struct { npy_intp *ptr; int len; @@ -73,19 +62,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 
NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - #define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) #define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ ((type) <= NPY_ULONGLONG)) @@ -167,6 +143,21 @@ #define PyArray_ISNOTSWAPPED(arr) (1) #define PyArray_ISBYTESWAPPED(arr) (0) +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + /* functions */ #ifndef PyArray_NDIM diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -0,0 +1,1786 @@ +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H + +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" + +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. 
+ * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. + */ +#define NPY_FEATURE_VERSION NPY_API_VERSION + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. 
+ */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 
1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. 
+ ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). 
+ */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. 
Except in the + * case of UPDATEIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that shold be + * decref'd on deletion + * + * For UPDATEIFCOPY flag this is an + * array to-be-updated upon deletion + * of this one + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. 
*/ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional + * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements + * and the array is contiguous if ndarray.squeeze() is contiguous. + * I.e. dimensions for which `ndarray.shape[dimension] == 1` are + * ignored. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + * + * This flag may be requested in constructor functions. 
+ */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropiate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when this array is deallocated + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. 
+ */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. + */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + 
+/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 
0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + 
npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define _PyArray_ITER_NEXT3(it) do { \ + if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ + (it)->coordinates[2]++; \ + (it)->dataptr += (it)->strides[2]; \ + } \ + else { \ + (it)->coordinates[2] = 0; \ + (it)->dataptr -= (it)->backstrides[2]; \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] \ + (it)->backstrides[1]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for 
(__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp) (ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to 
PyArray_Broadcast must be binary compatible + * with this structure. + */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + +/* Store the information needed for fancy-indexing over an array */ + +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; 
/* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object + iterators */ + PyArrayIterObject *ait; /* flat Iterator for + underlying array */ + + /* flat iterator for subspace (when numiter < nd) */ + PyArrayIterObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + /* + * if subspace iteration, the these are the coordinates to the + * start of the subspace. + */ + npy_intp bscoord[NPY_MAXDIMS]; + + PyObject *indexobj; /* creating obj */ + /* + * consec is first used to indicate wether fancy indices are + * consecutive and then denotes at which axis they are inserted + */ + int consec; + char *dataptr; + +} PyArrayMapIterObject; + +enum { From noreply at buildbot.pypy.org Mon Jan 19 23:53:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 23:53:48 +0100 (CET) Subject: [pypy-commit] pypy errno-again: Close branch ready to merge Message-ID: <20150119225348.8E2F11C0098@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: errno-again Changeset: r75443:2955d4b0c67b Date: 2015-01-19 23:47 +0100 http://bitbucket.org/pypy/pypy/changeset/2955d4b0c67b/ Log: Close branch ready to merge From noreply at buildbot.pypy.org Mon Jan 19 23:53:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 23:53:51 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge errno-again Message-ID: <20150119225351.9825D1C0098@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75444:05783c6d4ff1 Date: 2015-01-19 23:53 +0100 http://bitbucket.org/pypy/pypy/changeset/05783c6d4ff1/ Log: hg merge errno-again Changes how errno, GetLastError, and WSAGetLastError are handled. 
The idea is to tie reading the error status as close as possible to the external function call. This fixes some bugs, both of the very rare kind (e.g. errno on Linux might in theory be overwritten by mmap(), called rarely during major GCs, if such a major GC occurs at exactly the wrong time), and some of the less rare kind (particularly on Windows tests). Now the rffi.llexternal() declaration must specify what kind of errno support it needs ('save_err' argument), and the rposix.get_errno() function has been consequently renamed to get_saved_errno(). We need to adapt all RPython libraries that use rposix.get_errno(), which is why the function was renamed. Same with rwin32.GetLastError(). diff too long, truncating to 2000 out of 6200 lines diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -486,10 +486,10 @@ w_exception_class=w_exception_class) wrap_oserror._annspecialcase_ = 'specialize:arg(3)' -def exception_from_errno(space, w_type): - from rpython.rlib.rposix import get_errno +def exception_from_saved_errno(space, w_type): + from rpython.rlib.rposix import get_saved_errno - errno = get_errno() + errno = get_saved_errno() msg = os.strerror(errno) w_error = space.call_function(w_type, space.wrap(errno), space.wrap(msg)) return OperationError(w_type, w_error) diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,7 +1,7 @@ from __future__ import with_statement import sys -from pypy.interpreter.error import exception_from_errno +from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform @@ -48,11 +48,13 @@ c_clock_gettime = rffi.llexternal("clock_gettime", [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, - 
compilation_info=CConfig._compilation_info_, releasegil=False + compilation_info=CConfig._compilation_info_, releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO ) c_clock_getres = rffi.llexternal("clock_getres", [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, - compilation_info=CConfig._compilation_info_, releasegil=False + compilation_info=CConfig._compilation_info_, releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO ) @unwrap_spec(clk_id="c_int") @@ -60,7 +62,7 @@ with lltype.scoped_alloc(TIMESPEC) as tp: ret = c_clock_gettime(clk_id, tp) if ret != 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) return space.wrap(int(tp.c_tv_sec) + 1e-9 * int(tp.c_tv_nsec)) @unwrap_spec(clk_id="c_int") @@ -68,5 +70,5 @@ with lltype.scoped_alloc(TIMESPEC) as tp: ret = c_clock_getres(clk_id, tp) if ret != 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) return space.wrap(int(tp.c_tv_sec) + 1e-9 * int(tp.c_tv_nsec)) diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -160,7 +160,7 @@ @jit.jit_callback("CFFI") -def invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): +def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args @@ -168,7 +168,6 @@ ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ - e = cerrno.get_real_errno() ll_res = rffi.cast(rffi.CCHARP, ll_res) unique_id = rffi.cast(lltype.Signed, ll_userdata) callback = global_callback_mapping.get(unique_id) @@ -185,12 +184,9 @@ return # must_leave = False - ec = None space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - ec = cerrno.get_errno_container(space) - cerrno.save_errno_into(ec, e) extra_line = '' try: w_res = callback.invoke(ll_args) @@ -212,5 +208,8 @@ callback.write_error_return_value(ll_res) if must_leave: space.threadlocals.leave_thread(space) - if ec is not None: - cerrno.restore_errno_from(ec) + +def invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): + cerrno._errno_after(rffi.RFFI_ERR_ALL) + _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata) + cerrno._errno_before(rffi.RFFI_ERR_ALL) diff --git a/pypy/module/_cffi_backend/cerrno.py b/pypy/module/_cffi_backend/cerrno.py --- a/pypy/module/_cffi_backend/cerrno.py +++ b/pypy/module/_cffi_backend/cerrno.py @@ -2,7 +2,6 @@ from rpython.rlib import rposix -from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.gateway import unwrap_spec WIN32 = sys.platform == 'win32' @@ -10,43 +9,22 @@ from rpython.rlib import rwin32 -ExecutionContext._cffi_saved_errno = 0 -ExecutionContext._cffi_saved_LastError = 0 - - -def get_errno_container(space): - return space.getexecutioncontext() - -get_real_errno = rposix.get_errno - - -def restore_errno_from(ec): - if WIN32: - rwin32.SetLastError(ec._cffi_saved_LastError) - rposix.set_errno(ec._cffi_saved_errno) - -def save_errno_into(ec, errno): - ec._cffi_saved_errno = errno - if WIN32: - ec._cffi_saved_LastError = rwin32.GetLastError() - +_errno_before = rposix._errno_before +_errno_after = 
rposix._errno_after def get_errno(space): - ec = get_errno_container(space) - return space.wrap(ec._cffi_saved_errno) + return space.wrap(rposix.get_saved_errno()) @unwrap_spec(errno=int) def set_errno(space, errno): - ec = get_errno_container(space) - ec._cffi_saved_errno = errno + rposix.set_saved_errno(errno) # ____________________________________________________________ @unwrap_spec(code=int) def getwinerror(space, code=-1): - from rpython.rlib.rwin32 import FormatError + from rpython.rlib.rwin32 import GetLastError_saved, FormatError if code == -1: - ec = get_errno_container(space) - code = ec._cffi_saved_LastError + code = GetLastError_saved() message = FormatError(code) return space.newtuple([space.wrap(code), space.wrap(message)]) diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -155,13 +155,9 @@ # argtype is a pointer type, and w_obj a list/tuple/str mustfree_max_plus_1 = i + 1 - ec = cerrno.get_errno_container(space) - cerrno.restore_errno_from(ec) jit_libffi.jit_ffi_call(cif_descr, rffi.cast(rffi.VOIDP, funcaddr), buffer) - e = cerrno.get_real_errno() - cerrno.save_errno_into(ec, e) resultdata = rffi.ptradd(buffer, cif_descr.exchange_result) w_res = self.ctitem.copy_and_convert_to_object(resultdata) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -347,7 +347,8 @@ # ____________________________________________________________ -rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP) +rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, + save_err=rffi.RFFI_SAVE_ERRNO) rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) @@ -357,7 +358,7 @@ def 
__init__(self, fd, mode): self.llf = rffi_fdopen(fd, mode) if not self.llf: - raise OSError(rposix.get_errno(), "fdopen failed") + raise OSError(rposix.get_saved_errno(), "fdopen failed") rffi_setbuf(self.llf, lltype.nullptr(rffi.CCHARP.TO)) def close(self): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -19,10 +19,16 @@ def make_write_blocking_error(space, written): + # XXX CPython reads 'errno' here. I *think* it doesn't make sense, + # because we might reach this point after calling a write() method + # that may be overridden by the user, if that method returns None. + # In that case what we get is a potentially nonsense errno. But + # we'll use get_saved_errno() anyway, and hope (like CPython does) + # that we're getting a reasonable value at this point. w_type = space.gettypeobject(W_BlockingIOError.typedef) w_value = space.call_function( w_type, - space.wrap(rposix.get_errno()), + space.wrap(rposix.get_saved_errno()), space.wrap("write could not complete without blocking"), space.wrap(written)) return OperationError(w_type, w_value) diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py --- a/pypy/module/_locale/interp_locale.py +++ b/pypy/module/_locale/interp_locale.py @@ -300,7 +300,8 @@ return space.wrap(result) _bindtextdomain = rlocale.external('bindtextdomain', [rffi.CCHARP, rffi.CCHARP], - rffi.CCHARP) + rffi.CCHARP, + save_err=rffi.RFFI_SAVE_ERRNO) @unwrap_spec(domain=str) def bindtextdomain(space, domain, w_dir): @@ -325,7 +326,7 @@ rffi.free_charp(dir_c) if not dirname: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OperationError(space.w_OSError, space.wrap(errno)) return space.wrap(rffi.charp2str(dirname)) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- 
a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -406,7 +406,7 @@ size, written_ptr, rffi.NULL) if (result == 0 and - rwin32.GetLastError() == ERROR_NO_SYSTEM_RESOURCES): + rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES): raise oefmt(space.w_ValueError, "Cannot send %d bytes over connection", size) finally: @@ -430,7 +430,7 @@ if result: return intmask(read_ptr[0]), lltype.nullptr(rffi.CCHARP.TO) - err = rwin32.GetLastError() + err = rwin32.GetLastError_saved() if err == ERROR_BROKEN_PIPE: raise OperationError(space.w_EOFError, space.w_None) elif err != ERROR_MORE_DATA: @@ -441,7 +441,7 @@ lltype.nullptr(rwin32.LPDWORD.TO), lltype.nullptr(rwin32.LPDWORD.TO), left_ptr): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) length = intmask(read_ptr[0] + left_ptr[0]) if length > maxlength: # bad message, close connection @@ -460,7 +460,7 @@ read_ptr, rffi.NULL) if not result: rffi.free_charp(newbuf) - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) assert read_ptr[0] == left_ptr[0] return length, newbuf @@ -480,7 +480,7 @@ lltype.nullptr(rwin32.LPDWORD.TO), bytes_ptr, lltype.nullptr(rwin32.LPDWORD.TO)): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) bytes = bytes_ptr[0] finally: lltype.free(bytes_ptr, flavor='raw') @@ -506,7 +506,8 @@ lltype.nullptr(rwin32.LPDWORD.TO), bytes_ptr, lltype.nullptr(rwin32.LPDWORD.TO)): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, + rwin32.lastSavedWindowsError()) bytes = bytes_ptr[0] finally: lltype.free(bytes_ptr, flavor='raw') diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ 
b/pypy/module/_multiprocessing/interp_semaphore.py @@ -26,12 +26,14 @@ _CreateSemaphore = rwin32.winexternal( 'CreateSemaphoreA', [rffi.VOIDP, rffi.LONG, rffi.LONG, rwin32.LPCSTR], - rwin32.HANDLE) - _CloseHandle = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], + rwin32.HANDLE, + save_err=rffi.RFFI_FULL_LASTERROR) + _CloseHandle_no_errno = rwin32.winexternal('CloseHandle', [rwin32.HANDLE], rwin32.BOOL, releasegil=False) _ReleaseSemaphore = rwin32.winexternal( 'ReleaseSemaphore', [rwin32.HANDLE, rffi.LONG, rffi.LONGP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) else: from rpython.rlib import rposix @@ -81,51 +83,61 @@ _sem_open = external('sem_open', [rffi.CCHARP, rffi.INT, rffi.INT, rffi.UINT], - SEM_T) + SEM_T, save_err=rffi.RFFI_SAVE_ERRNO) # sem_close is releasegil=False to be able to use it in the __del__ - _sem_close = external('sem_close', [SEM_T], rffi.INT, releasegil=False) - _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT) - _sem_wait = external('sem_wait', [SEM_T], rffi.INT) - _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT) - _sem_post = external('sem_post', [SEM_T], rffi.INT) - _sem_getvalue = external('sem_getvalue', [SEM_T, rffi.INTP], rffi.INT) + _sem_close_no_errno = external('sem_close', [SEM_T], rffi.INT, + releasegil=False) + _sem_close = external('sem_close', [SEM_T], rffi.INT, releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_wait = external('sem_wait', [SEM_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_trywait = external('sem_trywait', [SEM_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_post = external('sem_post', [SEM_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + _sem_getvalue = external('sem_getvalue', [SEM_T, rffi.INTP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) - _gettimeofday = external('gettimeofday', [TIMEVALP, rffi.VOIDP], rffi.INT) + _gettimeofday = 
external('gettimeofday', [TIMEVALP, rffi.VOIDP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) _select = external('select', [rffi.INT, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP, - TIMEVALP], rffi.INT) + TIMEVALP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) @jit.dont_look_inside def sem_open(name, oflag, mode, value): res = _sem_open(name, oflag, mode, value) if res == rffi.cast(SEM_T, SEM_FAILED): - raise OSError(rposix.get_errno(), "sem_open failed") + raise OSError(rposix.get_saved_errno(), "sem_open failed") return res def sem_close(handle): res = _sem_close(handle) if res < 0: - raise OSError(rposix.get_errno(), "sem_close failed") + raise OSError(rposix.get_saved_errno(), "sem_close failed") def sem_unlink(name): res = _sem_unlink(name) if res < 0: - raise OSError(rposix.get_errno(), "sem_unlink failed") + raise OSError(rposix.get_saved_errno(), "sem_unlink failed") def sem_wait(sem): res = _sem_wait(sem) if res < 0: - raise OSError(rposix.get_errno(), "sem_wait failed") + raise OSError(rposix.get_saved_errno(), "sem_wait failed") def sem_trywait(sem): res = _sem_trywait(sem) if res < 0: - raise OSError(rposix.get_errno(), "sem_trywait failed") + raise OSError(rposix.get_saved_errno(), "sem_trywait failed") def sem_timedwait(sem, deadline): res = _sem_timedwait(sem, deadline) if res < 0: - raise OSError(rposix.get_errno(), "sem_timedwait failed") + raise OSError(rposix.get_saved_errno(), "sem_timedwait failed") def _sem_timedwait_save(sem, deadline): delay = 0 @@ -135,7 +147,7 @@ # poll if _sem_trywait(sem) == 0: return 0 - elif rposix.get_errno() != errno.EAGAIN: + elif rposix.get_saved_errno() != errno.EAGAIN: return -1 now = gettimeofday() @@ -143,7 +155,7 @@ c_tv_nsec = rffi.getintfield(deadline[0], 'c_tv_nsec') if (c_tv_sec < now[0] or (c_tv_sec == now[0] and c_tv_nsec <= now[1])): - rposix.set_errno(errno.ETIMEDOUT) + rposix.set_saved_errno(errno.ETIMEDOUT) return -1 @@ -166,21 +178,21 @@ if SEM_TIMED_WAIT: _sem_timedwait = external('sem_timedwait', [SEM_T, 
TIMESPECP], - rffi.INT) + rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) else: _sem_timedwait = _sem_timedwait_save def sem_post(sem): res = _sem_post(sem) if res < 0: - raise OSError(rposix.get_errno(), "sem_post failed") + raise OSError(rposix.get_saved_errno(), "sem_post failed") def sem_getvalue(sem): sval_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: res = _sem_getvalue(sem, sval_ptr) if res < 0: - raise OSError(rposix.get_errno(), "sem_getvalue failed") + raise OSError(rposix.get_saved_errno(), "sem_getvalue failed") return rffi.cast(lltype.Signed, sval_ptr[0]) finally: lltype.free(sval_ptr, flavor='raw') @@ -190,7 +202,7 @@ try: res = _gettimeofday(now, None) if res < 0: - raise OSError(rposix.get_errno(), "gettimeofday failed") + raise OSError(rposix.get_saved_errno(), "gettimeofday failed") return (rffi.getintfield(now[0], 'c_tv_sec'), rffi.getintfield(now[0], 'c_tv_usec')) finally: @@ -216,18 +228,16 @@ if sys.platform == 'win32': def create_semaphore(space, name, val, max): - rwin32.SetLastError(0) + rwin32.SetLastError_saved(0) handle = _CreateSemaphore(rffi.NULL, val, max, rffi.NULL) # On Windows we should fail on ERROR_ALREADY_EXISTS - err = rwin32.GetLastError() + err = rwin32.GetLastError_saved() if err != 0: raise WindowsError(err, "CreateSemaphore") return handle def delete_semaphore(handle): - if not _CloseHandle(handle): - err = rwin32.GetLastError() - raise WindowsError(err, "CloseHandle") + _CloseHandle_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: @@ -286,7 +296,7 @@ def semlock_release(self, space): if not _ReleaseSemaphore(self.handle, 1, lltype.nullptr(rffi.LONGP.TO)): - err = rwin32.GetLastError() + err = rwin32.GetLastError_saved() if err == 0x0000012a: # ERROR_TOO_MANY_POSTS raise OperationError( space.w_ValueError, @@ -300,7 +310,7 @@ previous_ptr = lltype.malloc(rffi.LONGP.TO, 1, flavor='raw') try: if not _ReleaseSemaphore(self.handle, 1, previous_ptr): - raise 
rwin32.lastWindowsError("ReleaseSemaphore") + raise rwin32.lastSavedWindowsError("ReleaseSemaphore") return previous_ptr[0] + 1 finally: lltype.free(previous_ptr, flavor='raw') @@ -320,7 +330,7 @@ return sem def delete_semaphore(handle): - sem_close(handle) + _sem_close_no_errno(handle) def semlock_acquire(self, space, block, w_timeout): if not block: diff --git a/pypy/module/_multiprocessing/interp_win32.py b/pypy/module/_multiprocessing/interp_win32.py --- a/pypy/module/_multiprocessing/interp_win32.py +++ b/pypy/module/_multiprocessing/interp_win32.py @@ -41,20 +41,24 @@ rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rffi.VOIDP], - rwin32.HANDLE) + rwin32.HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) _ConnectNamedPipe = rwin32.winexternal( - 'ConnectNamedPipe', [rwin32.HANDLE, rffi.VOIDP], rwin32.BOOL) + 'ConnectNamedPipe', [rwin32.HANDLE, rffi.VOIDP], rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _SetNamedPipeHandleState = rwin32.winexternal( 'SetNamedPipeHandleState', [ rwin32.HANDLE, rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _WaitNamedPipe = rwin32.winexternal( 'WaitNamedPipeA', [rwin32.LPCSTR, rwin32.DWORD], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _PeekNamedPipe = rwin32.winexternal( 'PeekNamedPipe', [ @@ -62,31 +66,36 @@ rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _CreateFile = rwin32.winexternal( 'CreateFileA', [ rwin32.LPCSTR, rwin32.DWORD, rwin32.DWORD, rffi.VOIDP, rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE], - rwin32.HANDLE) + rwin32.HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) _WriteFile = rwin32.winexternal( 'WriteFile', [ rwin32.HANDLE, rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rffi.VOIDP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _ReadFile = rwin32.winexternal( 'ReadFile', [ rwin32.HANDLE, 
rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rffi.VOIDP], - rwin32.BOOL) + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) _ExitProcess = rwin32.winexternal( - 'ExitProcess', [rffi.UINT], lltype.Void) + 'ExitProcess', [rffi.UINT], lltype.Void, + save_err=rffi.RFFI_SAVE_LASTERROR) _GetTickCount = rwin32.winexternal( 'GetTickCount', [], rwin32.DWORD) @@ -97,10 +106,10 @@ def CloseHandle(space, w_handle): handle = handle_w(space, w_handle) if not rwin32.CloseHandle(handle): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) def GetLastError(space): - return space.wrap(rwin32.GetLastError()) + return space.wrap(rwin32.GetLastError_saved()) # __________________________________________________________ # functions for the "win32" namespace @@ -118,7 +127,7 @@ outputsize, inputsize, timeout, rffi.NULL) if handle == rwin32.INVALID_HANDLE_VALUE: - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) return w_handle(space, handle) @@ -129,7 +138,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap("expected a NULL pointer")) if not _ConnectNamedPipe(handle, rffi.NULL): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) def SetNamedPipeHandleState(space, w_handle, w_pipemode, w_maxinstances, w_timeout): @@ -149,7 +158,7 @@ statep[2] = rffi.ptradd(state, 2) if not _SetNamedPipeHandleState(handle, statep[0], statep[1], statep[2]): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) finally: lltype.free(state, flavor='raw') lltype.free(statep, flavor='raw') @@ -158,7 +167,7 @@ def WaitNamedPipe(space, name, timeout): # Careful: zero means "default value specified by CreateNamedPipe()" if not _WaitNamedPipe(name, timeout): - raise wrap_windowserror(space, rwin32.lastWindowsError()) + 
raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) @unwrap_spec(filename=str, access=r_uint, share=r_uint, disposition=r_uint, flags=r_uint) @@ -174,7 +183,7 @@ disposition, flags, rwin32.NULL_HANDLE) if handle == rwin32.INVALID_HANDLE_VALUE: - raise wrap_windowserror(space, rwin32.lastWindowsError()) + raise wrap_windowserror(space, rwin32.lastSavedWindowsError()) return w_handle(space, handle) diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_rawffi/alt/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -14,7 +14,6 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror -from pypy.module._rawffi import lasterror import os if os.name == 'nt': @@ -202,23 +201,11 @@ self.func = func self.argchain = argchain - def before(self): - lasterror.restore_last_error(self.space) - - def after(self): - lasterror.save_last_error(self.space) - def get_longlong(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.LONGLONG) - self.after() - return x + return self.func.call(self.argchain, rffi.LONGLONG) def get_ulonglong(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.ULONGLONG) - self.after() - return x + return self.func.call(self.argchain, rffi.ULONGLONG) def get_signed(self, w_ffitype): # if the declared return type of the function is smaller than LONG, @@ -229,7 +216,6 @@ # to space.wrap in order to get a nice applevel . 
# restype = w_ffitype.get_ffitype() - self.before() call = self.func.call if restype is libffi.types.slong: x = call(self.argchain, rffi.LONG) @@ -241,19 +227,14 @@ x = rffi.cast(rffi.LONG, call(self.argchain, rffi.SIGNEDCHAR)) else: raise self.error(w_ffitype) - self.after() return x def get_unsigned(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.ULONG) - self.after() - return x + return self.func.call(self.argchain, rffi.ULONG) def get_unsigned_which_fits_into_a_signed(self, w_ffitype): # the same comment as get_signed apply restype = w_ffitype.get_ffitype() - self.before() call = self.func.call if restype is libffi.types.uint: assert not libffi.IS_32_BIT @@ -266,57 +247,35 @@ x = rffi.cast(rffi.LONG, call(self.argchain, rffi.UCHAR)) else: raise self.error(w_ffitype) - self.after() return x def get_pointer(self, w_ffitype): - self.before() ptrres = self.func.call(self.argchain, rffi.VOIDP) - self.after() return rffi.cast(rffi.ULONG, ptrres) def get_char(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.UCHAR) - self.after() - return x + return self.func.call(self.argchain, rffi.UCHAR) def get_unichar(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.WCHAR_T) - self.after() - return x + return self.func.call(self.argchain, rffi.WCHAR_T) def get_float(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.DOUBLE) - self.after() - return x + return self.func.call(self.argchain, rffi.DOUBLE) def get_singlefloat(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, rffi.FLOAT) - self.after() - return x + return self.func.call(self.argchain, rffi.FLOAT) def get_struct(self, w_ffitype, w_structdescr): - self.before() addr = self.func.call(self.argchain, rffi.LONG, is_struct=True) - self.after() return w_structdescr.fromaddress(self.space, addr) def get_struct_rawffi(self, w_ffitype, w_structdescr): - self.before() uintval = self.func.call(self.argchain, 
rffi.ULONG, is_struct=True) - self.after() return w_structdescr.fromaddress(self.space, uintval) def get_void(self, w_ffitype): - self.before() - x = self.func.call(self.argchain, lltype.Void) - self.after() - return x + return self.func.call(self.argchain, lltype.Void) def unpack_argtypes(space, w_argtypes, w_restype): diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -18,7 +18,6 @@ from rpython.rlib.rarithmetic import intmask, r_uint from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker -from pypy.module._rawffi import lasterror TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ @@ -496,14 +495,10 @@ try: if self.resshape is not None: result = self.resshape.allocate(space, 1, autofree=True) - lasterror.restore_last_error(space) self.ptr.call(args_ll, result.ll_buffer) - lasterror.save_last_error(space) return space.wrap(result) else: - lasterror.restore_last_error(space) self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) - lasterror.save_last_error(space) return space.w_None except StackCheckError, e: raise OperationError(space.w_ValueError, space.wrap(e.message)) @@ -613,17 +608,19 @@ return space.wrap(W_CDLL(space, name, cdll)) def get_errno(space): - return space.wrap(rposix.get_errno()) + return space.wrap(rposix.get_saved_errno()) def set_errno(space, w_errno): - rposix.set_errno(space.int_w(w_errno)) + rposix.set_saved_errno(space.int_w(w_errno)) if sys.platform == 'win32': + # see also + # https://bitbucket.org/pypy/pypy/issue/1944/ctypes-on-windows-getlasterror def get_last_error(space): - return space.wrap(lasterror.fetch_last_error(space)) + return space.wrap(rwin32.GetLastError_saved()) @unwrap_spec(error=int) def set_last_error(space, error): - lasterror.store_last_error(space, error) + rwin32.SetLastError_saved(error) else: # always have at least a dummy version of 
these functions # (https://bugs.pypy.org/issue1242) diff --git a/pypy/module/_rawffi/lasterror.py b/pypy/module/_rawffi/lasterror.py deleted file mode 100644 --- a/pypy/module/_rawffi/lasterror.py +++ /dev/null @@ -1,40 +0,0 @@ -# For Windows only. -# https://bitbucket.org/pypy/pypy/issue/1944/ctypes-on-windows-getlasterror - -import os - -_MS_WINDOWS = os.name == "nt" - - -if _MS_WINDOWS: - from rpython.rlib import rwin32 - from pypy.interpreter.executioncontext import ExecutionContext - - - ExecutionContext._rawffi_last_error = 0 - - def fetch_last_error(space): - ec = space.getexecutioncontext() - return ec._rawffi_last_error - - def store_last_error(space, last_error): - ec = space.getexecutioncontext() - ec._rawffi_last_error = last_error - - def restore_last_error(space): - ec = space.getexecutioncontext() - lasterror = ec._rawffi_last_error - rwin32.SetLastError(lasterror) - - def save_last_error(space): - lasterror = rwin32.GetLastError() - ec = space.getexecutioncontext() - ec._rawffi_last_error = lasterror - -else: - - def restore_last_error(space): - pass - - def save_last_error(space): - pass diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -9,7 +9,7 @@ PyObject, PyObjectP, make_ref, from_ref, Py_DecRef, borrow_from) from pypy.module.cpyext.state import State from pypy.module.cpyext.import_ import PyImport_Import -from rpython.rlib.rposix import get_errno +from rpython.rlib import rposix, jit @cpython_api([PyObject, PyObject], lltype.Void) def PyErr_SetObject(space, w_type, w_value): @@ -159,6 +159,7 @@ PyErr_SetFromErrnoWithFilenameObject(space, w_type, filename) @cpython_api([PyObject, PyObject], PyObject) + at jit.dont_look_inside # direct use of _get_errno() def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value): """Similar to PyErr_SetFromErrno(), with the additional behavior that if w_value is not NULL, it is passed to the constructor 
of type as a @@ -166,7 +167,7 @@ this is used to define the filename attribute of the exception instance. Return value: always NULL.""" # XXX Doesn't actually do anything with PyErr_CheckSignals. - errno = get_errno() + errno = rffi.cast(lltype.Signed, rposix._get_errno()) msg = os.strerror(errno) if w_value: w_error = space.call_function(w_type, diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -4,12 +4,13 @@ from pypy.module.cpyext.pyobject import PyObject from rpython.rlib import rdtoa from rpython.rlib import rfloat -from rpython.rlib import rposix +from rpython.rlib import rposix, jit from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem import rffi @cpython_api([rffi.CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) + at jit.dont_look_inside # direct use of _get_errno() def PyOS_string_to_double(space, s, endptr, w_overflow_exception): """Convert a string s to a double, raising a Python exception on failure. 
The set of accepted strings corresponds to @@ -52,8 +53,9 @@ raise OperationError( space.w_ValueError, space.wrap('invalid input at position %s' % endpos)) - if rposix.get_errno() == errno.ERANGE: - rposix.set_errno(0) + err = rffi.cast(lltype.Signed, rposix._get_errno()) + if err == errno.ERANGE: + rposix._set_errno(rffi.cast(rffi.INT, 0)) if w_overflow_exception is None: if result > 0: return rfloat.INFINITY diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -55,22 +55,30 @@ constants[name] = value locals().update(constants) -def external(name, args, result): - return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_) +def external(name, args, result, **kwds): + return rffi.llexternal(name, args, result, + compilation_info=CConfig._compilation_info_, + **kwds) _flock = lltype.Ptr(cConfig.flock) -fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) -fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) -fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT) -ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT) -ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT) +fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) has_flock = cConfig.has_flock if has_flock: - c_flock = external('flock', [rffi.INT, rffi.INT], rffi.INT) + c_flock = external('flock', 
[rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) def _get_error(space, funcname): - errno = rposix.get_errno() + errno = rposix.get_saved_errno() return wrap_oserror(space, OSError(errno, funcname), exception_name = 'w_IOError') diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -123,10 +123,9 @@ assert result == 3 ** 2 self.check_trace_count(1) self.check_simple_loop({ - 'call': 1, + 'call': 2, # ccall_pow / _ll_1_threadlocalref_get(rpy_errno) 'float_eq': 2, 'float_mul': 2, - 'getarrayitem_raw': 1, # read the errno 'guard_false': 2, 'guard_not_invalidated': 1, 'guard_true': 2, @@ -136,7 +135,6 @@ 'jump': 1, 'raw_load': 1, 'raw_store': 1, - 'setarrayitem_raw': 1, # write the errno }) def define_pow_int(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -200,14 +200,11 @@ assert res == 8.0 * 300 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('cfficall', """ - setarrayitem_raw(i69, 0, i95, descr=) # write 'errno' p96 = force_token() setfield_gc(p0, p96, descr=) - f97 = call_release_gil(i59, 1.0, 3, descr=) + f97 = call_release_gil(27, i59, 1.0, 3, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) - i98 = getarrayitem_raw(i69, 0, descr=) # read 'errno' - setfield_gc(p65, i98, descr=) """, ignore_ops=['guard_not_invalidated']) def test_cffi_call_guard_not_forced_fails(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -64,7 +64,7 @@ guard_true(i56, descr=...) 
p57 = force_token() setfield_gc(p0, p57, descr=) - i58 = call_release_gil(_, i37, 1, descr=) + i58 = call_release_gil(0, _, i37, 1, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) i59 = int_is_true(i58) @@ -72,14 +72,14 @@ i60 = int_sub(i44, 1) p62 = force_token() setfield_gc(p0, p62, descr=) - i63 = call_release_gil(_, i37, 0, descr=) + i63 = call_release_gil(0, _, i37, 0, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) i64 = int_is_true(i63) guard_false(i64, descr=...) p65 = force_token() setfield_gc(p0, p65, descr=) - call_release_gil(_, i37, descr=) + call_release_gil(0, _, i37, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) guard_not_invalidated(descr=...) diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py --- a/pypy/module/select/interp_epoll.py +++ b/pypy/module/select/interp_epoll.py @@ -4,12 +4,13 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError, exception_from_errno, oefmt +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.rlib._rsocket_rffi import socketclose, FD_SETSIZE -from rpython.rlib.rposix import get_errno +from rpython.rlib.rposix import get_saved_errno from rpython.rlib.rarithmetic import intmask from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -53,19 +54,22 @@ EPOLL_CTL_DEL = cconfig["EPOLL_CTL_DEL"] epoll_create = rffi.llexternal( - "epoll_create", [rffi.INT], rffi.INT, compilation_info=eci + "epoll_create", [rffi.INT], rffi.INT, compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) epoll_ctl = rffi.llexternal( "epoll_ctl", [rffi.INT, rffi.INT, rffi.INT, lltype.Ptr(epoll_event)], 
rffi.INT, - compilation_info=eci + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) epoll_wait = rffi.llexternal( "epoll_wait", [rffi.INT, rffi.CArrayPtr(epoll_event), rffi.INT, rffi.INT], rffi.INT, compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) @@ -82,7 +86,7 @@ "sizehint must be greater than zero, got %d", sizehint) epfd = epoll_create(sizehint) if epfd < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) return space.wrap(W_Epoll(space, epfd)) @@ -114,10 +118,10 @@ rffi.setintfield(ev.c_data, 'c_fd', fd) result = epoll_ctl(self.epfd, ctl, fd, ev) - if ignore_ebadf and get_errno() == errno.EBADF: + if ignore_ebadf and get_saved_errno() == errno.EBADF: result = 0 if result < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) def descr_get_closed(self, space): return space.wrap(self.get_closed()) @@ -160,7 +164,7 @@ with lltype.scoped_alloc(rffi.CArray(epoll_event), maxevents) as evs: nfds = epoll_wait(self.epfd, evs, maxevents, int(timeout)) if nfds < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) elist_w = [None] * nfds for i in xrange(nfds): diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -1,8 +1,9 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, exception_from_errno, oefmt +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import TypeDef, generic_new_descr, GetSetProperty -from rpython.rlib._rsocket_rffi import socketclose +from rpython.rlib._rsocket_rffi import socketclose_no_errno from 
rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform @@ -86,7 +87,8 @@ "kqueue", [], rffi.INT, - compilation_info=eci + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) syscall_kevent = rffi.llexternal( @@ -99,7 +101,8 @@ lltype.Ptr(timespec) ], rffi.INT, - compilation_info=eci + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO ) @@ -110,7 +113,7 @@ def descr__new__(space, w_subtype): kqfd = syscall_kqueue() if kqfd < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) return space.wrap(W_Kqueue(space, kqfd)) @unwrap_spec(fd=int) @@ -127,7 +130,7 @@ if not self.get_closed(): kqfd = self.kqfd self.kqfd = -1 - socketclose(kqfd) + socketclose_no_errno(kqfd) def check_closed(self, space): if self.get_closed(): @@ -198,7 +201,7 @@ max_events, ptimeout) if nfds < 0: - raise exception_from_errno(space, space.w_IOError) + raise exception_from_saved_errno(space, space.w_IOError) else: elist_w = [None] * nfds for i in xrange(nfds): diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -5,7 +5,7 @@ import os import errno -from pypy.interpreter.error import OperationError, exception_from_errno +from pypy.interpreter.error import OperationError, exception_from_saved_errno from pypy.interpreter.executioncontext import (AsyncAction, AbstractActionFlag, PeriodicAsyncAction) from pypy.interpreter.gateway import unwrap_spec @@ -258,7 +258,7 @@ def siginterrupt(space, signum, flag): check_signum_in_range(space, signum) if rffi.cast(lltype.Signed, c_siginterrupt(signum, flag)) < 0: - errno = rposix.get_errno() + errno = rposix.get_saved_errno() raise OperationError(space.w_RuntimeError, space.wrap(errno)) @@ -311,7 +311,7 @@ ret = c_setitimer(which, new, old) if ret != 0: - raise exception_from_errno(space, 
get_itimer_error(space)) + raise exception_from_saved_errno(space, get_itimer_error(space)) return itimer_retval(space, old[0]) diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -7,12 +7,11 @@ # all but one will be blocked. The other threads get a chance to run # from time to time, using the periodic action GILReleaseAction. -from rpython.rlib import rthread, rgil, rwin32 +from rpython.rlib import rthread, rgil from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import PeriodicAsyncAction from pypy.module.thread.threadlocals import OSThreadLocals from rpython.rlib.objectmodel import invoke_around_extcall -from rpython.rlib.rposix import get_errno, set_errno class GILThreadLocals(OSThreadLocals): """A version of OSThreadLocals that enforces a GIL.""" @@ -75,16 +74,9 @@ before_external_call._dont_reach_me_in_del_ = True def after_external_call(): - e = get_errno() - e2 = 0 - if rwin32.WIN32: - e2 = rwin32.GetLastError() rgil.gil_acquire() rthread.gc_thread_run() after_thread_switch() - if rwin32.WIN32: - rwin32.SetLastError(e2) - set_errno(e) after_external_call._gctransformer_hint_cannot_collect_ = True after_external_call._dont_reach_me_in_del_ = True diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -17,7 +17,8 @@ "NOT_RPYTHON" self._valuedict = {} # {thread_ident: ExecutionContext()} self._cleanup_() - self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext) + self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, + loop_invariant=True) def _cleanup_(self): self._valuedict.clear() diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -62,7 +62,8 @@ _setCtrlHandlerRoutine = rffi.llexternal( 
'pypy_timemodule_setCtrlHandler', [rwin32.HANDLE], rwin32.BOOL, - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_LASTERROR) class GlobalState: def __init__(self): @@ -79,8 +80,8 @@ except WindowsError, e: raise wrap_windowserror(space, e) if not _setCtrlHandlerRoutine(globalState.interrupt_event): - raise wrap_windowserror( - space, rwin32.lastWindowsError("SetConsoleCtrlHandler")) + raise wrap_windowserror(space, + rwin32.lastSavedWindowsError("SetConsoleCtrlHandler")) globalState = GlobalState() @@ -142,7 +143,7 @@ setattr(cConfig, k, v) cConfig.tm.__name__ = "_tm" -def external(name, args, result, eci=CConfig._compilation_info_): +def external(name, args, result, eci=CConfig._compilation_info_, **kwds): if _WIN and rffi.sizeof(rffi.TIME_T) == 8: # Recent Microsoft compilers use 64bit time_t and # the corresponding functions are named differently @@ -152,7 +153,8 @@ return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv=calling_conv, - releasegil=False) + releasegil=False, + **kwds) if _POSIX: cConfig.timeval.__name__ = "_timeval" @@ -169,10 +171,12 @@ c_clock = external('clock', [rffi.TIME_TP], clock_t) c_time = external('time', [rffi.TIME_TP], rffi.TIME_T) c_ctime = external('ctime', [rffi.TIME_TP], rffi.CCHARP) -c_gmtime = external('gmtime', [rffi.TIME_TP], TM_P) +c_gmtime = external('gmtime', [rffi.TIME_TP], TM_P, + save_err=rffi.RFFI_SAVE_ERRNO) c_mktime = external('mktime', [TM_P], rffi.TIME_T) c_asctime = external('asctime', [TM_P], rffi.CCHARP) -c_localtime = external('localtime', [rffi.TIME_TP], TM_P) +c_localtime = external('localtime', [rffi.TIME_TP], TM_P, + save_err=rffi.RFFI_SAVE_ERRNO) if _POSIX: c_tzset = external('tzset', [], lltype.Void) if _WIN: @@ -304,7 +308,7 @@ _set_module_object(space, 'altzone', space.wrap(altzone)) def _get_error_msg(): - errno = rposix.get_errno() + errno = rposix.get_saved_errno() return os.strerror(errno) if sys.platform != 'win32': diff --git 
a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -11,6 +11,8 @@ from rpython.jit.backend.arm.helper.assembler import saved_registers from rpython.jit.backend.arm.helper.regalloc import check_imm_arg from rpython.jit.backend.arm.codebuilder import OverwritingBuilder +from rpython.jit.backend.llsupport import llerrno +from rpython.rtyper.lltypesystem import rffi class ARMCallbuilder(AbstractCallBuilder): @@ -172,6 +174,41 @@ self.mc.LSL_ri(resloc.value, resloc.value, 16) self.mc.ASR_ri(resloc.value, resloc.value, 16) + def write_real_errno(self, save_err): + if save_err & rffi.RFFI_READSAVED_ERRNO: + # Just before a call, read 'rpy_errno' and write it into the + # real 'errno'. The r0-r3 registers contain arguments to the + # future call; the r5-r7 registers contain various stuff. + # We still have r8-r12. + rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.LDR_ri(r.r9.value, r.sp.value, + self.asm.saved_threadlocal_addr + self.current_sp) + self.mc.LDR_ri(r.ip.value, r.r9.value, p_errno) + self.mc.LDR_ri(r.r9.value, r.r9.value, rpy_errno) + self.mc.STR_ri(r.r9.value, r.ip.value) + elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: + # Same, but write zero. + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.LDR_ri(r.r9.value, r.sp.value, + self.asm.saved_threadlocal_addr + self.current_sp) + self.mc.LDR_ri(r.ip.value, r.r9.value, p_errno) + self.mc.MOV_ri(r.r9.value, 0) + self.mc.STR_ri(r.r9.value, r.ip.value) + + def read_real_errno(self, save_err): + if save_err & rffi.RFFI_SAVE_ERRNO: + # Just after a call, read the real 'errno' and save a copy of + # it inside our thread-local 'rpy_errno'. Registers r8-r12 + # are unused here, and registers r2-r3 never contain anything + # after the call. 
+ rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.LDR_ri(r.r3.value, r.sp.value, + self.asm.saved_threadlocal_addr) + self.mc.LDR_ri(r.ip.value, r.r3.value, p_errno) + self.mc.LDR_ri(r.ip.value, r.ip.value, 0) + self.mc.STR_ri(r.ip.value, r.r3.value, rpy_errno) class SoftFloatCallBuilder(ARMCallbuilder): diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -403,7 +403,9 @@ # args = [resloc, size, sign, args...] from rpython.jit.backend.llsupport.descr import CallDescr - cb = callbuilder.get_callbuilder(self.cpu, self, arglocs[3], arglocs[4:], arglocs[0]) + func_index = 3 + is_call_release_gil + cb = callbuilder.get_callbuilder(self.cpu, self, arglocs[func_index], + arglocs[func_index+1:], arglocs[0]) descr = op.getdescr() assert isinstance(descr, CallDescr) @@ -418,7 +420,9 @@ cb.ressign = signloc.value if is_call_release_gil: - cb.emit_call_release_gil() + saveerrloc = arglocs[3] + assert saveerrloc.is_imm() + cb.emit_call_release_gil(saveerrloc.value) else: cb.emit() return fcond @@ -1286,9 +1290,13 @@ return fcond def emit_opx_threadlocalref_get(self, op, arglocs, regalloc, fcond): - ofs0, res = arglocs - assert ofs0.is_imm() + ofs_loc, size_loc, sign_loc, res_loc = arglocs + assert ofs_loc.is_imm() + assert size_loc.is_imm() + assert sign_loc.is_imm() ofs = self.saved_threadlocal_addr - self.load_reg(self.mc, res, r.sp, ofs) - self.load_reg(self.mc, res, res, ofs0.value) + self.load_reg(self.mc, res_loc, r.sp, ofs) + scale = get_scale(size_loc.value) + signed = (sign_loc.value != 0) + self._load_from_mem(res_loc, res_loc, ofs_loc, scale, signed, fcond) return fcond diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -573,11 +573,12 @@ # ... 
return self._prepare_call(op) - def _prepare_call(self, op, force_store=[], save_all_regs=False): + def _prepare_call(self, op, force_store=[], save_all_regs=False, + first_arg_index=1): args = [None] * (op.numargs() + 3) calldescr = op.getdescr() assert isinstance(calldescr, CallDescr) - assert len(calldescr.arg_classes) == op.numargs() - 1 + assert len(calldescr.arg_classes) == op.numargs() - first_arg_index for i in range(op.numargs()): args[i + 3] = self.loc(op.getarg(i)) @@ -626,9 +627,12 @@ return [loc0, res] def _prepare_threadlocalref_get(self, op, fcond): - ofs0 = imm(op.getarg(1).getint()) - res = self.force_allocate_reg(op.result) - return [ofs0, res] + ofs_loc = imm(op.getarg(1).getint()) + calldescr = op.getdescr() + size_loc = imm(calldescr.get_result_size()) + sign_loc = imm(calldescr.is_result_signed()) + res_loc = self.force_allocate_reg(op.result) + return [ofs_loc, size_loc, sign_loc, res_loc] def _prepare_guard(self, op, args=None): if args is None: @@ -1235,7 +1239,10 @@ def prepare_guard_call_may_force(self, op, guard_op, fcond): args = self._prepare_call(op, save_all_regs=True) return self._prepare_guard(guard_op, args) - prepare_guard_call_release_gil = prepare_guard_call_may_force + + def prepare_guard_call_release_gil(self, op, guard_op, fcond): + args = self._prepare_call(op, save_all_regs=True, first_arg_index=2) + return self._prepare_guard(guard_op, args) def prepare_guard_call_assembler(self, op, guard_op, fcond): locs = self.locs_for_call_assembler(op, guard_op) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -935,7 +935,7 @@ del self.force_guard_op return res - def execute_call_release_gil(self, descr, func, *args): + def execute_call_release_gil(self, descr, saveerr, func, *args): if hasattr(descr, '_original_func_'): func = descr._original_func_ # see pyjitpl.py # we want to call the function that 
does the aroundstate diff --git a/rpython/jit/backend/llsupport/callbuilder.py b/rpython/jit/backend/llsupport/callbuilder.py --- a/rpython/jit/backend/llsupport/callbuilder.py +++ b/rpython/jit/backend/llsupport/callbuilder.py @@ -42,16 +42,18 @@ self.pop_gcmap() self.load_result() - def emit_call_release_gil(self): + def emit_call_release_gil(self, save_err): """Emit a CALL_RELEASE_GIL, including calls to releasegil_addr - and reacqgil_addr.""" + and reacqgil_addr. 'save_err' is a combination of rffi.RFFI_*ERR*.""" fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil()) self.select_call_release_gil_mode() self.prepare_arguments() self.push_gcmap_for_call_release_gil() self.call_releasegil_addr_and_move_real_arguments(fastgil) + self.write_real_errno(save_err) self.emit_raw_call() self.restore_stack_pointer() + self.read_real_errno(save_err) self.move_real_result_and_call_reacqgil_addr(fastgil) self.pop_gcmap() self.load_result() @@ -62,6 +64,12 @@ def move_real_result_and_call_reacqgil_addr(self, fastgil): raise NotImplementedError + def write_real_errno(self, save_err): + raise NotImplementedError + + def read_real_errno(self, save_err): + raise NotImplementedError + def select_call_release_gil_mode(self): """Overridden in CallBuilder64""" self.is_call_release_gil = True diff --git a/rpython/jit/backend/llsupport/llerrno.py b/rpython/jit/backend/llsupport/llerrno.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/llerrno.py @@ -0,0 +1,59 @@ +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.jit.backend.llsupport.symbolic import WORD + + +def get_debug_saved_errno(cpu): + return cpu._debug_errno_container[3] + +def set_debug_saved_errno(cpu, nerrno): + assert nerrno >= 0 + cpu._debug_errno_container[3] = nerrno + +def get_rpy_errno_offset(cpu): + if cpu.translate_support_code: + from rpython.rlib import rthread + return 
rthread.tlfield_rpy_errno.getoffset() + else: + return 3 * WORD + + +def get_debug_saved_lasterror(cpu): + return cpu._debug_errno_container[4] + +def set_debug_saved_lasterror(cpu, nerrno): + assert nerrno >= 0 + cpu._debug_errno_container[4] = nerrno + +def get_rpy_lasterror_offset(cpu): + if cpu.translate_support_code: + from rpython.rlib import rthread + return rthread.tlfield_rpy_lasterror.getoffset() + else: + return 4 * WORD + + +def _fetch_addr_errno(): + eci = ExternalCompilationInfo( + separate_module_sources=[''' + #include + RPY_EXPORTED long fetch_addr_errno(void) { + return (long)(&errno); + } + ''']) + func1_ptr = rffi.llexternal('fetch_addr_errno', [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + return func1_ptr() + +def get_p_errno_offset(cpu): + if cpu.translate_support_code: + from rpython.rlib import rthread + return rthread.tlfield_p_errno.getoffset() + else: + # fetch the real address of errno (in this thread), and store it + # at offset 2 in the _debug_errno_container + if cpu._debug_errno_container[2] == 0: + addr_errno = _fetch_addr_errno() + assert addr_errno != 0 + cpu._debug_errno_container[2] = addr_errno + return 2 * WORD diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -62,6 +62,9 @@ self.floatarraydescr = ArrayDescr(ad.basesize, ad.itemsize, ad.lendescr, FLAG_FLOAT) self.setup() + self._debug_errno_container = lltype.malloc( + rffi.CArray(lltype.Signed), 5, flavor='raw', zero=True, + track_allocation=False) def getarraydescr_for_frame(self, type): if type == history.FLOAT: @@ -222,7 +225,8 @@ # as arguments, and it returns the (possibly reallocated) jitframe. # The backend can optimize OS_THREADLOCALREF_GET calls to return a # field of this threadlocal_addr, but only if 'translate_support_code': - # in untranslated tests, threadlocal_addr is a dummy NULL. 
+ # in untranslated tests, threadlocal_addr is a dummy container + # for errno tests only. FUNCPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF, llmemory.Address], llmemory.GCREF)) @@ -259,7 +263,8 @@ ll_threadlocal_addr = llop.threadlocalref_addr( llmemory.Address) else: - ll_threadlocal_addr = llmemory.NULL + ll_threadlocal_addr = rffi.cast(llmemory.Address, + self._debug_errno_container) llop.gc_writebarrier(lltype.Void, ll_frame) ll_frame = func(ll_frame, ll_threadlocal_addr) finally: diff --git a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py @@ -2,6 +2,8 @@ from rpython.rlib.jit import dont_look_inside from rpython.rlib.objectmodel import invoke_around_extcall from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rlib import rposix from rpython.rtyper.annlowlevel import llhelper @@ -95,3 +97,37 @@ def test_close_stack(self): self.run('close_stack') assert 'call_release_gil' in udir.join('TestCompileFramework.log').read() + + def define_get_set_errno(self): + eci = ExternalCompilationInfo( + post_include_bits=[r''' + #include + static int test_get_set_errno(void) { + int r = errno; + //fprintf(stderr, "read saved errno: %d\n", r); + errno = 42; + return r; + } + ''']) + + c_test = rffi.llexternal('test_get_set_errno', [], rffi.INT, + compilation_info=eci, + save_err=rffi.RFFI_FULL_ERRNO) + + def before(n, x): + return (n, None, None, None, None, None, + None, None, None, None, None, None) + # + def f(n, x, *args): + rposix.set_saved_errno(24) + result1 = c_test() + result2 = rposix.get_saved_errno() + assert result1 == 24 + assert result2 == 42 + n -= 1 + return (n, x) + args + return before, f, None + + def test_get_set_errno(self): + self.run('get_set_errno') + assert 
'call_release_gil' in udir.join('TestCompileFramework.log').read() diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -5,7 +5,7 @@ from rpython.rlib.jit import promote from rpython.rlib import jit_hooks, rposix from rpython.rlib.objectmodel import keepalive_until_here -from rpython.rlib.rthread import ThreadLocalReference +from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.test.support import CCompiledMixin from rpython.jit.codewriter.policy import StopAtXPolicy @@ -128,7 +128,8 @@ class Foo(object): pass - t = ThreadLocalReference(Foo) + t = ThreadLocalReference(Foo, loop_invariant=True) + tf = ThreadLocalField(lltype.Char, "test_call_assembler_") def change(newthing): somewhere_else.frame.thing = newthing @@ -156,6 +157,7 @@ frame.thing = Thing(nextval + 1) i += 1 if t.get().nine != 9: raise ValueError + if ord(tf.getraw()) != 0x92: raise ValueError return frame.thing.val driver2 = JitDriver(greens = [], reds = ['n']) @@ -181,6 +183,7 @@ foo = Foo() foo.nine = value t.set(foo) + tf.setraw("\x92") return foo def mainall(codeno, bound): diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -20,6 +20,7 @@ from rpython.rlib.rarithmetic import intmask, is_valid_int from rpython.jit.backend.detect_cpu import autodetect from rpython.jit.backend.llsupport import jitframe +from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU IS_32_BIT = sys.maxint < 2**32 @@ -2512,7 +2513,7 @@ tok = BoxInt() faildescr = BasicFailDescr(1) ops = [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1], i2, + ResOperation(rop.CALL_RELEASE_GIL, 
[ConstInt(0), funcbox, i1], i2, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [i2], None, descr=BasicFinalDescr(0)) @@ -2570,7 +2571,8 @@ tok = BoxInt() faildescr = BasicFailDescr(1) ops = [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i0, i1, i2, i3], None, + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(0), funcbox, i0, i1, i2, i3], None, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(0)) @@ -2625,7 +2627,8 @@ for i in range(50): i3 = BoxInt() ops += [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox, i1, i2], i3, + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(0), funcbox, i1, i2], i3, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ] @@ -2697,7 +2700,7 @@ assert 0, kind # ops = [ - ResOperation(rop.CALL_RELEASE_GIL, [funcbox], b3, + ResOperation(rop.CALL_RELEASE_GIL, [ConstInt(0), funcbox], b3, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [b3], None, descr=BasicFinalDescr(0)) @@ -2881,7 +2884,8 @@ loadcodes = ''.join(loadcodes) print loadcodes ops += [ - ResOperation(rop.CALL_RELEASE_GIL, insideboxes, None, + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(0)] + insideboxes, None, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(0)) @@ -2916,6 +2920,310 @@ assert got == expected, '\n'.join( ['bad args, signature %r' % codes[1:]] + different_values) + def test_call_release_gil_save_errno(self): + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") + eci = ExternalCompilationInfo( + separate_module_sources=[''' + #include + static long f1(long a, 
long b, long c, long d, + long e, long f, long g) { + errno = 42; + return (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); + } + RPY_EXPORTED + long test_call_release_gil_save_errno(void) { + return (long)&f1; + } + ''']) + fn_name = 'test_call_release_gil_save_errno' + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) + # + for saveerr in [rffi.RFFI_ERR_NONE, rffi.RFFI_SAVE_ERRNO]: + faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] + i1 = BoxInt() + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop(inputargs, ops, looptoken) + # + llerrno.set_debug_saved_errno(self.cpu, 24) + deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3) + original_result = self.cpu.get_int_value(deadframe, 0) + result = llerrno.get_debug_saved_errno(self.cpu) + print 'saveerr =', saveerr, ': got result =', result + # + if saveerr == rffi.RFFI_SAVE_ERRNO: + assert result == 42 # from the C code + else: + assert result == 24 # not touched + assert original_result == 3456789 + + def test_call_release_gil_readsaved_errno(self): + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") + eci = ExternalCompilationInfo( + separate_module_sources=[r''' + #include + #include + static long f1(long a, long b, long c, long d, + long e, long f, long g) { + long r = errno; + printf("read saved errno: %ld\n", r); + r += 100 * (a 
+ 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); + return r; + } + RPY_EXPORTED + long test_call_release_gil_readsaved_errno(void) { + return (long)&f1; + } + ''']) + fn_name = 'test_call_release_gil_readsaved_errno' + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) + # + for saveerr in [rffi.RFFI_READSAVED_ERRNO, rffi.RFFI_ZERO_ERRNO_BEFORE]: + faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] + i1 = BoxInt() + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop(inputargs, ops, looptoken) + # + llerrno.set_debug_saved_errno(self.cpu, 24) + deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3) + result = self.cpu.get_int_value(deadframe, 0) + assert llerrno.get_debug_saved_errno(self.cpu) == 24 + # + if saveerr == rffi.RFFI_READSAVED_ERRNO: + assert result == 24 + 345678900 + else: + assert result == 0 + 345678900 + + def test_call_release_gil_save_lasterror(self): + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") + if sys.platform != 'win32': + py.test.skip("Windows test only") + eci = ExternalCompilationInfo( + separate_module_sources=[''' + #include + static long f1(long a, long b, long c, long d, + long e, long f, long g) { + SetLastError(42); + return (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); + } + RPY_EXPORTED + long test_call_release_gil_save_lasterror(void) 
{ + return (long)&f1; + } + ''']) + fn_name = 'test_call_release_gil_save_lasterror' + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) + # + for saveerr in [rffi.RFFI_SAVE_ERRNO, # but not _LASTERROR + rffi.RFFI_SAVE_LASTERROR]: + faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] + i1 = BoxInt() + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop(inputargs, ops, looptoken) + # + llerrno.set_debug_saved_lasterror(self.cpu, 24) + deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3) + original_result = self.cpu.get_int_value(deadframe, 0) + result = llerrno.get_debug_saved_lasterror(self.cpu) + print 'saveerr =', saveerr, ': got result =', result + # + if saveerr == rffi.RFFI_SAVE_LASTERROR: + assert result == 42 # from the C code + else: + assert result == 24 # not touched + assert original_result == 3456789 + + def test_call_release_gil_readsaved_lasterror(self): + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") + if sys.platform != 'win32': + py.test.skip("Windows test only") + eci = ExternalCompilationInfo( + separate_module_sources=[r''' + #include + static long f1(long a, long b, long c, long d, + long e, long f, long g) { + long r = GetLastError(); + printf("GetLastError() result: %ld\n", r); + printf("%ld %ld %ld %ld %ld %ld %ld\n", a,b,c,d,e,f,g); + r += 100 * (a + 10*b + 100*c + 1000*d + + 
10000*e + 100000*f + 1000000*g); + return r; + } + RPY_EXPORTED + long test_call_release_gil_readsaved_lasterror(void) { + return (long)&f1; + } + ''']) + fn_name = 'test_call_release_gil_readsaved_lasterror' + getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) + func1_adr = getter_ptr() + calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7, + types.slong) + # + for saveerr in [rffi.RFFI_READSAVED_LASTERROR]: + faildescr = BasicFailDescr(1) + inputargs = [BoxInt() for i in range(7)] + i1 = BoxInt() + ops = [ + ResOperation(rop.CALL_RELEASE_GIL, + [ConstInt(saveerr), ConstInt(func1_adr)] + + inputargs, i1, + descr=calldescr), + ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr), + ResOperation(rop.FINISH, [i1], None, descr=BasicFinalDescr(0)) + ] + ops[-2].setfailargs([]) + looptoken = JitCellToken() + self.cpu.compile_loop(inputargs, ops, looptoken) + # + llerrno.set_debug_saved_lasterror(self.cpu, 24) + deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3) + result = self.cpu.get_int_value(deadframe, 0) + assert llerrno.get_debug_saved_lasterror(self.cpu) == 24 + # + assert result == 24 + 345678900 + + def test_call_release_gil_err_all(self): + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.rlib.libffi import types + from rpython.jit.backend.llsupport import llerrno + # + if not isinstance(self.cpu, AbstractLLCPU): + py.test.skip("not on LLGraph") + if sys.platform != 'win32': + eci = ExternalCompilationInfo( + separate_module_sources=[r''' + #include + static long f1(long a, long b, long c, long d, + long e, long f, long g) { + long r = errno; + errno = 42; + r += 100 * (a + 10*b + 100*c + 1000*d + + 10000*e + 100000*f + 1000000*g); From noreply at buildbot.pypy.org Mon Jan 19 23:54:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Jan 2015 23:54:50 +0100 (CET) Subject: [pypy-commit] pypy default: document errno-again Message-ID: 
<20150119225450.6A1B11C0098@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75445:9fbe4eee0d73 Date: 2015-01-19 23:54 +0100 http://bitbucket.org/pypy/pypy/changeset/9fbe4eee0d73/ Log: document errno-again diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -120,3 +120,13 @@ .. branch: squeaky/use-cflags-for-compiling-asm .. branch: unicode-fix .. branch: zlib_zdict + +.. branch: errno-again + +Changes how errno, GetLastError, and WSAGetLastError are handled. +The idea is to tie reading the error status as close as possible to +the external function call. This fixes some bugs, both of the very +rare kind (e.g. errno on Linux might in theory be overwritten by +mmap(), called rarely during major GCs, if such a major GC occurs at +exactly the wrong time), and some of the less rare kind +(particularly on Windows tests). From noreply at buildbot.pypy.org Tue Jan 20 00:57:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 00:57:56 +0100 (CET) Subject: [pypy-commit] pypy default: fix test Message-ID: <20150119235756.786D11C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75446:2e861a37e530 Date: 2015-01-20 00:57 +0100 http://bitbucket.org/pypy/pypy/changeset/2e861a37e530/ Log: fix test diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -43,6 +43,10 @@ child.sendline('import termios') child.expect('>>> ') child.sendline('termios.tcgetattr(0)') + # output of the first time is ignored: it contains the compilation + # of more C stuff relating to errno + child.expect('>>> ') + child.sendline('termios.tcgetattr(0)') child.expect('\[.*?\[.*?\]\]') lst = eval(child.match.group(0)) assert len(lst) == 7 From noreply at buildbot.pypy.org Tue Jan 20 09:48:45 2015 From: noreply at 
buildbot.pypy.org (fijal) Date: Tue, 20 Jan 2015 09:48:45 +0100 (CET) Subject: [pypy-commit] pypy vmprof: I should learn a thing or two about pointer arithmetics Message-ID: <20150120084845.732551C0316@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75447:1d07e4257e8c Date: 2015-01-20 10:48 +0200 http://bitbucket.org/pypy/pypy/changeset/1d07e4257e8c/ Log: I should learn a thing or two about pointer arithmetics diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -105,7 +105,7 @@ vmprof_hacked_unw_cursor_t *cp2 = (vmprof_hacked_unw_cursor_t*)cp; void* bp = (void*)sp + sp_offset; cp2->sp = bp; - bp -= 1; + bp -= sizeof(void*); cp2->ip = ((void**)bp)[0]; // the ret is on the top of the stack minus WORD return 1; From noreply at buildbot.pypy.org Tue Jan 20 10:49:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 10:49:31 +0100 (CET) Subject: [pypy-commit] stmgc default: Document how you build llvm Message-ID: <20150120094931.BBB121C00F9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1553:1e5ef25eed45 Date: 2015-01-20 10:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/1e5ef25eed45/ Log: Document how you build llvm diff --git a/c7/llvmfix/README.txt b/c7/llvmfix/README.txt --- a/c7/llvmfix/README.txt +++ b/c7/llvmfix/README.txt @@ -1,3 +1,22 @@ +Apply these patches to llvm, svn revision 201645, +which you get from: + + svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm -r 201645 + cd llvm/tools + svn co http://llvm.org/svn/llvm-project/cfe/trunk clang -r 201645 + cd ../.. + cd llvm/projects + svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk compiler-rt -r 201645 + cd ../.. + cd llvm + patch -p0 < ~/.../c7/llvmfix/...diff + # ^^^ repeat that line for all patches in this directory + cd .. + mkdir llvm-build + cd llvm-build + ../llvm/configure # requires gcc >= 4.7! 
+ make + no-introduce-bogus-cast-in-combine.diff From noreply at buildbot.pypy.org Tue Jan 20 12:23:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 12:23:21 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150120112321.B52431C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r561:bd7cce42cbf4 Date: 2015-01-20 12:23 +0100 http://bitbucket.org/pypy/pypy.org/changeset/bd7cce42cbf4/ Log: update the values diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $51195 of $60000 (85.3%) + $51288 of $60000 (85.5%)
From noreply at buildbot.pypy.org Tue Jan 20 12:59:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 12:59:02 +0100 (CET) Subject: [pypy-commit] stmgc default: update comment Message-ID: <20150120115902.1DEA21C00F9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1554:9ba6ad56087a Date: 2015-01-20 12:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/9ba6ad56087a/ Log: update comment diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -158,7 +158,7 @@ The best is to use typedefs like above. The object_s part contains some fields reserved for the STM library. - Right now this is only one byte. + Right now this is only four bytes. */ struct object_s { From noreply at buildbot.pypy.org Tue Jan 20 13:40:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 13:40:42 +0100 (CET) Subject: [pypy-commit] stmgc default: It takes ages to compile with a debug-mode llvm Message-ID: <20150120124042.B82201C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1555:b730bc3d7c55 Date: 2015-01-20 13:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/b730bc3d7c55/ Log: It takes ages to compile with a debug-mode llvm diff --git a/c7/llvmfix/README.txt b/c7/llvmfix/README.txt --- a/c7/llvmfix/README.txt +++ b/c7/llvmfix/README.txt @@ -14,7 +14,7 @@ cd .. mkdir llvm-build cd llvm-build - ../llvm/configure # requires gcc >= 4.7! + ../llvm/configure --enable-optimized # requires gcc >= 4.7! 
make From noreply at buildbot.pypy.org Tue Jan 20 14:24:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 14:24:08 +0100 (CET) Subject: [pypy-commit] pypy default: Trying to change the order: the "nocollect" marker can appear anywhere, Message-ID: <20150120132408.E8BE11C0103@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75448:7e73ff3a2eac Date: 2015-01-20 14:23 +0100 http://bitbucket.org/pypy/pypy/changeset/7e73ff3a2eac/ Log: Trying to change the order: the "nocollect" marker can appear anywhere, but putting it after the call prevents gcc from generating a tail-call diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -690,9 +690,9 @@ self.default(hop) self.pop_roots(hop, livevars) else: - self.default(hop) if hop.spaceop.opname == "direct_call": self.mark_call_cannotcollect(hop, hop.spaceop.args[0]) + self.default(hop) def mark_call_cannotcollect(self, hop, name): pass From noreply at buildbot.pypy.org Tue Jan 20 15:18:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 15:18:23 +0100 (CET) Subject: [pypy-commit] pypy default: If a JMP and CALL instruction goes straight to a further JMP, emit Message-ID: <20150120141823.274691C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75449:dde0fac9f1a4 Date: 2015-01-20 15:02 +0100 http://bitbucket.org/pypy/pypy/changeset/dde0fac9f1a4/ Log: If a JMP and CALL instruction goes straight to a further JMP, emit the original JMP or CALL to go directly to the target. 
diff --git a/rpython/jit/backend/x86/codebuf.py b/rpython/jit/backend/x86/codebuf.py --- a/rpython/jit/backend/x86/codebuf.py +++ b/rpython/jit/backend/x86/codebuf.py @@ -46,9 +46,9 @@ def copy_to_raw_memory(self, addr): self._copy_to_raw_memory(addr) if self.relocations is not None: - for reloc in self.relocations: + for reloc in self.relocations: # for 32-bit only p = addr + reloc - adr = rffi.cast(rffi.LONGP, p - WORD) - adr[0] = intmask(adr[0] - p) + adr = rffi.cast(rffi.INTP, p - 4) + adr[0] = rffi.cast(rffi.INT, intmask(adr[0]) - p) valgrind.discard_translations(addr, self.get_relative_pos()) self._dump(addr, "jit-backend-dump", backend_name) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -516,6 +516,10 @@ if code == possible_code: val = getattr(loc, "value_" + possible_code)() if possible_code == 'i': + # This is for CALL or JMP only. If target is + # immediately starting with another JMP instruction, + # follow it now. + val = self._follow_jump_instructions(val) if self.WORD == 4: _rx86_getattr(self, name + "_l")(val) self.add_pending_relocation() @@ -533,6 +537,17 @@ return func_with_new_name(INSN, "INSN_" + name) + _do_follow_jump_instructions = True + + def _follow_jump_instructions(self, addr): + if not self._do_follow_jump_instructions or addr == 0: # for tests + return addr + # 'addr' is an absolute address here + while rffi.cast(rffi.CCHARP, addr)[0] == '\xE9': # JMP <4 bytes> + addr += 5 + addr += intmask(rffi.cast(rffi.INTP, addr - 4)[0]) + return addr + def _addr_as_reg_offset(self, addr): # Encodes a (64-bit) address as an offset from the scratch register. 
# If we are within a "reuse_scratch_register" block, we remember the diff --git a/rpython/jit/backend/x86/test/test_regloc.py b/rpython/jit/backend/x86/test/test_regloc.py --- a/rpython/jit/backend/x86/test/test_regloc.py +++ b/rpython/jit/backend/x86/test/test_regloc.py @@ -4,6 +4,7 @@ from rpython.jit.backend.x86.test.test_rx86 import CodeBuilder32, CodeBuilder64, assert_encodes_as from rpython.jit.backend.x86.assembler import heap from rpython.jit.backend.x86.arch import IS_X86_64, IS_X86_32 +from rpython.jit.backend.x86 import codebuf from rpython.rlib.rarithmetic import intmask import py.test @@ -62,11 +63,11 @@ def test_relocation(): from rpython.rtyper.lltypesystem import lltype, rffi - from rpython.jit.backend.x86 import codebuf for target in [0x01020304, -0x05060708, 0x0102030405060708]: if target > sys.maxint: continue mc = codebuf.MachineCodeBlockWrapper() + mc._do_follow_jump_instructions = False mc.CALL(ImmedLoc(target)) length = mc.get_relative_pos() buf = lltype.malloc(rffi.CCHARP.TO, length, flavor='raw') @@ -96,6 +97,38 @@ assert ''.join([buf[i] for i in range(length)]) == expected lltype.free(buf, flavor='raw') +def test_follow_jump_instructions_32(): + buf = lltype.malloc(rffi.CCHARP.TO, 80, flavor='raw') + raw = rffi.cast(lltype.Signed, buf) + mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] + mc.RET() + mc.copy_to_raw_memory(raw) + mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] + mc.JMP(imm(raw)) + mc.copy_to_raw_memory(raw + 20) + assert buf[20] == '\xE9' # JMP + assert buf[21] == '\xE7' # -25 + assert buf[22] == '\xFF' + assert buf[23] == '\xFF' + assert buf[24] == '\xFF' + mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] + mc.JMP(imm(raw + 20)) + mc.copy_to_raw_memory(raw + 40) + assert buf[40] == '\xE9' # JMP + assert buf[41] == '\xD3' # -45 + assert buf[42] == '\xFF' + assert buf[43] == '\xFF' + assert buf[44] == '\xFF' + mc = codebuf.MachineCodeBlockWrapper(); 
mc.WORD = 4; mc.relocations = [] + mc.CALL(imm(raw + 40)) + mc.copy_to_raw_memory(raw + 60) + assert buf[60] == '\xE8' # CALL + assert buf[61] == '\xBF' # -65 + assert buf[62] == '\xFF' + assert buf[63] == '\xFF' + assert buf[64] == '\xFF' + lltype.free(buf, flavor='raw') + class Test64Bits: From noreply at buildbot.pypy.org Tue Jan 20 15:23:51 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 Jan 2015 15:23:51 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: improve test_random Message-ID: <20150120142351.736581C03F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1556:080ece9f6963 Date: 2015-01-20 11:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/080ece9f6963/ Log: improve test_random diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -481,14 +481,6 @@ { if (!_stm_validate()) stm_abort_transaction(); - -#if STM_TESTS - if (STM_PSEGMENT->transaction_state != TS_INEVITABLE - && STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) { - /* abort for tests... 
*/ - stm_abort_transaction(); - } -#endif } diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -300,7 +300,8 @@ self.start_transaction() self.become_inevitable() # - py.test.raises(Conflict, self.switch, 0) + self.switch(0) + py.test.raises(Conflict, self.commit_transaction) # self.switch(1) diff --git a/c8/test/test_random.py b/c8/test/test_random.py --- a/c8/test/test_random.py +++ b/c8/test/test_random.py @@ -349,7 +349,8 @@ def op_become_inevitable(ex, global_state, thread_state): trs = thread_state.transaction_state - global_state.check_if_can_become_inevitable(trs) + if not trs.check_must_abort(): + global_state.check_if_can_become_inevitable(trs) thread_state.push_roots(ex) ex.do(raising_call(trs.check_must_abort(), @@ -426,18 +427,13 @@ v = ord(global_state.rnd.choice("abcdefghijklmnop")) assert trs.write_root(r, v) is not None # - aborts = trs.check_must_abort() - if aborts: - thread_state.abort_transaction() offset = global_state.get_root_size(r) + " - 1" if is_ref: - ex.do(raising_call(aborts, "stm_set_ref", r, offset, v, try_cards)) - if not aborts: - ex.do(raising_call(False, "stm_set_ref", r, "0", v, try_cards)) + ex.do(raising_call(False, "stm_set_ref", r, offset, v, try_cards)) + ex.do(raising_call(False, "stm_set_ref", r, "0", v, try_cards)) else: - ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset, try_cards)) - if not aborts: - ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR", try_cards)) + ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), offset, try_cards)) + ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR", try_cards)) def op_read(ex, global_state, thread_state): r = thread_state.get_random_root() @@ -509,8 +505,6 @@ ex.do('#') # trs = new_thread_state.transaction_state - if trs and not trs.check_must_abort(): - global_state.check_if_can_become_inevitable(trs) conflicts = trs and trs.check_must_abort() ex.thread_num 
= new_thread_state.num # @@ -568,28 +562,28 @@ # random steps: possible_actions = [ - op_allocate, - op_allocate_ref, op_allocate_ref, - op_write, op_write, op_write, - op_read, op_read, op_read, op_read, op_read, op_read, op_read, op_read, - op_commit_transaction, - op_abort_transaction, - op_forget_root, - op_become_inevitable, - op_assert_size, - op_assert_modified, - op_minor_collect, - op_major_collect, + [op_read,]*100, + [op_write,]*70, + [op_allocate,]*25, + [op_allocate_ref]*30, + [op_commit_transaction,]*10, + [op_abort_transaction,], + [op_forget_root]*10, + [op_become_inevitable]*2, + [op_assert_size]*20, + [op_assert_modified]*10, + [op_minor_collect]*5, + [op_major_collect], ] + possible_actions = [item for sublist in possible_actions for item in sublist] + print possible_actions for _ in range(2000): # make sure we are in a transaction: curr_thread = op_switch_thread(ex, global_state, curr_thread) - if (global_state.is_inevitable_transaction_running() - and curr_thread.transaction_state is None): - continue # don't bother trying to start a transaction - if curr_thread.transaction_state is None: + if global_state.is_inevitable_transaction_running(): + continue # don't bother trying to start a transaction op_start_transaction(ex, global_state, curr_thread) assert curr_thread.transaction_state is not None From noreply at buildbot.pypy.org Tue Jan 20 15:23:52 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 Jan 2015 15:23:52 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: possibly fix a race Message-ID: <20150120142352.9DD2A1C03F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1557:06bc63626a03 Date: 2015-01-20 11:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/06bc63626a03/ Log: possibly fix a race diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -74,8 +74,17 @@ long i; for (i = 0; i < GCPAGE_NUM_PAGES; i++) { /* add to 
free_uniform_pages list */ - ((struct small_free_loc_s *)p)->nextpage = free_uniform_pages; - free_uniform_pages = (struct small_free_loc_s *)p; + struct small_free_loc_s *to_add = (struct small_free_loc_s *)p; + + retry: + to_add->nextpage = free_uniform_pages; + if (UNLIKELY(!__sync_bool_compare_and_swap( + &free_uniform_pages, + to_add->nextpage, + to_add))) { + goto retry; + } + p += 4096; } } From noreply at buildbot.pypy.org Tue Jan 20 15:23:53 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 Jan 2015 15:23:53 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: test and fix for not clearing WB_EXECUTED before validation on commit; validation depends on this flag Message-ID: <20150120142353.A343A1C03F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1558:8c4bbf6fd47b Date: 2015-01-20 15:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/8c4bbf6fd47b/ Log: test and fix for not clearing WB_EXECUTED before validation on commit; validation depends on this flag diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -410,10 +410,19 @@ return result; } + +static void reset_wb_executed_flags(void); +static void readd_wb_executed_flags(void); +static void check_all_write_barrier_flags(char *segbase, struct list_s *list); + static void _validate_and_attach(struct stm_commit_log_entry_s *new) { struct stm_commit_log_entry_s *old; + OPT_ASSERT(new != NULL); + /* we are attaching a real CL entry: */ + bool is_commit = new != INEV_RUNNING; + while (1) { if (!_stm_validate()) { if (new != INEV_RUNNING) @@ -429,6 +438,16 @@ } #endif + if (is_commit) { + /* we must not remove the WB_EXECUTED flags before validation as + it is part of a condition in import_objects() called by + copy_bk_objs_in_page_from to not overwrite our modifications. 
+ So we do it here: */ + reset_wb_executed_flags(); + check_all_write_barrier_flags(STM_SEGMENT->segment_base, + STM_PSEGMENT->modified_old_objects); + } + /* try to attach to commit log: */ old = STM_PSEGMENT->last_commit_log_entry; if (old->next == NULL) { @@ -442,6 +461,15 @@ usleep(10); } + if (is_commit) { + /* XXX: unfortunately, if we failed to attach our CL entry, + we have to re-add the WB_EXECUTED flags before we try to + validate again because of said condition (s.a) */ + readd_wb_executed_flags(); + } + + dprintf(("_validate_and_attach(%p) failed, enter safepoint\n", new)); + /* check for requested safe point. otherwise an INEV transaction may try to commit but cannot because of the busy-loop here. */ _stm_collectable_safe_point(); @@ -463,6 +491,11 @@ new->rev_num = old->rev_num + 1; OPT_ASSERT(old->next == INEV_RUNNING); + /* WB_EXECUTED must be removed before we attach */ + reset_wb_executed_flags(); + check_all_write_barrier_flags(STM_SEGMENT->segment_base, + STM_PSEGMENT->modified_old_objects); + bool yes = __sync_bool_compare_and_swap(&old->next, INEV_RUNNING, new); OPT_ASSERT(yes); } @@ -616,6 +649,7 @@ static void reset_wb_executed_flags(void) { + dprintf(("reset_wb_executed_flags()\n")); struct list_s *list = STM_PSEGMENT->modified_old_objects; struct stm_undo_s *undo = (struct stm_undo_s *)list->items; struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); @@ -626,6 +660,21 @@ } } +static void readd_wb_executed_flags(void) +{ + dprintf(("readd_wb_executed_flags()\n")); + struct list_s *list = STM_PSEGMENT->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + + for (; undo < end; undo++) { + object_t *obj = undo->object; + obj->stm_flags |= GCFLAG_WB_EXECUTED; + } +} + + + static void _stm_start_transaction(stm_thread_local_t *tl) { @@ -719,9 +768,9 @@ struct stm_undo_s *end = (struct stm_undo_s *)(list->items + 
list->count); for (; undo < end; undo++) { object_t *obj = undo->object; - char *dst = REAL_ADDRESS(segbase, obj); - assert(((struct object_s *)dst)->stm_flags & GCFLAG_WRITE_BARRIER); - assert(!(((struct object_s *)dst)->stm_flags & GCFLAG_WB_EXECUTED)); + struct object_s *dst = (struct object_s*)REAL_ADDRESS(segbase, obj); + assert(dst->stm_flags & GCFLAG_WRITE_BARRIER); + assert(!(dst->stm_flags & GCFLAG_WB_EXECUTED)); } #endif } @@ -735,14 +784,6 @@ dprintf(("> stm_commit_transaction()\n")); minor_collection(1); - reset_wb_executed_flags(); - - /* minor_collection() above should have set again all WRITE_BARRIER flags. - Check that again here for the objects that are about to be copied into - the commit log. */ - check_all_write_barrier_flags(STM_SEGMENT->segment_base, - STM_PSEGMENT->modified_old_objects); - _validate_and_add_to_commit_log(); invoke_and_clear_user_callbacks(0); /* for commit */ @@ -941,37 +982,9 @@ assert(frag_size > 0); assert(frag_size + ((uintptr_t)frag & 4095) <= 4096); - /* if the page of the fragment is fully shared, nothing to do: - |S|N|N|N| */ - - /* XXXXX: re-enable the following if completely sure that we always - copy the shared page when we privatize correctly. */ - /* /\* nobody must change the page mapping until we flush *\/ */ - /* assert(STM_PSEGMENT->privatization_lock); */ - - /* int my_segnum = STM_SEGMENT->segment_num; */ - /* uintptr_t pagenum = (uintptr_t)frag / 4096; */ - /* bool fully_shared = false; */ - - /* if (get_page_status_in(my_segnum, pagenum) == PAGE_SHARED) { */ - /* fully_shared = true; */ - /* int i; */ - /* for (i = 0; fully_shared && i < NB_SEGMENTS; i++) { */ - /* if (i == my_segnum) */ - /* continue; */ - - /* /\* XXX: works if never all pages use SHARED page *\/ */ - /* if (get_page_status_in(i, pagenum) != PAGE_NO_ACCESS) { */ - /* fully_shared = false; */ - /* break; */ - /* } */ - /* } */ - /* } */ - - /* if (fully_shared) */ - /* return; /\* nothing to do *\/ */ - - /* e.g. 
|P|S|N|P| */ + /* XXX: is it possible to just add to the queue iff the pages + of the fragment need syncing to other segments? (keep privatization + lock until the "flush") */ /* Enqueue this object (or fragemnt of object) */ if (STM_PSEGMENT->sq_len == SYNC_QUEUE_SIZE) @@ -1017,17 +1030,6 @@ static void synchronize_objects_flush(void) { - /* XXX: not sure this applies anymore. */ - /* Do a full memory barrier. We must make sure that other - CPUs see the changes we did to the shared page ("S", in - synchronize_object_enqueue()) before we check the other segments - with is_private_page() (below). Otherwise, we risk the - following: this CPU writes "S" but the writes are not visible yet; - then it checks is_private_page() and gets false, and does nothing - more; just afterwards another CPU sets its own private_page bit - and copies the page; but it risks doing so before seeing the "S" - writes. - */ long j = STM_PSEGMENT->sq_len; if (j == 0) return; @@ -1035,7 +1037,6 @@ dprintf(("synchronize_objects_flush(): %ld fragments\n", j)); - __sync_synchronize(); assert(STM_PSEGMENT->privatization_lock); DEBUG_EXPECT_SEGFAULT(false); @@ -1051,10 +1052,10 @@ if (i == myself) continue; - if (i == 0 || (get_page_status_in(i, page) != PAGE_NO_ACCESS)) { + if (get_page_status_in(i, page) != PAGE_NO_ACCESS) { /* shared or private, but never segfault */ char *dst = REAL_ADDRESS(get_segment_base(i), frag); - dprintf(("-> flush %p to seg %lu\n", frag, i)); + dprintf(("-> flush %p to seg %lu, sz=%lu\n", frag, i, frag_size)); memcpy(dst, src, frag_size); } } diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -103,6 +103,8 @@ nobj = (object_t *)allocate_outside_nursery_small(size); } + dprintf(("move %p -> %p\n", obj, nobj)); + /* copy the object */ copy_large_object:; char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); diff --git a/c8/test/test_basic.py b/c8/test/test_basic.py --- a/c8/test/test_basic.py +++ 
b/c8/test/test_basic.py @@ -818,7 +818,7 @@ self.start_transaction() stm_set_char(lp_char_5, '\0', HDR, False) self.commit_transaction() - + # self.switch(2) self.start_transaction() @@ -842,7 +842,7 @@ # py.test.raises(Conflict, self.switch, 2) - + def test_repeated_wb(self): lp_char_5 = stm_allocate_old(384) @@ -859,3 +859,43 @@ self.check_char_everywhere(lp_char_5, '\0', offset=HDR) self.check_char_everywhere(lp_char_5, '\0', offset=384-1) + + def test_bug4(self): + o = stm_allocate_old(16) + p = stm_allocate_old(32) # not the same page + self.start_transaction() + stm_set_char(o, 'x') + stm_set_char(p, 'x') + self.commit_transaction() + + self.switch(2, False) + self.start_transaction() + # make both objs accessible + stm_get_char(o) + stm_get_char(p) + self.commit_transaction() + self.start_transaction() + + self.switch(0, False) + self.start_transaction() + stm_set_char(p, 'y') + self.commit_transaction() # commit new p + + self.start_transaction() + stm_set_char(o, 'f') + # o has backup copy + # this segment is the same as the one that + # committed o and p last + + self.switch(2, False) + # now we write o in version 'x' + assert stm_get_char(o) == 'x' + stm_set_char(o, 'c') + self.commit_transaction() + # o should now have 'c' and not be overwritten with 'f' + + self.start_transaction() + assert stm_get_char(o) == 'c' + self.commit_transaction() + + py.test.raises(Conflict, self.switch, 0) From noreply at buildbot.pypy.org Tue Jan 20 16:07:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 16:07:07 +0100 (CET) Subject: [pypy-commit] pypy default: remove this outdated comment Message-ID: <20150120150707.7A1AF1C04AB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75450:068df7f1d579 Date: 2015-01-20 16:06 +0100 http://bitbucket.org/pypy/pypy/changeset/068df7f1d579/ Log: remove this outdated comment diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ 
b/pypy/module/thread/gil.py @@ -64,8 +64,6 @@ after_thread_switch = lambda: None # hook for signal.py -# Fragile code below. We have to preserve the C-level errno manually... - def before_external_call(): # this function must not raise, in such a way that the exception # transformer knows that it cannot raise! From noreply at buildbot.pypy.org Tue Jan 20 17:50:40 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 Jan 2015 17:50:40 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix a memory leak Message-ID: <20150120165040.CD96A1C03F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1559:e597e1e5cfe3 Date: 2015-01-20 16:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/e597e1e5cfe3/ Log: fix a memory leak diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -250,7 +250,7 @@ in this segment) */ *((char *)(pseg->pub.segment_base + (((uintptr_t)obj) >> 4))) = 0; - /* XXX: _stm_large_free(stm_object_pages + item->addr); */ + _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; } @@ -332,13 +332,13 @@ object_t *_stm_allocate_external(ssize_t size_rounded_up) { - /* /\* first, force a collection if needed *\/ */ - /* if (is_major_collection_requested()) { */ - /* /\* use stm_collect() with level 0: if another thread does a major GC */ - /* in-between, is_major_collection_requested() will become false */ - /* again, and we'll avoid doing yet another one afterwards. *\/ */ - /* stm_collect(0); */ - /* } */ + /* first, force a collection if needed */ + if (is_major_collection_requested()) { + /* use stm_collect() with level 0: if another thread does a major GC + in-between, is_major_collection_requested() will become false + again, and we'll avoid doing yet another one afterwards. 
*/ + stm_collect(0); + } object_t *o = (object_t *)allocate_outside_nursery_large(size_rounded_up); From noreply at buildbot.pypy.org Tue Jan 20 17:50:43 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 Jan 2015 17:50:43 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix the slowdown for sort.duh. Still, the real fix would be to avoid adding tons of objs to the modified_old_objects list Message-ID: <20150120165043.1FDCC1C03F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1560:24ba707614c4 Date: 2015-01-20 17:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/24ba707614c4/ Log: fix the slowdown for sort.duh. Still, the real fix would be to avoid adding tons of objs to the modified_old_objects list diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -434,8 +434,8 @@ get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ - mark_visited_test_and_set(item); - mark_trace(item, stm_object_pages); /* shared version */ + if (!mark_visited_test_and_set(item)) + mark_trace(item, stm_object_pages); /* shared version */ mark_trace(item, base); /* private version */ })); } diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -227,14 +227,11 @@ /* trace all references found in sharing seg0 (should always be up-to-date and not cause segfaults) */ - while (1) { + while (!list_is_empty(marked_objects_to_trace)) { + obj = (object_t *)list_pop_item(marked_objects_to_trace); + realobj = (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); stmcb_trace(realobj, &mark_record_trace); - - if (list_is_empty(marked_objects_to_trace)) - break; - - obj = (object_t *)list_pop_item(marked_objects_to_trace); } } @@ -283,23 +280,53 @@ some of the pages) */ long i; + struct list_s *uniques = list_create(); for (i = 1; i < NB_SEGMENTS; i++) { char *base = get_segment_base(i); + OPT_ASSERT(list_is_empty(uniques)); + /* the list of 
modified_old_objs can be huge and contain a lot + of duplicated (same obj, different slice) entries. It seems + worth it to build a new list without duplicates. + The reason is that newly created objs, when moved out of the + nursery, don't have WB_EXECUTED flag. Thus we execute waaay + too many write barriers per transaction and add them all + to this list (and the commit log). XXXXX */ struct list_s *lst = get_priv_segment(i)->modified_old_objects; - long j, count = list_count(lst); - for (j = 0; j < count; j += 3) { - object_t *item = (object_t*)list_item(lst, j); - /* All modified objs have all pages accessible for now. - This is because we create a backup of the whole obj - and thus make all pages accessible. */ - assert_obj_accessible_in(i, item); - mark_visited_test_and_set(item); - mark_and_trace(item, stm_object_pages); /* shared, committed version */ - mark_and_trace(item, base); /* private, modified version */ + struct stm_undo_s *undo = (struct stm_undo_s *)lst->items; + struct stm_undo_s *end = (struct stm_undo_s *)(lst->items + lst->count); + for (; undo < end; undo++) { + object_t *obj = undo->object; + struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, obj); + + if (!(dst->stm_flags & GCFLAG_VISITED)) { + LIST_APPEND(uniques, obj); + dst->stm_flags |= GCFLAG_VISITED; + } } + + + LIST_FOREACH_R(uniques, object_t*, + ({ + /* clear the VISITED flags again and actually visit them */ + struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, item); + dst->stm_flags &= ~GCFLAG_VISITED; + + /* All modified objs have all pages accessible for now. + This is because we create a backup of the whole obj + and thus make all pages accessible. 
*/ + assert_obj_accessible_in(i, item); + + if (!mark_visited_test_and_set(item)) + mark_and_trace(item, stm_object_pages); /* shared, committed version */ + mark_and_trace(item, base); /* private, modified version */ + })); + + + list_clear(uniques); } + LIST_FREE(uniques); } static void mark_visit_from_roots(void) From noreply at buildbot.pypy.org Tue Jan 20 18:28:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 18:28:30 +0100 (CET) Subject: [pypy-commit] pypy default: fix message (ebarrett) Message-ID: <20150120172830.8EB221C0313@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75451:ae1e7c5107b0 Date: 2015-01-20 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/ae1e7c5107b0/ Log: fix message (ebarrett) diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -129,7 +129,8 @@ self.attr_allowed = False if not self.readonly: raise NoSuchAttrError( - "setting forbidden attribute %r on %r" % ( + "the attribute %r goes here to %r, " + "but it is forbidden here" % ( self.name, homedef)) def modified(self, classdef='?'): From noreply at buildbot.pypy.org Tue Jan 20 20:06:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 20:06:36 +0100 (CET) Subject: [pypy-commit] pypy default: fix this line Message-ID: <20150120190636.D83C71C0305@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75452:c7e2a1195d13 Date: 2015-01-20 20:06 +0100 http://bitbucket.org/pypy/pypy/changeset/c7e2a1195d13/ Log: fix this line diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -346,7 +346,7 @@ help='use as pypy exe instead of pypy/goal/pypy-c') # Positional arguments, for backward compatability with buldbots parser.add_argument('extra_args', help='optional interface to positional arguments', nargs=argparse.REMAINDER, - 
metavar='[root-pypy-dir] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path]', + metavar='[archive-name] [rename_pypy_c] [targetdir] [override_pypy_c]', ) options = parser.parse_args(args) From noreply at buildbot.pypy.org Tue Jan 20 20:25:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 20:25:48 +0100 (CET) Subject: [pypy-commit] pypy default: uh? Message-ID: <20150120192548.A94051C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75453:1ba9cde15d19 Date: 2015-01-20 20:25 +0100 http://bitbucket.org/pypy/pypy/changeset/1ba9cde15d19/ Log: uh? diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -113,7 +113,7 @@ if not sys.platform == 'win32': modules += ['_curses', 'syslog', 'gdbm', '_sqlite3'] if not options.no_tk: - modules.append(('_tkinter')) + modules.append('_tkinter') for module in modules: try: subprocess.check_call([str(pypy_c), '-c', 'import ' + module]) From noreply at buildbot.pypy.org Tue Jan 20 20:36:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 20:36:08 +0100 (CET) Subject: [pypy-commit] pypy default: it's enough to fix permissions in pypydir Message-ID: <20150120193608.65DB01C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75454:3b510f04fd2e Date: 2015-01-20 20:35 +0100 http://bitbucket.org/pypy/pypy/changeset/3b510f04fd2e/ Log: it's enough to fix permissions in pypydir diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -256,7 +256,7 @@ for source, target in binaries: archive = bindir.join(target) shutil.copy(str(source), str(archive)) - fix_permissions(builddir) + fix_permissions(pypydir) old_dir = os.getcwd() try: From noreply at buildbot.pypy.org Tue Jan 20 21:16:46 2015 From: noreply at buildbot.pypy.org (larstiq) Date: Tue, 20 Jan 2015 
21:16:46 +0100 (CET) Subject: [pypy-commit] pypy osx-package.py: Package libpypy-c.dylib on OSX. Message-ID: <20150120201646.5A1181C0526@cobra.cs.uni-duesseldorf.de> Author: Wouter van Heyst Branch: osx-package.py Changeset: r75455:f92c5ff97b20 Date: 2015-01-20 21:03 +0200 http://bitbucket.org/pypy/pypy/changeset/f92c5ff97b20/ Log: Package libpypy-c.dylib on OSX. diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -160,9 +160,10 @@ if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] - libpypy_c = pypy_c.new(basename='libpypy-c.so') + libpypy_name = 'libpypy-c.so' if not sys.platform.startswith('darwin') else 'libpypy-c.dylib' + libpypy_c = pypy_c.new(basename=libpypy_name) if libpypy_c.check(): - binaries.append((libpypy_c, 'libpypy-c.so')) + binaries.append((libpypy_c, libpypy_name)) # builddir = options.builddir pypydir = builddir.ensure(name, dir=True) From noreply at buildbot.pypy.org Tue Jan 20 21:16:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 21:16:47 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in larstiq/pypy/osx-package.py (pull request #297) Message-ID: <20150120201647.A72EE1C0526@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75456:95869dc0e8dd Date: 2015-01-20 21:16 +0100 http://bitbucket.org/pypy/pypy/changeset/95869dc0e8dd/ Log: Merged in larstiq/pypy/osx-package.py (pull request #297) Package libpypy-c.dylib on OSX. 
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -160,9 +160,10 @@ if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] - libpypy_c = pypy_c.new(basename='libpypy-c.so') + libpypy_name = 'libpypy-c.so' if not sys.platform.startswith('darwin') else 'libpypy-c.dylib' + libpypy_c = pypy_c.new(basename=libpypy_name) if libpypy_c.check(): - binaries.append((libpypy_c, 'libpypy-c.so')) + binaries.append((libpypy_c, libpypy_name)) # builddir = options.builddir pypydir = builddir.ensure(name, dir=True) From noreply at buildbot.pypy.org Tue Jan 20 21:17:52 2015 From: noreply at buildbot.pypy.org (larstiq) Date: Tue, 20 Jan 2015 21:17:52 +0100 (CET) Subject: [pypy-commit] pypy package.py-helpful-error-message: Simplify logic for the case pypy-c can not be found. Explicitly mention which file we checked existence of. Message-ID: <20150120201752.EBC3A1C0526@cobra.cs.uni-duesseldorf.de> Author: Wouter van Heyst Branch: package.py-helpful-error-message Changeset: r75457:67e9fc4a82d6 Date: 2015-01-20 21:33 +0200 http://bitbucket.org/pypy/pypy/changeset/67e9fc4a82d6/ Log: Simplify logic for the case pypy-c can not be found. Explicitly mention which file we checked existence of. 
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -140,16 +140,11 @@ else: pypy_c = py.path.local(override_pypy_c) if not pypy_c.check(): - print pypy_c - if os.path.isdir(os.path.dirname(str(pypy_c))): - raise PyPyCNotFound( - 'Please compile pypy first, using translate.py,' - ' or check that you gave the correct path' - ' (see docstring for more info)') - else: - raise PyPyCNotFound( - 'Bogus path: %r does not exist (see docstring for more info)' - % (os.path.dirname(str(pypy_c)),)) + raise PyPyCNotFound( + 'Expected but did not find %s.' + ' Please compile pypy first, using translate.py,' + ' or check that you gave the correct path' + ' with --override_pypy_c' % pypy_c) if not options.no_cffi: try: create_cffi_import_libraries(pypy_c, options) From noreply at buildbot.pypy.org Tue Jan 20 21:17:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Jan 2015 21:17:54 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in larstiq/pypy/package.py-helpful-error-message (pull request #298) Message-ID: <20150120201754.30CF01C0526@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75458:e26a063cf5cc Date: 2015-01-20 21:17 +0100 http://bitbucket.org/pypy/pypy/changeset/e26a063cf5cc/ Log: Merged in larstiq/pypy/package.py-helpful-error-message (pull request #298) Simplify logic for the case pypy-c can not be found. Explicitly mention which file we checked existence of. 
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -140,16 +140,11 @@ else: pypy_c = py.path.local(override_pypy_c) if not pypy_c.check(): - print pypy_c - if os.path.isdir(os.path.dirname(str(pypy_c))): - raise PyPyCNotFound( - 'Please compile pypy first, using translate.py,' - ' or check that you gave the correct path' - ' (see docstring for more info)') - else: - raise PyPyCNotFound( - 'Bogus path: %r does not exist (see docstring for more info)' - % (os.path.dirname(str(pypy_c)),)) + raise PyPyCNotFound( + 'Expected but did not find %s.' + ' Please compile pypy first, using translate.py,' + ' or check that you gave the correct path' + ' with --override_pypy_c' % pypy_c) if not options.no_cffi: try: create_cffi_import_libraries(pypy_c, options) From noreply at buildbot.pypy.org Tue Jan 20 22:47:41 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 Jan 2015 22:47:41 +0100 (CET) Subject: [pypy-commit] cffi default: enable testing on windows, 64 bit Message-ID: <20150120214741.9A9F41C010B@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r1641:a48d6875b570 Date: 2015-01-20 23:48 +0200 http://bitbucket.org/cffi/cffi/changeset/a48d6875b570/ Log: enable testing on windows, 64 bit diff --git a/testing/test_ownlib.py b/testing/test_ownlib.py --- a/testing/test_ownlib.py +++ b/testing/test_ownlib.py @@ -118,8 +118,12 @@ productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC") productdir = os.path.abspath(productdir) vcvarsall = os.path.join(productdir, "vcvarsall.bat") + # 64? 
+ arch = 'x86' + if sys.maxsize > 2**32: + arch = 'amd64' if os.path.isfile(vcvarsall): - cmd = '"%s"' % vcvarsall + ' & cl.exe testownlib.c ' \ + cmd = '"%s" %s' % (vcvarsall, arch) + ' & cl.exe testownlib.c ' \ ' /LD /Fetestownlib.dll' subprocess.check_call(cmd, cwd = str(udir), shell=True) cls.module = str(udir.join('testownlib.dll')) From noreply at buildbot.pypy.org Tue Jan 20 23:41:47 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 20 Jan 2015 23:41:47 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: uudecode: when checking for trailing characters, don't bother checking the incomplete pending bits. Message-ID: <20150120224147.63F1E1C0103@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75459:aeaed06de33f Date: 2015-01-20 23:41 +0100 http://bitbucket.org/pypy/pypy/changeset/aeaed06de33f/ Log: uudecode: when checking for trailing characters, don't bother checking the incomplete pending bits. For compatibility with CPython2.7.9. diff --git a/pypy/module/binascii/interp_uu.py b/pypy/module/binascii/interp_uu.py --- a/pypy/module/binascii/interp_uu.py +++ b/pypy/module/binascii/interp_uu.py @@ -19,15 +19,6 @@ return (ord(c) - 0x20) & 0x3f _a2b_read._always_inline_ = True -def _a2b_write(space, res, length, char): - if res.getlength() < length: # common case: we have enough room. - res.append(chr(char)) - else: - # overflows. Only accept zeros from now on. 
- if char != 0: - raise_Error(space, "Trailing garbage") -_a2b_write._always_inline_ = True - @unwrap_spec(ascii='bufferstr') def a2b_uu(space, ascii): @@ -45,9 +36,20 @@ C = _a2b_read(space, ascii, i+2) D = _a2b_read(space, ascii, i+3) # - _a2b_write(space, res, length, A << 2 | B >> 4) - _a2b_write(space, res, length, (B & 0xf) << 4 | C >> 2) - _a2b_write(space, res, length, (C & 0x3) << 6 | D) + if res.getlength() < length: + res.append(chr(A << 2 | B >> 4)) + elif A != 0 or B != 0: + raise_Error(space, "Trailing garbage") + # + if res.getlength() < length: + res.append(chr((B & 0xf) << 4 | C >> 2)) + elif C != 0: + raise_Error(space, "Trailing garbage") + # + if res.getlength() < length: + res.append(chr((C & 0x3) << 6 | D)) + elif D != 0: + raise_Error(space, "Trailing garbage") remaining = length - res.getlength() if remaining > 0: diff --git a/pypy/module/binascii/test/test_binascii.py b/pypy/module/binascii/test/test_binascii.py --- a/pypy/module/binascii/test/test_binascii.py +++ b/pypy/module/binascii/test/test_binascii.py @@ -29,6 +29,7 @@ ('(WAXR6UBA3#', "\xde\x1e2[X\xa1L0"), (')WAXR6UBA3#Q', "\xde\x1e2[X\xa1L<@"), ('*WAXR6UBA3#Q!5', "\xde\x1e2[X\xa1L Author: Maciej Fijalkowski Branch: vmprof Changeset: r75460:576aa4c1f37d Date: 2015-01-21 09:46 +0200 http://bitbucket.org/pypy/pypy/changeset/576aa4c1f37d/ Log: fight a bit with virtual ips conflicting with syscalls diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py --- a/pypy/module/_vmprof/__init__.py +++ b/pypy/module/_vmprof/__init__.py @@ -4,7 +4,6 @@ """ Write me :) """ - appleveldefs = { } diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -3,11 +3,11 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.annlowlevel import cast_instance_to_gcref, cast_base_ptr_to_instance from rpython.rlib.objectmodel import 
we_are_translated, CDefinedIntSymbolic -from rpython.rlib import jit, rgc, rposix +from rpython.rlib import jit, rposix from rpython.tool.pairtype import extendabletype from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt, wrap_oserror -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import oefmt, wrap_oserror +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pycode import PyCode @@ -177,13 +177,13 @@ class VMProf(object): def __init__(self): - self.virtual_ip = 0 + self.virtual_ip = 0x7000000000000000 self.is_enabled = False self.ever_enabled = False self.strbuf = lltype.malloc(rffi.CCHARP.TO, 1024, flavor='raw', immortal=True, zero=True) def get_next_virtual_IP(self): - self.virtual_ip -= 1 + self.virtual_ip += 1 return self.virtual_ip @jit.dont_look_inside diff --git a/rpython/tool/jitlogparser/parser.py b/rpython/tool/jitlogparser/parser.py --- a/rpython/tool/jitlogparser/parser.py +++ b/rpython/tool/jitlogparser/parser.py @@ -375,24 +375,37 @@ i += 1 return res +def parse_addresses(part, callback=None): + hex_re = '0x(-?[\da-f]+)' + addrs = {} + if callback is None: + def callback(addr, stop_addr, bootstrap_addr, name, code_name): + addrs.setdefault(bootstrap_addr, []).append(name) + for entry in part: + m = re.search('has address %(hex)s to %(hex)s \(bootstrap %(hex)s' % + {'hex': hex_re}, entry) + if not m: + # a bridge + m = re.search('has address ' + hex_re + ' to ' + hex_re, entry) + addr = int(m.group(1), 16) + bootstrap_addr = addr + stop_addr = int(m.group(2), 16) + entry = entry.lower() + m = re.search('guard ' + hex_re, entry) + name = 'guard ' + m.group(1) + code_name = 'bridge' + else: + name = entry[:entry.find('(') - 1].lower() + addr = int(m.group(1), 16) + stop_addr = int(m.group(2), 16) + bootstrap_addr = int(m.group(3), 16) + code_name = entry[entry.find('(') + 1:m.span(0)[0] - 2] + 
callback(addr, stop_addr, bootstrap_addr, name, code_name) + return addrs def import_log(logname, ParserCls=SimpleParser): log = parse_log_file(logname) - hex_re = '0x(-?[\da-f]+)' - addrs = {} - for entry in extract_category(log, 'jit-backend-addr'): - m = re.search('bootstrap ' + hex_re, entry) - if not m: - # a bridge - m = re.search('has address ' + hex_re, entry) - addr = int(m.group(1), 16) - entry = entry.lower() - m = re.search('guard ' + hex_re, entry) - name = 'guard ' + m.group(1) - else: - name = entry[:entry.find('(') - 1].lower() - addr = int(m.group(1), 16) - addrs.setdefault(addr, []).append(name) + addrs = parse_addresses(extract_category(log, 'jit-backend-addr')) from rpython.jit.backend.tool.viewcode import World world = World() for entry in extract_category(log, 'jit-backend-dump'): From noreply at buildbot.pypy.org Wed Jan 21 14:34:41 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 21 Jan 2015 14:34:41 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: Implement what overflow objs in c7 do using WB_EXECUTED. This undoes part of the previous commit which should not be necessary anymore. Message-ID: <20150121133441.736401C0DCB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1561:72a83d94df8e Date: 2015-01-21 13:03 +0100 http://bitbucket.org/pypy/stmgc/changeset/72a83d94df8e/ Log: Implement what overflow objs in c7 do using WB_EXECUTED. This undoes part of the previous commit which should not be necessary anymore. 
diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -705,6 +705,7 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->new_objects)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); @@ -756,6 +757,7 @@ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; list_clear(STM_PSEGMENT->objects_pointing_to_nursery); + list_clear(STM_PSEGMENT->new_objects); release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ @@ -775,6 +777,20 @@ #endif } +static void push_new_objects_to_other_segments(void) +{ + acquire_privatization_lock(STM_SEGMENT->segment_num); + LIST_FOREACH_R(STM_PSEGMENT->new_objects, object_t *, + ({ + assert(item->stm_flags & GCFLAG_WB_EXECUTED); + item->stm_flags &= ~GCFLAG_WB_EXECUTED; + synchronize_object_enqueue(item); + })); + synchronize_objects_flush(); + release_privatization_lock(STM_SEGMENT->segment_num); +} + + void stm_commit_transaction(void) { assert(!_has_mutex()); @@ -784,6 +800,8 @@ dprintf(("> stm_commit_transaction()\n")); minor_collection(1); + push_new_objects_to_other_segments(); + _validate_and_add_to_commit_log(); invoke_and_clear_user_callbacks(0); /* for commit */ @@ -887,6 +905,7 @@ tl->last_abort__bytes_in_nursery = bytes_in_nursery; list_clear(pseg->objects_pointing_to_nursery); + list_clear(pseg->new_objects); #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") } @@ -999,6 +1018,8 @@ assert(!_is_young(obj)); assert(STM_PSEGMENT->privatization_lock); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED)); + ssize_t obj_size = stmcb_size_rounded_up( (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); OPT_ASSERT(obj_size >= 16); diff --git a/c8/stm/core.h b/c8/stm/core.h --- 
a/c8/stm/core.h +++ b/c8/stm/core.h @@ -74,11 +74,20 @@ struct tree_s *young_outside_nursery; struct tree_s *nursery_objects_shadows; + /* list of objects created in the current transaction and + that survived at least one minor collection. They need + to be synchronized to other segments on commit, but they + do not need to be in the commit log entry. */ + struct list_s *new_objects; + uint8_t privatization_lock; // XXX KILL uint8_t safe_point; uint8_t transaction_state; + /* Temp for minor collection */ + bool minor_collect_will_commit_now; + struct tree_s *callbacks_on_commit_and_abort[2]; struct stm_commit_log_entry_s *last_commit_log_entry; diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -192,6 +192,14 @@ /************************************************************/ + +static bool is_new_object(object_t *obj) +{ + struct object_s *realobj = (struct object_s*)REAL_ADDRESS(stm_object_pages, obj); /* seg0 */ + return realobj->stm_flags & GCFLAG_WB_EXECUTED; +} + + static inline void mark_record_trace(object_t **pobj) { /* takes a normal pointer to a thread-local pointer to an object */ @@ -226,11 +234,12 @@ stmcb_trace(realobj, &mark_record_trace); /* trace all references found in sharing seg0 (should always be - up-to-date and not cause segfaults) */ + up-to-date and not cause segfaults, except for new objs) */ while (!list_is_empty(marked_objects_to_trace)) { obj = (object_t *)list_pop_item(marked_objects_to_trace); - realobj = (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); + char *base = is_new_object(obj) ? 
segment_base : stm_object_pages; + realobj = (struct object_s *)REAL_ADDRESS(base, obj); stmcb_trace(realobj, &mark_record_trace); } } @@ -243,14 +252,31 @@ mark_and_trace(obj, segment_base); } + +static void mark_visit_possibly_new_object(char *segment_base, object_t *obj) +{ + /* if newly allocated object, we trace in segment_base, otherwise in + the sharing seg0 */ + if (obj == NULL) + return; + + if (is_new_object(obj)) { + mark_visit_object(obj, segment_base); + } else { + mark_visit_object(obj, stm_object_pages); + } +} + static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size) { const struct stm_shadowentry_s *p, *end; p = (const struct stm_shadowentry_s *)slice; end = (const struct stm_shadowentry_s *)(slice + size); for (; p < end; p++) - if ((((uintptr_t)p->ss) & 3) == 0) + if ((((uintptr_t)p->ss) & 3) == 0) { + assert(!is_new_object(p->ss)); mark_visit_object(p->ss, stm_object_pages); // seg0 + } return NULL; } @@ -273,6 +299,7 @@ } + static void mark_visit_from_modified_objects(void) { /* look for modified objects in segments and mark all of them @@ -280,53 +307,29 @@ some of the pages) */ long i; - struct list_s *uniques = list_create(); for (i = 1; i < NB_SEGMENTS; i++) { char *base = get_segment_base(i); - OPT_ASSERT(list_is_empty(uniques)); - /* the list of modified_old_objs can be huge and contain a lot - of duplicated (same obj, different slice) entries. It seems - worth it to build a new list without duplicates. - The reason is that newly created objs, when moved out of the - nursery, don't have WB_EXECUTED flag. Thus we execute waaay - too many write barriers per transaction and add them all - to this list (and the commit log). 
XXXXX */ struct list_s *lst = get_priv_segment(i)->modified_old_objects; + struct stm_undo_s *modified = (struct stm_undo_s *)lst->items; + struct stm_undo_s *end = (struct stm_undo_s *)(lst->items + lst->count); - struct stm_undo_s *undo = (struct stm_undo_s *)lst->items; - struct stm_undo_s *end = (struct stm_undo_s *)(lst->items + lst->count); - for (; undo < end; undo++) { - object_t *obj = undo->object; - struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, obj); + for (; modified < end; modified++) { + object_t *obj = modified->object; + /* All modified objs have all pages accessible for now. + This is because we create a backup of the whole obj + and thus make all pages accessible. */ + assert_obj_accessible_in(i, obj); - if (!(dst->stm_flags & GCFLAG_VISITED)) { - LIST_APPEND(uniques, obj); - dst->stm_flags |= GCFLAG_VISITED; + assert(!is_new_object(obj)); /* should never be in that list */ + + if (!mark_visited_test_and_set(obj)) { + /* trace shared, committed version */ + mark_and_trace(obj, stm_object_pages); } + mark_and_trace(obj, base); /* private, modified version */ } - - - LIST_FOREACH_R(uniques, object_t*, - ({ - /* clear the VISITED flags again and actually visit them */ - struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, item); - dst->stm_flags &= ~GCFLAG_VISITED; - - /* All modified objs have all pages accessible for now. - This is because we create a backup of the whole obj - and thus make all pages accessible. */ - assert_obj_accessible_in(i, item); - - if (!mark_visited_test_and_set(item)) - mark_and_trace(item, stm_object_pages); /* shared, committed version */ - mark_and_trace(item, base); /* private, modified version */ - })); - - - list_clear(uniques); } - LIST_FREE(uniques); } static void mark_visit_from_roots(void) @@ -341,19 +344,29 @@ /* look at all objs on the shadow stack (they are old but may be uncommitted so far, so only exist in the associated_segment_num). 
- However, since we just executed a minor collection, they were + IF they are uncommitted new objs, trace in the actual segment, + otherwise, since we just executed a minor collection, they were all synced to the sharing seg0. Thus we can trace them there. If they were again modified since then, they were traced by mark_visit_from_modified_object() already. */ + + /* only for new, uncommitted objects: + If 'tl' is currently running, its 'associated_segment_num' + field is the segment number that contains the correct + version of its overflowed objects. */ + char *segment_base = get_segment_base(tl->associated_segment_num); + struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - if ((((uintptr_t)current->ss) & 3) == 0) - mark_visit_object(current->ss, stm_object_pages); + if ((((uintptr_t)current->ss) & 3) == 0) { + mark_visit_possibly_new_object(segment_base, current->ss); + } } - mark_visit_object(tl->thread_local_obj, stm_object_pages); + + mark_visit_possibly_new_object(segment_base, tl->thread_local_obj); tl = tl->next; } while (tl != stm_all_thread_locals); @@ -362,9 +375,10 @@ long i; for (i = 1; i < NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state != TS_NONE) { - mark_visit_object( - get_priv_segment(i)->threadlocal_at_start_of_transaction, - stm_object_pages); + mark_visit_possibly_new_object( + get_segment_base(i), + get_priv_segment(i)->threadlocal_at_start_of_transaction); + stm_rewind_jmp_enum_shadowstack( get_segment(i)->running_thread, mark_visit_objects_from_ss); @@ -372,6 +386,46 @@ } } +static void ready_new_objects(void) +{ +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + /* objs in new_objects only have garbage in the sharing seg0, + since it is used to mark objs as visited, we must make + sure the flag is cleared at the start of a major collection. 
+ (XXX: ^^^ may be optional if we have the part below) + + Also, we need to be able to recognize these objects in order + to only trace them in the segment they are valid in. So we + also make sure to set WB_EXECUTED in the sharing seg0. No + other objs than new_objects have WB_EXECUTED in seg0 (since + there can only be committed versions there). + */ + + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct list_s *lst = pseg->new_objects; + + LIST_FOREACH_R(lst, object_t* /*item*/, + ({ + struct object_s *realobj; + /* WB_EXECUTED always set in this segment */ + assert(realobj = (struct object_s*)REAL_ADDRESS(pseg->pub.segment_base, item)); + assert(realobj->stm_flags & GCFLAG_WB_EXECUTED); + + /* clear VISITED and ensure WB_EXECUTED in seg0 */ + mark_visited_test_and_clear(item); + realobj = (struct object_s*)REAL_ADDRESS(stm_object_pages, item); + realobj->stm_flags |= GCFLAG_WB_EXECUTED; + })); + } +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") +} + static void clean_up_segment_lists(void) { @@ -411,6 +465,16 @@ we "didn't do a collection" at all. So nothing to do on modified_old_objs. 
*/ } + + /* remove from new_objects all objects that die */ + lst = pseg->new_objects; + uintptr_t n = list_count(lst); + while (n-- > 0) { + object_t *obj = (object_t *)list_item(lst, n); + if (!mark_visited_test(obj)) { + list_set_item(lst, n, list_pop_item(lst)); + } + } } #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") @@ -486,6 +550,8 @@ DEBUG_EXPECT_SEGFAULT(false); + ready_new_objects(); + /* marking */ LIST_CREATE(marked_objects_to_trace); mark_visit_from_modified_objects(); diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -125,8 +125,15 @@ /* a young object outside the nursery */ nobj = obj; tree_delete_item(STM_PSEGMENT->young_outside_nursery, (uintptr_t)nobj); + nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; + } - nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; + /* if this is not during commit, we will add them to the new_objects + list and push them to other segments on commit. Thus we can add + the WB_EXECUTED flag so that they don't end up in modified_old_objects */ + assert(!(nobj->stm_flags & GCFLAG_WB_EXECUTED)); + if (!STM_PSEGMENT->minor_collect_will_commit_now) { + nobj->stm_flags |= GCFLAG_WB_EXECUTED; } /* Must trace the object later */ @@ -192,20 +199,31 @@ _collect_now(obj); if (obj_sync_now & FLAG_SYNC_LARGE) { - /* this is a newly allocated object. We must synchronize it - to other segments (after we added WRITE_BARRIER). */ - acquire_privatization_lock(STM_SEGMENT->segment_num); - synchronize_object_enqueue(obj); - release_privatization_lock(STM_SEGMENT->segment_num); + /* this is a newly allocated obj in this transaction. 
We must + either synchronize the object to other segments now, or + add the object to new_objects list */ + if (STM_PSEGMENT->minor_collect_will_commit_now) { + acquire_privatization_lock(STM_SEGMENT->segment_num); + synchronize_object_enqueue(obj); + release_privatization_lock(STM_SEGMENT->segment_num); + } else { + LIST_APPEND(STM_PSEGMENT->new_objects, obj); + } } /* the list could have moved while appending */ lst = STM_PSEGMENT->objects_pointing_to_nursery; } - acquire_privatization_lock(STM_SEGMENT->segment_num); - synchronize_objects_flush(); - release_privatization_lock(STM_SEGMENT->segment_num); + /* flush all new objects to other segments now */ + if (STM_PSEGMENT->minor_collect_will_commit_now) { + acquire_privatization_lock(STM_SEGMENT->segment_num); + synchronize_objects_flush(); + release_privatization_lock(STM_SEGMENT->segment_num); + } else { + /* nothing in the queue when not committing */ + assert(STM_PSEGMENT->sq_len == 0); + } } @@ -273,6 +291,8 @@ { dprintf(("minor_collection commit=%d\n", (int)commit)); + STM_PSEGMENT->minor_collect_will_commit_now = commit; + collect_roots_in_nursery(); collect_oldrefs_to_nursery(); diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -95,6 +95,7 @@ pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->modified_old_objects = list_create(); + pr->new_objects = list_create(); pr->objects_pointing_to_nursery = list_create(); pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); @@ -134,6 +135,8 @@ assert(list_is_empty(pr->objects_pointing_to_nursery)); list_free(pr->objects_pointing_to_nursery); list_free(pr->modified_old_objects); + assert(list_is_empty(pr->new_objects)); + list_free(pr->new_objects); tree_free(pr->young_outside_nursery); tree_free(pr->nursery_objects_shadows); tree_free(pr->callbacks_on_commit_and_abort[0]); From noreply at buildbot.pypy.org Wed Jan 21 14:34:42 2015 From: noreply at buildbot.pypy.org (Raemi) 
Date: Wed, 21 Jan 2015 14:34:42 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: fix for validation failing in the thread doing the major collection (it didn't abort) Message-ID: <20150121133442.8A2171C0DCB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1562:63fb7d3bb1db Date: 2015-01-21 14:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/63fb7d3bb1db/ Log: fix for validation failing in the thread doing the major collection (it didn't abort) diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -574,4 +574,11 @@ DEBUG_EXPECT_SEGFAULT(true); release_all_privatization_locks(); + + /* if major_do_validation_and_minor_collections() decided that we + must abort, do it now. The others are in safe-points that will + abort if they need to. */ + dprintf(("must abort?:%d\n", (int)must_abort())); + if (must_abort()) + abort_with_mutex(); } diff --git a/c8/stm/rewind_setjmp.h b/c8/stm/rewind_setjmp.h --- a/c8/stm/rewind_setjmp.h +++ b/c8/stm/rewind_setjmp.h @@ -87,6 +87,7 @@ } while (0) /* go up one frame. 
if there was a setjmp call in this frame, + copy the frame above the current one and add it to the list */ #define rewind_jmp_leaveframe(rjthread, rjbuf, ss) do { \ assert((rjbuf)->shadowstack_base == (char *)(ss)); \ From noreply at buildbot.pypy.org Wed Jan 21 14:34:43 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 21 Jan 2015 14:34:43 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: test for last commit Message-ID: <20150121133443.8A40E1C0DCB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1563:670a3e29f448 Date: 2015-01-21 14:31 +0100 http://bitbucket.org/pypy/stmgc/changeset/670a3e29f448/ Log: test for last commit diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -315,11 +315,13 @@ void stm_collect(long level) { - if (level > 0) + if (level > 0) { force_major_collection_request(); - - minor_collection(/*commit=*/ false); - major_collection_if_requested(); + minor_collection(/*commit=*/ false); + major_collection_if_requested(); + } else { + minor_collection(/*commit=*/ false); + } } @@ -357,7 +359,11 @@ /* use stm_collect() with level 0: if another thread does a major GC in-between, is_major_collection_requested() will become false again, and we'll avoid doing yet another one afterwards. */ +#ifndef STM_TESTS + /* during tests, we must not do a major collection during allocation. + The reason is that it may abort us and tests don't expect it. 
*/ stm_collect(0); +#endif } object_t *o = (object_t *)allocate_outside_nursery_large(size_rounded_up); diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -80,7 +80,8 @@ void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); -void stm_collect(long level); +/* void stm_collect(long level); */ +long _check_stm_collect(long level); uint64_t _stm_total_allocated(void); void _stm_set_nursery_free_count(uint64_t free_count); @@ -175,6 +176,10 @@ CHECKED(stm_commit_transaction()); } +bool _check_stm_collect(long level) { + CHECKED(stm_collect(level)); +} + long _check_start_transaction(stm_thread_local_t *tl) { void **jmpbuf = tl->rjthread.jmpbuf; \ if (__builtin_setjmp(jmpbuf) == 0) { /* returned directly */\ @@ -426,10 +431,13 @@ raise Conflict() def stm_minor_collect(): - lib.stm_collect(0) + assert not lib._check_stm_collect(0) # no conflict def stm_major_collect(): - lib.stm_collect(1) + res = lib._check_stm_collect(1) + if res == 1: + raise Conflict() + return res def stm_is_accessible_page(pagenum): return lib._stm_is_accessible_page(pagenum) diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -393,3 +393,19 @@ assert lib._stm_total_allocated() == 0 self.commit_transaction() + + def test_abort_thread_doing_major_gc(self): + + o = stm_allocate_old(16) + self.start_transaction() + stm_set_char(o, 'a') + + self.switch(1) + self.start_transaction() + stm_set_char(o, 'b') + + self.switch(0) + self.commit_transaction() + + self.switch(1, False) + py.test.raises(Conflict, stm_major_collect) diff --git a/c8/test/test_random.py b/c8/test/test_random.py --- a/c8/test/test_random.py +++ b/c8/test/test_random.py @@ -402,9 +402,19 @@ def op_major_collect(ex, global_state, thread_state): thread_state.push_roots(ex) - ex.do('stm_major_collect()') - thread_state.pop_roots(ex) - thread_state.reload_roots(ex) + + # check 
if we have to abort after the major gc + trs = thread_state.transaction_state + conflicts = trs.check_must_abort() + if conflicts: + ex.do("# objs_in_conflict=%s" % trs.objs_in_conflict) + ex.do(raising_call(conflicts, 'stm_major_collect')) + + if conflicts: + thread_state.abort_transaction() + else: + thread_state.pop_roots(ex) + thread_state.reload_roots(ex) def op_forget_root(ex, global_state, thread_state): @@ -497,7 +507,7 @@ def op_switch_thread(ex, global_state, thread_state, new_thread_state=None): if new_thread_state is None: new_thread_state = global_state.rnd.choice( - global_state.thread_states + [thread_state] * 3) # more likely not switch + global_state.thread_states + [thread_state] * 10) # more likely not switch if new_thread_state != thread_state: if thread_state.transaction_state: @@ -564,12 +574,12 @@ possible_actions = [ [op_read,]*100, [op_write,]*70, - [op_allocate,]*25, - [op_allocate_ref]*30, - [op_commit_transaction,]*10, + [op_allocate,]*10, + [op_allocate_ref]*10, + [op_commit_transaction,]*6, [op_abort_transaction,], [op_forget_root]*10, - [op_become_inevitable]*2, + [op_become_inevitable], [op_assert_size]*20, [op_assert_modified]*10, [op_minor_collect]*5, From noreply at buildbot.pypy.org Wed Jan 21 15:47:38 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 21 Jan 2015 15:47:38 +0100 (CET) Subject: [pypy-commit] stmgc c8-private-pages: some changes to the duhton demos Message-ID: <20150121144738.918E51C00B3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c8-private-pages Changeset: r1564:205085ecca8f Date: 2015-01-21 15:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/205085ecca8f/ Log: some changes to the duhton demos diff --git a/duhton-c8/demo/many_square_roots.duh b/duhton-c8/demo/many_square_roots.duh --- a/duhton-c8/demo/many_square_roots.duh +++ b/duhton-c8/demo/many_square_roots.duh @@ -8,10 +8,11 @@ (defun show-square-root (n) (setq s (square-root n)) - (print (quote square-root-of) n (quote is) s)) + (print 
(quote square-root-of) n (quote is) s) + ) (setq n 0) -(while (< n 200) +(while (< n 2000) (transaction show-square-root (+ 1000000000 (* n 100000))) (setq n (+ n 1))) diff --git a/duhton-c8/demo/micro_transactions.duh b/duhton-c8/demo/micro_transactions.duh --- a/duhton-c8/demo/micro_transactions.duh +++ b/duhton-c8/demo/micro_transactions.duh @@ -10,7 +10,7 @@ (defun big_transactions () (setq n 0) - (while (< n 20000) + (while (< n 2000) (transaction increment) (setq n (+ n 1)) ) diff --git a/duhton-c8/demo/synth.duh b/duhton-c8/demo/synth.duh --- a/duhton-c8/demo/synth.duh +++ b/duhton-c8/demo/synth.duh @@ -19,7 +19,7 @@ (setq y (get lst 1)) (setq z (get lst 2)) (setq w (get lst 3)) - + (setq t (^ x (<< x 11))) (setq x y) (setq y z) @@ -55,14 +55,14 @@ (set shared (+ (get shared) 1)) (set private (+ (get private) 1)) ) - + (setq i (+ i 1)) ) ) -(setq N 1000) +(setq N 5000) ;; CONFL_IF_BELOW / RAND_MAX == ratio of conflicting transactions ;; to non conflicting ones (setq RAND_MAX 8) @@ -95,4 +95,3 @@ (run-transactions) (print (quote run-time-diff:) (- (time) timer)) (print (quote shared) (get shared)) - diff --git a/duhton-c8/demo/trees2.duh b/duhton-c8/demo/trees2.duh --- a/duhton-c8/demo/trees2.duh +++ b/duhton-c8/demo/trees2.duh @@ -11,11 +11,11 @@ ) ) -(defun lookup-tree () - (walk-tree (create-tree 10)) -) + (setq n 0) -(while (< n 1000) - (transaction lookup-tree) +(setq tree (create-tree 20)) +(print (quote tree-created)) +(while (< n 100) + (transaction walk-tree tree) (setq n (+ n 1))) diff --git a/duhton/demo/many_square_roots.duh b/duhton/demo/many_square_roots.duh --- a/duhton/demo/many_square_roots.duh +++ b/duhton/demo/many_square_roots.duh @@ -8,10 +8,11 @@ (defun show-square-root (n) (setq s (square-root n)) - (print (quote square-root-of) n (quote is) s)) + (print (quote square-root-of) n (quote is) s) + ) (setq n 0) -(while (< n 200) +(while (< n 2000) (transaction show-square-root (+ 1000000000 (* n 100000))) (setq n (+ n 1))) diff --git 
a/duhton/demo/micro_transactions.duh b/duhton/demo/micro_transactions.duh --- a/duhton/demo/micro_transactions.duh +++ b/duhton/demo/micro_transactions.duh @@ -10,7 +10,7 @@ (defun big_transactions () (setq n 0) - (while (< n 20000) + (while (< n 2000) (transaction increment) (setq n (+ n 1)) ) diff --git a/duhton/demo/synth.duh b/duhton/demo/synth.duh --- a/duhton/demo/synth.duh +++ b/duhton/demo/synth.duh @@ -19,7 +19,7 @@ (setq y (get lst 1)) (setq z (get lst 2)) (setq w (get lst 3)) - + (setq t (^ x (<< x 11))) (setq x y) (setq y z) @@ -55,14 +55,14 @@ (set shared (+ (get shared) 1)) (set private (+ (get private) 1)) ) - + (setq i (+ i 1)) ) ) -(setq N 1000) +(setq N 5000) ;; CONFL_IF_BELOW / RAND_MAX == ratio of conflicting transactions ;; to non conflicting ones (setq RAND_MAX 8) @@ -95,4 +95,3 @@ (run-transactions) (print (quote run-time-diff:) (- (time) timer)) (print (quote shared) (get shared)) - diff --git a/duhton/demo/trees2.duh b/duhton/demo/trees2.duh --- a/duhton/demo/trees2.duh +++ b/duhton/demo/trees2.duh @@ -11,11 +11,11 @@ ) ) -(defun lookup-tree () - (walk-tree (create-tree 10)) -) + (setq n 0) -(while (< n 1000) - (transaction lookup-tree) +(setq tree (create-tree 20)) +(print (quote tree-created)) +(while (< n 100) + (transaction walk-tree tree) (setq n (+ n 1))) From noreply at buildbot.pypy.org Wed Jan 21 15:47:40 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 21 Jan 2015 15:47:40 +0100 (CET) Subject: [pypy-commit] stmgc default: Merge with c8-private-pages Message-ID: <20150121144740.217271C00B3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1565:a043f17bba3c Date: 2015-01-21 15:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/a043f17bba3c/ Log: Merge with c8-private-pages diff too long, truncating to 2000 out of 9435 lines diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -436,8 +436,8 @@ get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ - 
mark_visited_test_and_set(item); - mark_trace(item, stm_object_pages); /* shared version */ + if (!mark_visited_test_and_set(item)) + mark_trace(item, stm_object_pages); /* shared version */ mark_trace(item, base); /* private version */ })); } diff --git a/c8/demo/demo_random2.c b/c8/demo/demo_random2.c --- a/c8/demo/demo_random2.c +++ b/c8/demo/demo_random2.c @@ -10,10 +10,10 @@ #include "stmgc.h" #define NUMTHREADS 3 -#define STEPS_PER_THREAD 500 -#define THREAD_STARTS 1000 // how many restarts of threads +#define STEPS_PER_THREAD 50000 +#define THREAD_STARTS 100 // how many restarts of threads #define PREBUILT_ROOTS 3 -#define FORKS 3 +#define FORKS 0 #define ACTIVE_ROOTS_SET_SIZE 100 // max num of roots created/alive in one transaction #define MAX_ROOTS_ON_SS 1000 // max on shadow stack @@ -232,11 +232,13 @@ break; case 3: // allocate fresh 'p' pushed = push_roots(); - size_t sizes[4] = {sizeof(struct node_s), - sizeof(struct node_s) + (get_rand(100000) & ~15), - sizeof(struct node_s) + 4096, - sizeof(struct node_s) + 4096*70}; - size_t size = sizes[get_rand(4)]; + size_t sizes[] = { + sizeof(struct node_s), sizeof(struct node_s)+16, + sizeof(struct node_s), sizeof(struct node_s)+16, + sizeof(struct node_s)+32, sizeof(struct node_s)+48, + sizeof(struct node_s)+32, sizeof(struct node_s)+48, + sizeof(struct node_s) + (get_rand(100000) & ~15)}; + size_t size = sizes[get_rand(sizeof(sizes) / sizeof(size_t))]; p = stm_allocate(size); ((nodeptr_t)p)->sig = SIGNATURE; ((nodeptr_t)p)->my_size = size; @@ -352,7 +354,7 @@ /* "interpreter main loop": this is one "application-frame" */ while (td.steps_left-->0 && get_rand(10) != 0) { if (td.steps_left % 8 == 0) - fprintf(stdout, "#"); + fprintf(stderr, "#"); assert(p == NULL || ((nodeptr_t)p)->sig == SIGNATURE); @@ -461,7 +463,7 @@ .next = NULL }; - stm_start_inevitable_transaction(&stm_thread_local); + //stm_start_inevitable_transaction(&stm_thread_local); for (i = 0; i < PREBUILT_ROOTS; i++) { void* new_templ = 
malloc(sizeof(struct node_s)); memcpy(new_templ, &prebuilt_template, sizeof(struct node_s)); @@ -474,7 +476,7 @@ ((nodeptr_t)prebuilt_roots[i])->my_hash = hash; } } - stm_commit_transaction(); + //stm_commit_transaction(); } int main(void) @@ -493,10 +495,11 @@ stm_setup(); + setup_globals(); + stm_register_thread_local(&stm_thread_local); stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); - setup_globals(); int thread_starts = NUMTHREADS * THREAD_STARTS; for (i = 0; i < NUMTHREADS; i++) { diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -2,235 +2,520 @@ # error "must be compiled via stmgc.c" #endif -#include +/* General helper: copies objects into our own segment, from some + source described by a range of 'struct stm_undo_s'. Maybe later + we could specialize this function to avoid the checks in the + inner loop. +*/ +static void import_objects( + int from_segnum, /* or -1: from undo->backup, + or -2: from undo->backup if not modified */ + uintptr_t pagenum, /* or -1: "all accessible" */ + struct stm_undo_s *undo, + struct stm_undo_s *end) +{ + char *src_segment_base = (from_segnum >= 0 ? 
get_segment_base(from_segnum) + : NULL); + assert(IMPLY(from_segnum >= 0, get_priv_segment(from_segnum)->modification_lock)); + assert(STM_PSEGMENT->modification_lock); + + DEBUG_EXPECT_SEGFAULT(false); + for (; undo < end; undo++) { + object_t *obj = undo->object; + stm_char *oslice = ((stm_char *)obj) + SLICE_OFFSET(undo->slice); + uintptr_t current_page_num = ((uintptr_t)oslice) / 4096; + + if (pagenum == -1) { + if (get_page_status_in(STM_SEGMENT->segment_num, + current_page_num) == PAGE_NO_ACCESS) + continue; + } + else { + if (current_page_num != pagenum) + continue; + } + + if (from_segnum == -2 && _stm_was_read(obj) && (obj->stm_flags & GCFLAG_WB_EXECUTED)) { + /* called from stm_validate(): + > if not was_read(), we certainly didn't modify + > if not WB_EXECUTED, we may have read from the obj in a different page but + did not modify it (should not occur right now, but future proof!) + only the WB_EXECUTED alone is not enough, since we may have imported from a + segment's private page (which had the flag set) */ + assert(IMPLY(_stm_was_read(obj), (obj->stm_flags & GCFLAG_WB_EXECUTED))); /* for now */ + continue; /* only copy unmodified */ + } + + /* XXX: if the next assert is always true, we should never get a segfault + in this function at all. So the DEBUG_EXPECT_SEGFAULT is correct. 
*/ + assert((get_page_status_in(STM_SEGMENT->segment_num, + current_page_num) != PAGE_NO_ACCESS)); + + dprintf(("import slice seg=%d obj=%p off=%lu sz=%d pg=%lu\n", + from_segnum, obj, SLICE_OFFSET(undo->slice), + SLICE_SIZE(undo->slice), current_page_num)); + char *src, *dst; + if (src_segment_base != NULL) + src = REAL_ADDRESS(src_segment_base, oslice); + else + src = undo->backup; + dst = REAL_ADDRESS(STM_SEGMENT->segment_base, oslice); + memcpy(dst, src, SLICE_SIZE(undo->slice)); + + if (src_segment_base == NULL && SLICE_OFFSET(undo->slice) == 0) { + /* check that restored obj doesn't have WB_EXECUTED */ + assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED)); + } + } + DEBUG_EXPECT_SEGFAULT(true); +} + + +/* ############# signal handler ############# */ + +static void copy_bk_objs_in_page_from(int from_segnum, uintptr_t pagenum, + bool only_if_not_modified) +{ + /* looks at all bk copies of objects overlapping page 'pagenum' and + copies the part in 'pagenum' back to the current segment */ + dprintf(("copy_bk_objs_in_page_from(%d, %ld, %d)\n", + from_segnum, (long)pagenum, only_if_not_modified)); + + struct list_s *list = get_priv_segment(from_segnum)->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + + import_objects(only_if_not_modified ? -2 : -1, + pagenum, undo, end); +} + +static void go_to_the_past(uintptr_t pagenum, + struct stm_commit_log_entry_s *from, + struct stm_commit_log_entry_s *to) +{ + assert(STM_PSEGMENT->modification_lock); + assert(from->rev_num >= to->rev_num); + /* walk BACKWARDS the commit log and update the page 'pagenum', + initially at revision 'from', until we reach the revision 'to'. */ + + /* XXXXXXX Recursive algo for now, fix this! 
*/ + if (from != to) { + struct stm_commit_log_entry_s *cl = to->next; + go_to_the_past(pagenum, from, cl); + + struct stm_undo_s *undo = cl->written; + struct stm_undo_s *end = cl->written + cl->written_count; + + import_objects(-1, pagenum, undo, end); + } +} + + + +static void handle_segfault_in_page(uintptr_t pagenum) +{ + /* assumes page 'pagenum' is ACCESS_NONE, privatizes it, + and validates to newest revision */ + + dprintf(("handle_segfault_in_page(%lu), seg %d\n", pagenum, STM_SEGMENT->segment_num)); + + /* XXX: bad, but no deadlocks: */ + acquire_all_privatization_locks(); + + long i; + int my_segnum = STM_SEGMENT->segment_num; + + assert(get_page_status_in(my_segnum, pagenum) == PAGE_NO_ACCESS); + + /* find who has the most recent revision of our page */ + int copy_from_segnum = -1; + uint64_t most_recent_rev = 0; + for (i = 1; i < NB_SEGMENTS; i++) { + if (i == my_segnum) + continue; + + struct stm_commit_log_entry_s *log_entry; + log_entry = get_priv_segment(i)->last_commit_log_entry; + if (get_page_status_in(i, pagenum) != PAGE_NO_ACCESS + && (copy_from_segnum == -1 || log_entry->rev_num > most_recent_rev)) { + copy_from_segnum = i; + most_recent_rev = log_entry->rev_num; + } + } + OPT_ASSERT(copy_from_segnum != my_segnum); + + /* make our page write-ready */ + page_mark_accessible(my_segnum, pagenum); + + if (copy_from_segnum == -1) { + /* this page is only accessible in the sharing segment so far (new + allocation). We can thus simply mark it accessible here. */ + pagecopy(get_virtual_page(my_segnum, pagenum), + get_virtual_page(0, pagenum)); + release_all_privatization_locks(); + return; + } + + /* before copying anything, acquire modification locks from our and + the other segment */ + uint64_t to_lock = (1UL << copy_from_segnum)| (1UL << my_segnum); + acquire_modification_lock_set(to_lock); + pagecopy(get_virtual_page(my_segnum, pagenum), + get_virtual_page(copy_from_segnum, pagenum)); + + /* if there were modifications in the page, revert them. 
*/ + copy_bk_objs_in_page_from(copy_from_segnum, pagenum, false); + + /* we need to go from 'src_version' to 'target_version'. This + might need a walk into the past. */ + struct stm_commit_log_entry_s *src_version, *target_version; + src_version = get_priv_segment(copy_from_segnum)->last_commit_log_entry; + target_version = STM_PSEGMENT->last_commit_log_entry; + + + dprintf(("handle_segfault_in_page: rev %lu to rev %lu\n", + src_version->rev_num, target_version->rev_num)); + /* adapt revision of page to our revision: + if our rev is higher than the page we copy from, everything + is fine as we never read/modified the page anyway + */ + if (src_version->rev_num > target_version->rev_num) + go_to_the_past(pagenum, src_version, target_version); + + release_modification_lock_set(to_lock); + release_all_privatization_locks(); +} + +static void _signal_handler(int sig, siginfo_t *siginfo, void *context) +{ + assert(_stm_segfault_expected > 0); + + int saved_errno = errno; + char *addr = siginfo->si_addr; + dprintf(("si_addr: %p\n", addr)); + if (addr == NULL || addr < stm_object_pages || + addr >= stm_object_pages+TOTAL_MEMORY) { + /* actual segfault, unrelated to stmgc */ + fprintf(stderr, "Segmentation fault: accessing %p\n", addr); + abort(); + } + + int segnum = get_segment_of_linear_address(addr); + if (segnum != STM_SEGMENT->segment_num) { + fprintf(stderr, "Segmentation fault: accessing %p (seg %d) from" + " seg %d\n", addr, segnum, STM_SEGMENT->segment_num); + abort(); + } + dprintf(("-> segment: %d\n", segnum)); + + char *seg_base = STM_SEGMENT->segment_base; + uintptr_t pagenum = ((char*)addr - seg_base) / 4096UL; + if (pagenum < END_NURSERY_PAGE) { + fprintf(stderr, "Segmentation fault: accessing %p (seg %d " + "page %lu)\n", addr, segnum, pagenum); + abort(); + } + + DEBUG_EXPECT_SEGFAULT(false); + handle_segfault_in_page(pagenum); + DEBUG_EXPECT_SEGFAULT(true); + + errno = saved_errno; + /* now return and retry */ +} /* ############# commit log 
############# */ void _dbg_print_commit_log() { - volatile struct stm_commit_log_entry_s *cl; - cl = (volatile struct stm_commit_log_entry_s *)&commit_log_root; + struct stm_commit_log_entry_s *cl = &commit_log_root; - fprintf(stderr, "root (%p, %d)\n", cl->next, cl->segment_num); + fprintf(stderr, "commit log:\n"); while ((cl = cl->next)) { - if ((uintptr_t)cl == -1) { - fprintf(stderr, "INEVITABLE\n"); + if (cl == INEV_RUNNING) { + fprintf(stderr, " INEVITABLE\n"); return; } - size_t i = 0; - fprintf(stderr, " elem (%p, %d)\n", cl->next, cl->segment_num); - object_t *obj; - while ((obj = cl->written[i])) { - fprintf(stderr, "-> %p\n", obj); - i++; - }; + fprintf(stderr, " entry at %p: seg %d, rev %lu\n", cl, cl->segment_num, cl->rev_num); + struct stm_undo_s *undo = cl->written; + struct stm_undo_s *end = undo + cl->written_count; + for (; undo < end; undo++) { + fprintf(stderr, " obj %p, size %d, ofs %lu: ", undo->object, + SLICE_SIZE(undo->slice), SLICE_OFFSET(undo->slice)); + /* long i; */ + /* for (i=0; islice); i += 8) */ + /* fprintf(stderr, " 0x%016lx", *(long *)(undo->backup + i)); */ + fprintf(stderr, "\n"); + } } } -static void _update_obj_from(int from_seg, object_t *obj) +static void reset_modified_from_backup_copies(int segment_num); /* forward */ + +static bool _stm_validate() { - /* during validation this looks up the obj in the - from_seg (backup or normal) and copies the version - over the current segment's one */ - size_t obj_size; - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - uintptr_t pagenum = (uintptr_t)obj / 4096UL; + /* returns true if we reached a valid state, or false if + we need to abort now */ + dprintf(("_stm_validate()\n")); + /* go from last known entry in commit log to the + most current one and apply all changes done + by other transactions. Abort if we have read one of + the committed objs. 
*/ + struct stm_commit_log_entry_s *first_cl = STM_PSEGMENT->last_commit_log_entry; + struct stm_commit_log_entry_s *next_cl, *last_cl, *cl; + int my_segnum = STM_SEGMENT->segment_num; + /* Don't check this 'cl'. This entry is already checked */ - OPT_ASSERT(!is_shared_log_page(pagenum)); - assert(is_private_log_page_in(STM_SEGMENT->segment_num, pagenum)); - assert(is_private_log_page_in(from_seg, pagenum)); + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + //assert(first_cl->next == INEV_RUNNING); + /* the above assert may fail when running a major collection + while the commit of the inevitable transaction is in progress */ + return true; + } - /* look the obj up in the other segment's modified_old_objects to - get its backup copy: */ - acquire_modified_objs_lock(from_seg); + bool needs_abort = false; - wlog_t *item; - struct tree_s *tree = get_priv_segment(from_seg)->modified_old_objects; - TREE_FIND(tree, (uintptr_t)obj, item, goto not_found); + while(1) { + /* retry IF: */ + /* if at the time of "HERE" (s.b.) there happen to be + more commits (and bk copies) then it could be that + copy_bk_objs_in_page_from (s.b.) reads a bk copy that + is itself more recent than last_cl. This is fixed + by re-validating. 
*/ + first_cl = STM_PSEGMENT->last_commit_log_entry; + if (first_cl->next == NULL) + break; - obj_size = stmcb_size_rounded_up((struct object_s*)item->val); - memcpy(realobj, (char*)item->val, obj_size); - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - release_modified_objs_lock(from_seg); - return; + if (first_cl->next == INEV_RUNNING) { + /* need to reach safe point if an INEV transaction + is waiting for us, otherwise deadlock */ + break; + } - not_found: - /* copy from page directly (obj is unmodified) */ - obj_size = stmcb_size_rounded_up( - (struct object_s*)REAL_ADDRESS(get_segment_base(from_seg), obj)); - memcpy(realobj, - REAL_ADDRESS(get_segment_base(from_seg), obj), - obj_size); - obj->stm_flags |= GCFLAG_WRITE_BARRIER; /* may already be gone */ - release_modified_objs_lock(from_seg); + /* Find the set of segments we need to copy from and lock them: */ + uint64_t segments_to_lock = 1UL << my_segnum; + cl = first_cl; + while ((next_cl = cl->next) != NULL) { + if (next_cl == INEV_RUNNING) { + /* only validate entries up to INEV */ + break; + } + assert(next_cl->rev_num > cl->rev_num); + cl = next_cl; + + if (cl->written_count) { + segments_to_lock |= (1UL << cl->segment_num); + } + } + last_cl = cl; + + /* HERE */ + + acquire_privatization_lock(STM_SEGMENT->segment_num); + acquire_modification_lock_set(segments_to_lock); + + + /* import objects from first_cl to last_cl: */ + if (first_cl != last_cl) { + uint64_t segment_really_copied_from = 0UL; + + cl = first_cl; + while ((cl = cl->next) != NULL) { + if (!needs_abort) { + struct stm_undo_s *undo = cl->written; + struct stm_undo_s *end = cl->written + cl->written_count; + for (; undo < end; undo++) { + if (_stm_was_read(undo->object)) { + /* first reset all modified objects from the backup + copies as soon as the first conflict is detected; + then we will proceed below to update our segment from + the old (but unmodified) version to the newer version. 
+ */ + reset_modified_from_backup_copies(my_segnum); + needs_abort = true; + + dprintf(("_stm_validate() failed for obj %p\n", undo->object)); + break; + } + } + } + + if (cl->written_count) { + struct stm_undo_s *undo = cl->written; + struct stm_undo_s *end = cl->written + cl->written_count; + + segment_really_copied_from |= (1UL << cl->segment_num); + + import_objects(cl->segment_num, -1, undo, end); + + /* here we can actually have our own modified version, so + make sure to only copy things that are not modified in our + segment... (if we do not abort) */ + copy_bk_objs_in_page_from + (cl->segment_num, -1, /* any page */ + !needs_abort); /* if we abort, we still want to copy everything */ + } + + /* last fully validated entry */ + STM_PSEGMENT->last_commit_log_entry = cl; + if (cl == last_cl) + break; + } + assert(cl == last_cl); + + /* XXX: this optimization fails in test_basic.py, bug3 */ + /* OPT_ASSERT(segment_really_copied_from < (1 << NB_SEGMENTS)); */ + /* int segnum; */ + /* for (segnum = 1; segnum < NB_SEGMENTS; segnum++) { */ + /* if (segment_really_copied_from & (1UL << segnum)) { */ + /* /\* here we can actually have our own modified version, so */ + /* make sure to only copy things that are not modified in our */ + /* segment... (if we do not abort) *\/ */ + /* copy_bk_objs_in_page_from( */ + /* segnum, -1, /\* any page *\/ */ + /* !needs_abort); /\* if we abort, we still want to copy everything *\/ */ + /* } */ + /* } */ + } + + /* done with modifications */ + release_modification_lock_set(segments_to_lock); + release_privatization_lock(STM_SEGMENT->segment_num); + } + + return !needs_abort; } -void stm_validate(void *free_if_abort) -{ - /* go from last known entry in commit log to the - most current one and apply all changes done - by other transactions. Abort if we read one of - the committed objs. 
*/ - if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { - assert((uintptr_t)STM_PSEGMENT->last_commit_log_entry->next == -1); - return; - } - - volatile struct stm_commit_log_entry_s *cl, *prev_cl; - cl = prev_cl = (volatile struct stm_commit_log_entry_s *) - STM_PSEGMENT->last_commit_log_entry; - - bool needs_abort = false; - /* Don't check 'cl'. This entry is already checked */ - while ((cl = cl->next)) { - if ((uintptr_t)cl == -1) { - /* there is an inevitable transaction running */ -#if STM_TESTS - free(free_if_abort); - stm_abort_transaction(); -#endif - cl = prev_cl; - _stm_collectable_safe_point(); - continue; - } - prev_cl = cl; - - OPT_ASSERT(cl->segment_num >= 0 && cl->segment_num < NB_SEGMENTS); - - object_t *obj; - size_t i = 0; - while ((obj = cl->written[i])) { - _update_obj_from(cl->segment_num, obj); - - if (_stm_was_read(obj)) { - needs_abort = true; - - /* if we wrote this obj, we need to free its backup and - remove it from modified_old_objects because - we would otherwise overwrite the updated obj on abort */ - acquire_modified_objs_lock(STM_SEGMENT->segment_num); - wlog_t *item; - struct tree_s *tree = STM_PSEGMENT->modified_old_objects; - TREE_FIND(tree, (uintptr_t)obj, item, goto not_found); - - free((void*)item->val); - TREE_FIND_DELETE(tree, item); - - not_found: - /* nothing todo */ - release_modified_objs_lock(STM_SEGMENT->segment_num); - } - - i++; - }; - - /* last fully validated entry */ - STM_PSEGMENT->last_commit_log_entry = (struct stm_commit_log_entry_s *)cl; - } - - if (needs_abort) { - free(free_if_abort); - stm_abort_transaction(); - } -} - -static struct stm_commit_log_entry_s *_create_commit_log_entry() +static struct stm_commit_log_entry_s *_create_commit_log_entry(void) { /* puts all modified_old_objects in a new commit log entry */ // we don't need the privatization lock, as we are only // reading from modified_old_objs and nobody but us can change it - struct tree_s *tree = STM_PSEGMENT->modified_old_objects; - size_t 
count = tree_count(tree); - size_t byte_len = sizeof(struct stm_commit_log_entry_s) + (count + 1) * sizeof(object_t*); + struct list_s *list = STM_PSEGMENT->modified_old_objects; + OPT_ASSERT((list_count(list) % 3) == 0); + size_t count = list_count(list) / 3; + size_t byte_len = sizeof(struct stm_commit_log_entry_s) + + count * sizeof(struct stm_undo_s); struct stm_commit_log_entry_s *result = malloc(byte_len); result->next = NULL; result->segment_num = STM_SEGMENT->segment_num; - - int i = 0; - wlog_t *item; - TREE_LOOP_FORWARD(tree, item); { - result->written[i] = (object_t*)item->addr; - i++; - } TREE_LOOP_END; - - OPT_ASSERT(count == i); - result->written[count] = NULL; - + result->rev_num = -1; /* invalid */ + result->written_count = count; + memcpy(result->written, list->items, count * sizeof(struct stm_undo_s)); return result; } -static void _validate_and_add_to_commit_log() + +static void reset_wb_executed_flags(void); +static void readd_wb_executed_flags(void); +static void check_all_write_barrier_flags(char *segbase, struct list_s *list); + +static void _validate_and_attach(struct stm_commit_log_entry_s *new) { - struct stm_commit_log_entry_s *new; - volatile struct stm_commit_log_entry_s **to; + struct stm_commit_log_entry_s *old; + + OPT_ASSERT(new != NULL); + /* we are attaching a real CL entry: */ + bool is_commit = new != INEV_RUNNING; + + while (1) { + if (!_stm_validate()) { + if (new != INEV_RUNNING) + free(new); + stm_abort_transaction(); + } + +#if STM_TESTS + if (STM_PSEGMENT->transaction_state != TS_INEVITABLE + && STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) { + /* abort for tests... */ + stm_abort_transaction(); + } +#endif + + if (is_commit) { + /* we must not remove the WB_EXECUTED flags before validation as + it is part of a condition in import_objects() called by + copy_bk_objs_in_page_from to not overwrite our modifications. 
+ So we do it here: */ + reset_wb_executed_flags(); + check_all_write_barrier_flags(STM_SEGMENT->segment_base, + STM_PSEGMENT->modified_old_objects); + } + + /* try to attach to commit log: */ + old = STM_PSEGMENT->last_commit_log_entry; + if (old->next == NULL) { + if (new != INEV_RUNNING) /* INEVITABLE */ + new->rev_num = old->rev_num + 1; + + if (__sync_bool_compare_and_swap(&old->next, NULL, new)) + break; /* success! */ + } else if (old->next == INEV_RUNNING) { + /* we failed because there is an INEV transaction running */ + usleep(10); + } + + if (is_commit) { + /* XXX: unfortunately, if we failed to attach our CL entry, + we have to re-add the WB_EXECUTED flags before we try to + validate again because of said condition (s.a) */ + readd_wb_executed_flags(); + } + + dprintf(("_validate_and_attach(%p) failed, enter safepoint\n", new)); + + /* check for requested safe point. otherwise an INEV transaction + may try to commit but cannot because of the busy-loop here. */ + _stm_collectable_safe_point(); + } +} + +static void _validate_and_turn_inevitable(void) +{ + _validate_and_attach((struct stm_commit_log_entry_s *)INEV_RUNNING); +} + +static void _validate_and_add_to_commit_log(void) +{ + struct stm_commit_log_entry_s *old, *new; new = _create_commit_log_entry(); if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { - OPT_ASSERT((uintptr_t)STM_PSEGMENT->last_commit_log_entry->next == -1); + old = STM_PSEGMENT->last_commit_log_entry; + new->rev_num = old->rev_num + 1; + OPT_ASSERT(old->next == INEV_RUNNING); - to = &(STM_PSEGMENT->last_commit_log_entry->next); - bool yes = __sync_bool_compare_and_swap(to, (void*)-1, new); + /* WB_EXECUTED must be removed before we attach */ + reset_wb_executed_flags(); + check_all_write_barrier_flags(STM_SEGMENT->segment_base, + STM_PSEGMENT->modified_old_objects); + + bool yes = __sync_bool_compare_and_swap(&old->next, INEV_RUNNING, new); OPT_ASSERT(yes); - return; + } + else { + _validate_and_attach(new); } - /* regular 
transaction: */ - do { - stm_validate(new); - - /* try attaching to commit log: */ - to = &(STM_PSEGMENT->last_commit_log_entry->next); - } while (!__sync_bool_compare_and_swap(to, NULL, new)); -} - -static void _validate_and_turn_inevitable() -{ - struct stm_commit_log_entry_s *new; - volatile struct stm_commit_log_entry_s **to; - - new = (struct stm_commit_log_entry_s*)-1; - do { - stm_validate(NULL); - - /* try attaching to commit log: */ - to = &(STM_PSEGMENT->last_commit_log_entry->next); - } while (!__sync_bool_compare_and_swap(to, NULL, new)); + acquire_modification_lock(STM_SEGMENT->segment_num); + list_clear(STM_PSEGMENT->modified_old_objects); + STM_PSEGMENT->last_commit_log_entry = new; + release_modification_lock(STM_SEGMENT->segment_num); } /* ############# STM ############# */ +void stm_validate() +{ + if (!_stm_validate()) + stm_abort_transaction(); +} -void _privatize_shared_page(uintptr_t pagenum) -{ - /* privatize pages of obj for our segment iff previously - the pages were fully shared. 
*/ -#ifndef NDEBUG - long l; - for (l = 0; l < NB_SEGMENTS; l++) { - assert(get_priv_segment(l)->privatization_lock); - } -#endif - - uintptr_t i; - int my_segnum = STM_SEGMENT->segment_num; - - assert(is_shared_log_page(pagenum)); - char *src = (char*)(get_virt_page_of(0, pagenum) * 4096UL); - - for (i = 1; i < NB_SEGMENTS; i++) { - assert(!is_private_log_page_in(i, pagenum)); - - page_privatize_in(i, pagenum, src); - } - set_page_private_in(0, pagenum); - - OPT_ASSERT(is_private_log_page_in(my_segnum, pagenum)); - assert(!is_shared_log_page(pagenum)); -} void _stm_write_slowpath(object_t *obj) { @@ -246,56 +531,100 @@ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); obj_size = stmcb_size_rounded_up((struct object_s *)realobj); /* get the last page containing data from the object */ - end_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; + if (LIKELY(is_small_uniform(obj))) { + end_page = first_page; + } else { + end_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; + } /* add to read set: */ stm_read(obj); - /* create backup copy: */ - struct object_s *bk_obj = malloc(obj_size); - memcpy(bk_obj, realobj, obj_size); + if (obj->stm_flags & GCFLAG_WB_EXECUTED) { + /* already executed WB once in this transaction. do GC + part again: */ + dprintf(("write_slowpath-fast(%p)\n", obj)); + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + return; + } - /* if there are shared pages, privatize them */ + assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED)); + dprintf(("write_slowpath(%p): sz=%lu\n", obj, obj_size)); + + retry: + /* privatize pages: */ + /* XXX don't always acquire all locks... 
*/ + acquire_all_privatization_locks(); uintptr_t page; for (page = first_page; page <= end_page; page++) { - if (is_shared_log_page(page)) { - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - acquire_privatization_lock(i); - } - if (is_shared_log_page(page)) - _privatize_shared_page(page); - for (i = NB_SEGMENTS-1; i >= 0; i--) { - release_privatization_lock(i); - } + if (get_page_status_in(my_segnum, page) == PAGE_NO_ACCESS) { + /* XXX: slow? */ + release_all_privatization_locks(); + + volatile char *dummy = REAL_ADDRESS(STM_SEGMENT->segment_base, page * 4096UL); + *dummy; /* force segfault */ + + goto retry; } } - /* pages not shared anymore. but we still may have - only a read protected page ourselves: */ + /* all pages are private to us and we hold the privatization_locks so + we are allowed to modify them */ - acquire_privatization_lock(my_segnum); - OPT_ASSERT(is_private_log_page_in(my_segnum, first_page)); + /* phew, now add the obj to the write-set and register the + backup copy. */ + /* XXX: we should not be here at all fiddling with page status + if 'obj' is merely an overflow object. FIX ME, likely by copying + the overflow number logic from c7. */ - /* remove the WRITE_BARRIER flag */ + DEBUG_EXPECT_SEGFAULT(false); + + acquire_modification_lock(STM_SEGMENT->segment_num); + uintptr_t slice_sz; + uintptr_t in_page_offset = (uintptr_t)obj % 4096UL; + uintptr_t remaining_obj_sz = obj_size; + for (page = first_page; page <= end_page; page++) { + /* XXX Maybe also use mprotect() again to mark pages of the object as read-only, and + only stick it into modified_old_objects page-by-page? Maybe it's + possible to do card-marking that way, too. 
*/ + OPT_ASSERT(remaining_obj_sz); + + slice_sz = remaining_obj_sz; + if (in_page_offset + slice_sz > 4096UL) { + /* not over page boundaries */ + slice_sz = 4096UL - in_page_offset; + } + + size_t slice_off = obj_size - remaining_obj_sz; + + /* make backup slice: */ + char *bk_slice = malloc(slice_sz); + memcpy(bk_slice, realobj + slice_off, slice_sz); + + STM_PSEGMENT->modified_old_objects = list_append3( + STM_PSEGMENT->modified_old_objects, + (uintptr_t)obj, /* obj */ + (uintptr_t)bk_slice, /* bk_addr */ + NEW_SLICE(slice_off, slice_sz)); + + remaining_obj_sz -= slice_sz; + in_page_offset = (in_page_offset + slice_sz) % 4096UL; /* mostly 0 */ + } + OPT_ASSERT(remaining_obj_sz == 0); + + /* remove the WRITE_BARRIER flag and add WB_EXECUTED */ obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + obj->stm_flags |= GCFLAG_WB_EXECUTED; + + DEBUG_EXPECT_SEGFAULT(true); + + release_modification_lock(STM_SEGMENT->segment_num); + /* done fiddling with protection and privatization */ + release_all_privatization_locks(); /* also add it to the GC list for minor collections */ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); - - /* done fiddling with protection and privatization */ - release_privatization_lock(my_segnum); - - /* phew, now add the obj to the write-set and register the - backup copy. */ - /* XXX: possibly slow check; try overflow objs again? 
*/ - if (!tree_contains(STM_PSEGMENT->modified_old_objects, (uintptr_t)obj)) { - acquire_modified_objs_lock(my_segnum); - tree_insert(STM_PSEGMENT->modified_old_objects, - (uintptr_t)obj, (uintptr_t)bk_obj); - release_modified_objs_lock(my_segnum); - } - } static void reset_transaction_read_version(void) @@ -308,7 +637,7 @@ (long)(NB_READMARKER_PAGES * 4096UL))); if (mmap(readmarkers, NB_READMARKER_PAGES * 4096UL, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { + MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0) != readmarkers) { /* fall-back */ #if STM_TESTS stm_fatalerror("reset_transaction_read_version: %m"); @@ -318,15 +647,40 @@ STM_SEGMENT->transaction_read_version = 1; } +static void reset_wb_executed_flags(void) +{ + dprintf(("reset_wb_executed_flags()\n")); + struct list_s *list = STM_PSEGMENT->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + + for (; undo < end; undo++) { + object_t *obj = undo->object; + obj->stm_flags &= ~GCFLAG_WB_EXECUTED; + } +} + +static void readd_wb_executed_flags(void) +{ + dprintf(("readd_wb_executed_flags()\n")); + struct list_s *list = STM_PSEGMENT->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + + for (; undo < end; undo++) { + object_t *obj = undo->object; + obj->stm_flags |= GCFLAG_WB_EXECUTED; + } +} + + + static void _stm_start_transaction(stm_thread_local_t *tl) { assert(!_stm_in_transaction(tl)); - retry: - - if (!acquire_thread_segment(tl)) - goto retry; + while (!acquire_thread_segment(tl)) {} /* GS invalid before this point! 
*/ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); @@ -337,11 +691,12 @@ STM_PSEGMENT->running_pthread = pthread_self(); #endif STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; + STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; enter_safe_point_if_requested(); - dprintf(("start_transaction\n")); + dprintf(("> start_transaction\n")); - s_mutex_unlock(); + s_mutex_unlock(); // XXX it's probably possible to not acquire this here uint8_t old_rv = STM_SEGMENT->transaction_read_version; STM_SEGMENT->transaction_read_version = old_rv + 1; @@ -349,7 +704,8 @@ reset_transaction_read_version(); } - assert(tree_is_cleared(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->new_objects)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); @@ -358,7 +714,7 @@ check_nursery_at_transaction_start(); - stm_validate(NULL); + stm_validate(); } long stm_start_transaction(stm_thread_local_t *tl) @@ -401,40 +757,56 @@ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; list_clear(STM_PSEGMENT->objects_pointing_to_nursery); + list_clear(STM_PSEGMENT->new_objects); release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! 
*/ } +static void check_all_write_barrier_flags(char *segbase, struct list_s *list) +{ +#ifndef NDEBUG + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + for (; undo < end; undo++) { + object_t *obj = undo->object; + struct object_s *dst = (struct object_s*)REAL_ADDRESS(segbase, obj); + assert(dst->stm_flags & GCFLAG_WRITE_BARRIER); + assert(!(dst->stm_flags & GCFLAG_WB_EXECUTED)); + } +#endif +} + +static void push_new_objects_to_other_segments(void) +{ + acquire_privatization_lock(STM_SEGMENT->segment_num); + LIST_FOREACH_R(STM_PSEGMENT->new_objects, object_t *, + ({ + assert(item->stm_flags & GCFLAG_WB_EXECUTED); + item->stm_flags &= ~GCFLAG_WB_EXECUTED; + synchronize_object_enqueue(item); + })); + synchronize_objects_flush(); + release_privatization_lock(STM_SEGMENT->segment_num); +} + + void stm_commit_transaction(void) { assert(!_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); assert(STM_PSEGMENT->running_pthread == pthread_self()); - dprintf(("stm_commit_transaction()\n")); + dprintf(("> stm_commit_transaction()\n")); minor_collection(1); + push_new_objects_to_other_segments(); + _validate_and_add_to_commit_log(); - /* clear WRITE_BARRIER flags, free all backup copies, - and clear the tree: */ - acquire_modified_objs_lock(STM_SEGMENT->segment_num); - - struct tree_s *tree = STM_PSEGMENT->modified_old_objects; - wlog_t *item; - TREE_LOOP_FORWARD(tree, item); { - object_t *obj = (object_t*)item->addr; - struct object_s* bk_obj = (struct object_s *)item->val; - free(bk_obj); - obj->stm_flags |= GCFLAG_WRITE_BARRIER; - } TREE_LOOP_END; - tree_clear(tree); - - release_modified_objs_lock(STM_SEGMENT->segment_num); - invoke_and_clear_user_callbacks(0); /* for commit */ + /* XXX do we still need a s_mutex_lock() section here? 
*/ s_mutex_lock(); enter_safe_point_if_requested(); assert(STM_SEGMENT->nursery_end == NURSERY_END); @@ -452,35 +824,37 @@ s_mutex_unlock(); } -void reset_modified_from_backup_copies(int segment_num) +static void reset_modified_from_backup_copies(int segment_num) { #pragma push_macro("STM_PSEGMENT") #pragma push_macro("STM_SEGMENT") #undef STM_PSEGMENT #undef STM_SEGMENT - acquire_modified_objs_lock(segment_num); + assert(get_priv_segment(segment_num)->modification_lock); struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); - struct tree_s *tree = pseg->modified_old_objects; - wlog_t *item; - TREE_LOOP_FORWARD(tree, item); { - object_t *obj = (object_t*)item->addr; - struct object_s* bk_obj = (struct object_s *)item->val; - size_t obj_size; + struct list_s *list = pseg->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); - obj_size = stmcb_size_rounded_up(bk_obj); + for (; undo < end; undo++) { + object_t *obj = undo->object; + char *dst = REAL_ADDRESS(pseg->pub.segment_base, obj); - memcpy(REAL_ADDRESS(pseg->pub.segment_base, obj), - bk_obj, obj_size); - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); /* not written */ + memcpy(dst + SLICE_OFFSET(undo->slice), + undo->backup, + SLICE_SIZE(undo->slice)); - free(bk_obj); - } TREE_LOOP_END; + dprintf(("reset_modified_from_backup_copies(%d): obj=%p off=%lu bk=%p\n", + segment_num, obj, SLICE_OFFSET(undo->slice), undo->backup)); - tree_clear(tree); + free(undo->backup); + } - release_modified_objs_lock(segment_num); + /* check that all objects have the GCFLAG_WRITE_BARRIER afterwards */ + check_all_write_barrier_flags(pseg->pub.segment_base, list); + list_clear(list); #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") } @@ -505,7 +879,9 @@ long bytes_in_nursery = throw_away_nursery(pseg); + acquire_modification_lock(segment_num); reset_modified_from_backup_copies(segment_num); + 
release_modification_lock(segment_num); stm_thread_local_t *tl = pseg->pub.running_thread; #ifdef STM_NO_AUTOMATIC_SETJMP @@ -525,8 +901,11 @@ stm_rewind_jmp_restore_shadowstack(tl); assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); #endif -tl->last_abort__bytes_in_nursery = bytes_in_nursery; + tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; + tl->last_abort__bytes_in_nursery = bytes_in_nursery; + list_clear(pseg->objects_pointing_to_nursery); + list_clear(pseg->new_objects); #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") } @@ -613,3 +992,95 @@ synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE); s_mutex_unlock(); } + + + +static inline void _synchronize_fragment(stm_char *frag, ssize_t frag_size) +{ + /* double-check that the result fits in one page */ + assert(frag_size > 0); + assert(frag_size + ((uintptr_t)frag & 4095) <= 4096); + + /* XXX: is it possible to just add to the queue iff the pages + of the fragment need syncing to other segments? 
(keep privatization + lock until the "flush") */ + + /* Enqueue this object (or fragemnt of object) */ + if (STM_PSEGMENT->sq_len == SYNC_QUEUE_SIZE) + synchronize_objects_flush(); + STM_PSEGMENT->sq_fragments[STM_PSEGMENT->sq_len] = frag; + STM_PSEGMENT->sq_fragsizes[STM_PSEGMENT->sq_len] = frag_size; + ++STM_PSEGMENT->sq_len; +} + +static void synchronize_object_enqueue(object_t *obj) +{ + assert(!_is_young(obj)); + assert(STM_PSEGMENT->privatization_lock); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED)); + + ssize_t obj_size = stmcb_size_rounded_up( + (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); + OPT_ASSERT(obj_size >= 16); + + if (LIKELY(is_small_uniform(obj))) { + OPT_ASSERT(obj_size <= GC_LAST_SMALL_SIZE); + _synchronize_fragment((stm_char *)obj, obj_size); + return; + } + + /* else, a more complicated case for large objects, to copy + around data only within the needed pages + */ + uintptr_t start = (uintptr_t)obj; + uintptr_t end = start + obj_size; + + do { + uintptr_t copy_up_to = (start + 4096) & ~4095; /* end of page */ + if (copy_up_to >= end) { + copy_up_to = end; /* this is the last fragment */ + } + uintptr_t copy_size = copy_up_to - start; + + _synchronize_fragment((stm_char *)start, copy_size); + + start = copy_up_to; + } while (start != end); +} + +static void synchronize_objects_flush(void) +{ + long j = STM_PSEGMENT->sq_len; + if (j == 0) + return; + STM_PSEGMENT->sq_len = 0; + + dprintf(("synchronize_objects_flush(): %ld fragments\n", j)); + + assert(STM_PSEGMENT->privatization_lock); + DEBUG_EXPECT_SEGFAULT(false); + + long i, myself = STM_SEGMENT->segment_num; + do { + --j; + stm_char *frag = STM_PSEGMENT->sq_fragments[j]; + uintptr_t page = ((uintptr_t)frag) / 4096UL; + ssize_t frag_size = STM_PSEGMENT->sq_fragsizes[j]; + + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, frag); + for (i = 0; i < NB_SEGMENTS; i++) { + if (i == myself) + continue; + + if 
(get_page_status_in(i, page) != PAGE_NO_ACCESS) { + /* shared or private, but never segfault */ + char *dst = REAL_ADDRESS(get_segment_base(i), frag); + dprintf(("-> flush %p to seg %lu, sz=%lu\n", frag, i, frag_size)); + memcpy(dst, src, frag_size); + } + } + } while (j > 0); + + DEBUG_EXPECT_SEGFAULT(true); +} diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -6,6 +6,8 @@ #include #include #include +#include + /************************************************************/ @@ -15,9 +17,8 @@ #define NB_PAGES (2500*256) // 2500MB -#define NB_SEGMENTS STM_NB_SEGMENTS +#define NB_SEGMENTS (STM_NB_SEGMENTS+1) /* +1 for sharing seg 0 */ #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ -#define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define NB_NURSERY_PAGES (STM_GC_NURSERY/4) #define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) @@ -25,6 +26,7 @@ #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE #define END_NURSERY_PAGE (FIRST_NURSERY_PAGE + NB_NURSERY_PAGES) +#define NB_SHARED_PAGES (NB_PAGES - END_NURSERY_PAGE) #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) @@ -35,9 +37,15 @@ enum /* stm_flags */ { GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER, GCFLAG_HAS_SHADOW = 0x02, + GCFLAG_WB_EXECUTED = 0x04, + GCFLAG_VISITED = 0x08, }; + +#define SYNC_QUEUE_SIZE 31 + + /************************************************************/ @@ -48,27 +56,58 @@ struct stm_priv_segment_info_s { struct stm_segment_info_s pub; - uint8_t modified_objs_lock; - struct tree_s *modified_old_objects; + /* lock protecting from concurrent modification of + 'modified_old_objects', page-revision-changes, ... + Always acquired in global order of segments to avoid deadlocks. 
*/ + uint8_t modification_lock; + + /* All the old objects (older than the current transaction) that + the current transaction attempts to modify. This is used to + track the STM status: these are old objects that where written + to and that will need to be recorded in the commit log. The + list contains three entries for every such object, in the same + format as 'struct stm_undo_s' below. + */ + struct list_s *modified_old_objects; + struct list_s *objects_pointing_to_nursery; struct tree_s *young_outside_nursery; struct tree_s *nursery_objects_shadows; - uint8_t privatization_lock; + /* list of objects created in the current transaction and + that survived at least one minor collection. They need + to be synchronized to other segments on commit, but they + do not need to be in the commit log entry. */ + struct list_s *new_objects; + + uint8_t privatization_lock; // XXX KILL uint8_t safe_point; uint8_t transaction_state; + /* Temp for minor collection */ + bool minor_collect_will_commit_now; + struct tree_s *callbacks_on_commit_and_abort[2]; struct stm_commit_log_entry_s *last_commit_log_entry; struct stm_shadowentry_s *shadowstack_at_start_of_transaction; + object_t *threadlocal_at_start_of_transaction; /* For debugging */ #ifndef NDEBUG pthread_t running_pthread; #endif + + /* This is for smallmalloc.c */ + struct small_malloc_data_s small_malloc_data; + + /* The sync queue used to synchronize newly allocated objs to + other segments */ + stm_char *sq_fragments[SYNC_QUEUE_SIZE]; + int sq_fragsizes[SYNC_QUEUE_SIZE]; + int sq_len; }; enum /* safe_point */ { @@ -88,16 +127,41 @@ }; /* Commit Log things */ +struct stm_undo_s { + object_t *object; /* the object that is modified */ + char *backup; /* some backup data (a slice of the original obj) */ + uint64_t slice; /* location and size of this slice (cannot cross + pages). The size is in the lower 2 bytes, and + the offset in the remaining 6 bytes. 
*/ +}; +#define SLICE_OFFSET(slice) ((slice) >> 16) +#define SLICE_SIZE(slice) ((int)((slice) & 0xFFFF)) +#define NEW_SLICE(offset, size) (((uint64_t)(offset)) << 16 | (size)) + +/* The model is: we have a global chained list, from 'commit_log_root', + of 'struct stm_commit_log_entry_s' entries. Every one is fully + read-only apart from the 'next' field. Every one stands for one + commit that occurred. It lists the old objects that were modified + in this commit, and their attached "undo logs" --- that is, the + data from 'written[n].backup' is the content of (slices of) the + object as they were *before* that commit occurred. +*/ +#define INEV_RUNNING ((void*)-1) struct stm_commit_log_entry_s { - volatile struct stm_commit_log_entry_s *next; + struct stm_commit_log_entry_s *volatile next; int segment_num; - object_t *written[]; /* terminated with a NULL ptr */ + uint64_t rev_num; + size_t written_count; + struct stm_undo_s written[]; }; -static struct stm_commit_log_entry_s commit_log_root = {NULL, -1}; +static struct stm_commit_log_entry_s commit_log_root = {NULL, -1, 0, 0}; +#ifndef STM_TESTS static char *stm_object_pages; -static int stm_object_pages_fd; +#else +char *stm_object_pages; +#endif static stm_thread_local_t *stm_all_thread_locals = NULL; @@ -133,7 +197,11 @@ static stm_thread_local_t *abort_with_mutex_no_longjmp(void); static void abort_data_structures_from_segment_num(int segment_num); +static void synchronize_object_enqueue(object_t *obj); +static void synchronize_objects_flush(void); +static void _signal_handler(int sig, siginfo_t *siginfo, void *context); +static bool _stm_validate(); static inline void _duck(void) { /* put a call to _duck() between two instructions that set 0 into @@ -154,12 +222,83 @@ spinlock_release(get_priv_segment(segnum)->privatization_lock); } -static inline void acquire_modified_objs_lock(int segnum) +static inline bool all_privatization_locks_acquired() { - 
spinlock_acquire(get_priv_segment(segnum)->modified_objs_lock); +#ifndef NDEBUG + long l; + for (l = 0; l < NB_SEGMENTS; l++) { + if (!get_priv_segment(l)->privatization_lock) + return false; + } + return true; +#else + abort(); +#endif } -static inline void release_modified_objs_lock(int segnum) +static inline void acquire_all_privatization_locks() { - spinlock_release(get_priv_segment(segnum)->modified_objs_lock); + /* XXX: don't do for the sharing seg0 */ + long l; + for (l = 0; l < NB_SEGMENTS; l++) { + acquire_privatization_lock(l); + } } + +static inline void release_all_privatization_locks() +{ + long l; + for (l = NB_SEGMENTS-1; l >= 0; l--) { + release_privatization_lock(l); + } +} + + + +/* Modification locks are used to prevent copying from a segment + where either the revision of some pages is inconsistent with the + rest, or the modified_old_objects list is being modified (bk_copys). + + Lock ordering: acquire privatization lock around acquiring a set + of modification locks! 
+*/ + +static inline void acquire_modification_lock(int segnum) +{ + spinlock_acquire(get_priv_segment(segnum)->modification_lock); +} + +static inline void release_modification_lock(int segnum) +{ + spinlock_release(get_priv_segment(segnum)->modification_lock); +} + +static inline void acquire_modification_lock_set(uint64_t seg_set) +{ + assert(NB_SEGMENTS <= 64); + OPT_ASSERT(seg_set < (1 << NB_SEGMENTS)); + + /* acquire locks in global order */ + int i; + for (i = 0; i < NB_SEGMENTS; i++) { + if ((seg_set & (1 << i)) == 0) + continue; + + spinlock_acquire(get_priv_segment(i)->modification_lock); + } +} + +static inline void release_modification_lock_set(uint64_t seg_set) +{ + assert(NB_SEGMENTS <= 64); + OPT_ASSERT(seg_set < (1 << NB_SEGMENTS)); + + int i; + for (i = 0; i < NB_SEGMENTS; i++) { + if ((seg_set & (1 << i)) == 0) + continue; + + assert(get_priv_segment(i)->modification_lock); + spinlock_release(get_priv_segment(i)->modification_lock); + } +} diff --git a/c8/stm/fprintcolor.c b/c8/stm/fprintcolor.c --- a/c8/stm/fprintcolor.c +++ b/c8/stm/fprintcolor.c @@ -8,7 +8,8 @@ char buffer[2048]; va_list ap; int result; - int size = (int)sprintf(buffer, "\033[%dm[%d,%lx] ", dprintfcolor(), + int size = (int)sprintf(buffer, "\033[%dm[%d,%d,%lx] ", + dprintfcolor(), STM_SEGMENT->segment_num, (int)getpid(), (long)pthread_self()); assert(size >= 0); diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -2,42 +2,79 @@ # error "must be compiled via stmgc.c" #endif +static struct list_s *testing_prebuilt_objs = NULL; +static struct tree_s *tree_prebuilt_objs = NULL; /* XXX refactor */ + static void setup_gcpage(void) { + char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; + uintptr_t length = NB_SHARED_PAGES * 4096UL; + _stm_largemalloc_init_arena(base, length); + uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; - uninitialized_page_stop = stm_object_pages + NB_PAGES * 4096UL; + 
uninitialized_page_stop = uninitialized_page_start + NB_SHARED_PAGES * 4096UL; } static void teardown_gcpage(void) { -} - -static void setup_N_pages(char *pages_addr, uint64_t num) -{ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - acquire_privatization_lock(i); - } - pages_initialize_shared((pages_addr - stm_object_pages) / 4096UL, num); - for (i = NB_SEGMENTS-1; i >= 0; i--) { - release_privatization_lock(i); + LIST_FREE(testing_prebuilt_objs); + if (tree_prebuilt_objs != NULL) { + tree_free(tree_prebuilt_objs); + tree_prebuilt_objs = NULL; } } + +static void setup_N_pages(char *pages_addr, long num) +{ + /* make pages accessible in sharing segment only (pages already + PROT_READ/WRITE (see setup.c), but not marked accessible as page + status). */ + + /* lock acquiring maybe not necessary because the affected pages don't + need privatization protection. (but there is an assert right + now to enforce that XXXXXX) */ + acquire_all_privatization_locks(); + + uintptr_t p = (pages_addr - stm_object_pages) / 4096UL; + dprintf(("setup_N_pages(%p, %lu): pagenum %lu\n", pages_addr, num, p)); + while (num-->0) { + /* XXX: page_range_mark_accessible() */ + page_mark_accessible(0, p + num); + } + + release_all_privatization_locks(); +} + + static int lock_growth_large = 0; -static char *allocate_outside_nursery_large(uint64_t size) +static stm_char *allocate_outside_nursery_large(uint64_t size) { - /* XXX: real allocation */ + /* Allocate the object with largemalloc.c from the lower addresses. 
*/ + char *addr = _stm_large_malloc(size); + if (addr == NULL) + stm_fatalerror("not enough memory!"); + + if (LIKELY(addr + size <= uninitialized_page_start)) { + dprintf(("allocate_outside_nursery_large(%lu): %p, page=%lu\n", + size, (char*)(addr - stm_object_pages), + (uintptr_t)(addr - stm_object_pages) / 4096UL)); + + return (stm_char*)(addr - stm_object_pages); + } + + + /* uncommon case: need to initialize some more pages */ spinlock_acquire(lock_growth_large); - char *addr = uninitialized_page_start; char *start = uninitialized_page_start; - if (addr + size > start) { /* XXX: always for now */ + if (addr + size > start) { uintptr_t npages; - npages = (addr + size - start) / 4096UL + 1; + npages = (addr + size - start) / 4096UL; + npages += GCPAGE_NUM_PAGES; if (uninitialized_page_stop - start < npages * 4096UL) { stm_fatalerror("out of memory!"); /* XXX */ } @@ -48,26 +85,500 @@ stm_fatalerror("uninitialized_page_start changed?"); } } - dprintf(("allocate_outside_nursery_large(%lu): %p, seg=%d, page=%lu\n", - size, addr, get_segment_of_linear_address(addr), - (addr - get_segment_base(get_segment_of_linear_address(addr))) / 4096UL)); + + dprintf(("allocate_outside_nursery_large(%lu): %p, page=%lu\n", + size, (char*)(addr - stm_object_pages), + (uintptr_t)(addr - stm_object_pages) / 4096UL)); spinlock_release(lock_growth_large); - return addr; + return (stm_char*)(addr - stm_object_pages); } object_t *_stm_allocate_old(ssize_t size_rounded_up) { /* only for tests xxx but stm_setup_prebuilt() uses this now too */ - char *p = allocate_outside_nursery_large(size_rounded_up); - memset(p, 0, size_rounded_up); + stm_char *p = allocate_outside_nursery_large(size_rounded_up); + object_t *o = (object_t *)p; - object_t *o = (object_t *)(p - stm_object_pages); + // sharing seg0 needs to be current: + assert(STM_SEGMENT->segment_num == 0); + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); o->stm_flags = GCFLAG_WRITE_BARRIER; + if 
(testing_prebuilt_objs == NULL) + testing_prebuilt_objs = list_create(); + LIST_APPEND(testing_prebuilt_objs, o); + dprintf(("allocate_old(%lu): %p, seg=%d, page=%lu\n", size_rounded_up, p, - get_segment_of_linear_address(p), - (p - STM_SEGMENT->segment_base) / 4096UL)); + get_segment_of_linear_address(stm_object_pages + (uintptr_t)p), + (uintptr_t)p / 4096UL)); return o; } + + +/************************************************************/ + + +static void major_collection_if_requested(void) +{ + assert(!_has_mutex()); + if (!is_major_collection_requested()) + return; + + s_mutex_lock(); + + if (is_major_collection_requested()) { /* if still true */ + + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); + + if (is_major_collection_requested()) { /* if *still* true */ + major_collection_now_at_safe_point(); + } + + } + + s_mutex_unlock(); +} + + +/************************************************************/ + +/* objects to trace are traced in the sharing seg0 or in a + certain segment if there exist modifications there. + All other segments' versions should be identical to seg0's + version and thus don't need tracing. 
*/ +static struct list_s *marked_objects_to_trace; + +/* we use the sharing seg0's pages for the GCFLAG_VISITED flag */ + +static inline struct object_s *mark_loc(object_t *obj) +{ + /* uses the memory in seg0 for marking: */ + struct object_s *result = (struct object_s*)REAL_ADDRESS(stm_object_pages, obj); + return result; +} + +static inline bool mark_visited_test(object_t *obj) +{ + struct object_s *realobj = mark_loc(obj); + return !!(realobj->stm_flags & GCFLAG_VISITED); +} + +static inline bool mark_visited_test_and_set(object_t *obj) +{ + struct object_s *realobj = mark_loc(obj); + if (realobj->stm_flags & GCFLAG_VISITED) { + return true; + } + else { + realobj->stm_flags |= GCFLAG_VISITED; + return false; + } +} + +static inline bool mark_visited_test_and_clear(object_t *obj) +{ + struct object_s *realobj = mark_loc(obj); + if (realobj->stm_flags & GCFLAG_VISITED) { + realobj->stm_flags &= ~GCFLAG_VISITED; + return true; + } + else { + return false; + } +} + + +/************************************************************/ + + +static bool is_new_object(object_t *obj) +{ + struct object_s *realobj = (struct object_s*)REAL_ADDRESS(stm_object_pages, obj); /* seg0 */ + return realobj->stm_flags & GCFLAG_WB_EXECUTED; +} + + +static inline void mark_record_trace(object_t **pobj) +{ + /* takes a normal pointer to a thread-local pointer to an object */ + object_t *obj = *pobj; + + /* Note: this obj might be visited already, but from a different + segment. We ignore this case and skip re-visiting the object + anyway. The idea is that such an object is old (not from the + current transaction), otherwise it would not be possible to see + it in two segments; and moreover it is not modified, otherwise + mark_trace() would have been called on two different segments + already. That means that this object is identical in all + segments and only needs visiting once. (It may actually be in a + shared page, or maybe not.) 
+ */ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; /* already visited this object */ + + LIST_APPEND(marked_objects_to_trace, obj); +} + + +static void mark_and_trace(object_t *obj, char *segment_base) +{ + /* mark the obj and trace all reachable objs from it */ + + assert(list_is_empty(marked_objects_to_trace)); + + /* trace into the object (the version from 'segment_base') */ + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(segment_base, obj); + stmcb_trace(realobj, &mark_record_trace); + + /* trace all references found in sharing seg0 (should always be + up-to-date and not cause segfaults, except for new objs) */ + while (!list_is_empty(marked_objects_to_trace)) { + obj = (object_t *)list_pop_item(marked_objects_to_trace); + + char *base = is_new_object(obj) ? segment_base : stm_object_pages; + realobj = (struct object_s *)REAL_ADDRESS(base, obj); + stmcb_trace(realobj, &mark_record_trace); + } +} + +static inline void mark_visit_object(object_t *obj, char *segment_base) +{ + /* if already visited, don't trace */ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; + mark_and_trace(obj, segment_base); +} + + +static void mark_visit_possibly_new_object(char *segment_base, object_t *obj) +{ + /* if newly allocated object, we trace in segment_base, otherwise in + the sharing seg0 */ + if (obj == NULL) + return; + + if (is_new_object(obj)) { + mark_visit_object(obj, segment_base); + } else { + mark_visit_object(obj, stm_object_pages); + } +} + +static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size) +{ + const struct stm_shadowentry_s *p, *end; + p = (const struct stm_shadowentry_s *)slice; + end = (const struct stm_shadowentry_s *)(slice + size); + for (; p < end; p++) + if ((((uintptr_t)p->ss) & 3) == 0) { + assert(!is_new_object(p->ss)); + mark_visit_object(p->ss, stm_object_pages); // seg0 + } + return NULL; +} + +static void assert_obj_accessible_in(long segnum, object_t *obj) +{ +#ifndef NDEBUG 
+ uintptr_t page = (uintptr_t)obj / 4096UL; + assert(get_page_status_in(segnum, page) == PAGE_ACCESSIBLE); + + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(get_segment_base(segnum), obj); + + size_t obj_size = stmcb_size_rounded_up(realobj); + uintptr_t count = obj_size / 4096UL + 1; + while (count--> 0) { + assert(get_page_status_in(segnum, page) == PAGE_ACCESSIBLE); + page++; + } +#endif +} + + + +static void mark_visit_from_modified_objects(void) +{ + /* look for modified objects in segments and mark all of them + for further tracing (XXX: don't if we are going to share + some of the pages) */ + + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + char *base = get_segment_base(i); + + struct list_s *lst = get_priv_segment(i)->modified_old_objects; + struct stm_undo_s *modified = (struct stm_undo_s *)lst->items; + struct stm_undo_s *end = (struct stm_undo_s *)(lst->items + lst->count); + + for (; modified < end; modified++) { + object_t *obj = modified->object; + /* All modified objs have all pages accessible for now. + This is because we create a backup of the whole obj + and thus make all pages accessible. */ + assert_obj_accessible_in(i, obj); + + assert(!is_new_object(obj)); /* should never be in that list */ + + if (!mark_visited_test_and_set(obj)) { + /* trace shared, committed version */ + mark_and_trace(obj, stm_object_pages); + } + mark_and_trace(obj, base); /* private, modified version */ + } + } +} + +static void mark_visit_from_roots(void) +{ + if (testing_prebuilt_objs != NULL) { + LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, + mark_visit_object(item, stm_object_pages)); // seg0 + } + + stm_thread_local_t *tl = stm_all_thread_locals; + do { + /* look at all objs on the shadow stack (they are old but may + be uncommitted so far, so only exist in the associated_segment_num). 
+ + IF they are uncommitted new objs, trace in the actual segment, + otherwise, since we just executed a minor collection, they were + all synced to the sharing seg0. Thus we can trace them there. + + If they were again modified since then, they were traced + by mark_visit_from_modified_object() already. + */ + + /* only for new, uncommitted objects: + If 'tl' is currently running, its 'associated_segment_num' + field is the segment number that contains the correct + version of its overflowed objects. */ + char *segment_base = get_segment_base(tl->associated_segment_num); + + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; + while (current-- != base) { + if ((((uintptr_t)current->ss) & 3) == 0) { + mark_visit_possibly_new_object(segment_base, current->ss); + } + } + + mark_visit_possibly_new_object(segment_base, tl->thread_local_obj); + + tl = tl->next; + } while (tl != stm_all_thread_locals); + + /* also visit all objs in the rewind-shadowstack */ + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + if (get_priv_segment(i)->transaction_state != TS_NONE) { + mark_visit_possibly_new_object( + get_segment_base(i), + get_priv_segment(i)->threadlocal_at_start_of_transaction); + + stm_rewind_jmp_enum_shadowstack( + get_segment(i)->running_thread, + mark_visit_objects_from_ss); + } + } +} + +static void ready_new_objects(void) +{ +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + /* objs in new_objects only have garbage in the sharing seg0, + since it is used to mark objs as visited, we must make + sure the flag is cleared at the start of a major collection. + (XXX: ^^^ may be optional if we have the part below) + + Also, we need to be able to recognize these objects in order + to only trace them in the segment they are valid in. So we + also make sure to set WB_EXECUTED in the sharing seg0. 
No + other objs than new_objects have WB_EXECUTED in seg0 (since + there can only be committed versions there). + */ + + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct list_s *lst = pseg->new_objects; + + LIST_FOREACH_R(lst, object_t* /*item*/, + ({ + struct object_s *realobj; + /* WB_EXECUTED always set in this segment */ + assert(realobj = (struct object_s*)REAL_ADDRESS(pseg->pub.segment_base, item)); + assert(realobj->stm_flags & GCFLAG_WB_EXECUTED); + + /* clear VISITED and ensure WB_EXECUTED in seg0 */ + mark_visited_test_and_clear(item); + realobj = (struct object_s*)REAL_ADDRESS(stm_object_pages, item); + realobj->stm_flags |= GCFLAG_WB_EXECUTED; + })); + } +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") +} + + +static void clean_up_segment_lists(void) +{ +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + + long i; + for (i = 1; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct list_s *lst; From noreply at buildbot.pypy.org Wed Jan 21 17:27:51 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 21 Jan 2015 17:27:51 +0100 (CET) Subject: [pypy-commit] stmgc default: undo bogus change so we actually do major collections again Message-ID: <20150121162752.04CA01C00B5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1566:fb965316ef58 Date: 2015-01-21 17:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/fb965316ef58/ Log: undo bogus change so we actually do major collections again diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -315,13 +315,19 @@ void stm_collect(long level) { - if (level > 0) { + if (level > 0) force_major_collection_request(); - minor_collection(/*commit=*/ false); + + minor_collection(/*commit=*/ false); + +#ifdef STM_TESTS + /* tests don't want aborts in stm_allocate, thus 
+ we only do major collections if explicitly requested */ + if (level > 0) major_collection_if_requested(); - } else { - minor_collection(/*commit=*/ false); - } +#else + major_collection_if_requested(); +#endif } From noreply at buildbot.pypy.org Wed Jan 21 17:27:53 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 21 Jan 2015 17:27:53 +0100 (CET) Subject: [pypy-commit] stmgc default: fix a case where objs in new_objects may not have WB_EXECUTED (gcpage.c asserts that) Message-ID: <20150121162753.522771C00B5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1567:787672796c2f Date: 2015-01-21 17:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/787672796c2f/ Log: fix a case where objs in new_objects may not have WB_EXECUTED (gcpage.c asserts that) diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -788,6 +788,16 @@ })); synchronize_objects_flush(); release_privatization_lock(STM_SEGMENT->segment_num); + + /* we can as well clear the list here, since the + objects are only useful if the commit succeeds. And + we never do a major collection in-between. + They should also survive any page privatization happening + before the actual commit, since we always do a pagecopy + in handle_segfault_in_page() that also copies + unknown-to-the-segment/uncommitted things. + */ + list_clear(STM_PSEGMENT->new_objects); } @@ -801,7 +811,7 @@ minor_collection(1); push_new_objects_to_other_segments(); - + /* push before validate. 
otherwise they are reachable too early */ _validate_and_add_to_commit_log(); invoke_and_clear_user_callbacks(0); /* for commit */ From noreply at buildbot.pypy.org Wed Jan 21 17:27:54 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 21 Jan 2015 17:27:54 +0100 (CET) Subject: [pypy-commit] stmgc default: free the commit log as part of major gcs Message-ID: <20150121162754.6CE311C00B5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1568:d24560f5557a Date: 2015-01-21 17:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/d24560f5557a/ Log: free the commit log as part of major gcs diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -154,7 +154,7 @@ size_t written_count; struct stm_undo_s written[]; }; -static struct stm_commit_log_entry_s commit_log_root = {NULL, -1, 0, 0}; +static struct stm_commit_log_entry_s commit_log_root; #ifndef STM_TESTS diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -531,6 +531,52 @@ _stm_smallmalloc_sweep(); } +static void clean_up_commit_log_entries() +{ + struct stm_commit_log_entry_s *cl, *next; + +#ifndef NDEBUG + /* check that all segments are at the same revision: */ + cl = get_priv_segment(0)->last_commit_log_entry; + for (long i = 1; i < NB_SEGMENTS; i++) { + assert(get_priv_segment(i)->last_commit_log_entry == cl); + } +#endif + + /* if there is only one element, we don't have to do anything: */ + cl = &commit_log_root; + + if (cl->next == NULL || cl->next == INEV_RUNNING) + return; + + bool was_inev = false; + uint64_t rev_num = -1; + + next = cl->next; /* guaranteed to exist */ + do { + cl = next; + rev_num = cl->rev_num; + + next = cl->next; + free(cl); + if (next == INEV_RUNNING) { + was_inev = true; + break; + } + } while (next != NULL); + + /* set the commit_log_root to the last, common cl entry: */ + commit_log_root.next = was_inev ? 
INEV_RUNNING : NULL; + commit_log_root.rev_num = rev_num; + + /* update in all segments: */ + for (long i = 0; i < NB_SEGMENTS; i++) { + get_priv_segment(i)->last_commit_log_entry = &commit_log_root; + } + + assert(_stm_count_cl_entries() == 0); +} + static void major_collection_now_at_safe_point(void) @@ -544,6 +590,12 @@ dprintf((" | used before collection: %ld\n", (long)pages_ctl.total_allocated)); + dprintf((" | commit log entries before: %ld\n", + _stm_count_cl_entries())); + + /* free all commit log entries. all segments are on the most recent + revision now. */ + clean_up_commit_log_entries(); /* only necessary because of assert that fails otherwise (XXX) */ acquire_all_privatization_locks(); @@ -558,10 +610,10 @@ mark_visit_from_roots(); LIST_FREE(marked_objects_to_trace); - /* /\* cleanup *\/ */ + /* cleanup */ clean_up_segment_lists(); - /* /\* sweeping *\/ */ + /* sweeping */ sweep_large_objects(); sweep_small_objects(); diff --git a/c8/stm/misc.c b/c8/stm/misc.c --- a/c8/stm/misc.c +++ b/c8/stm/misc.c @@ -41,6 +41,19 @@ return (obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) == 0; } +long _stm_count_cl_entries() +{ + struct stm_commit_log_entry_s *cl = &commit_log_root; + + long count = 0; + while ((cl = cl->next)) { + if (cl == INEV_RUNNING) + break; + count++; + } + return count; +} + #ifdef STM_TESTS bool _stm_is_accessible_page(uintptr_t pagenum) diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -78,6 +78,11 @@ setup_protection_settings(); setup_signal_handler(); + commit_log_root.next = NULL; + commit_log_root.segment_num = -1; + commit_log_root.rev_num = 0; + commit_log_root.written_count = 0; + long i; /* including seg0 */ for (i = 0; i < NB_SEGMENTS; i++) { @@ -129,6 +134,8 @@ need to call it. 
*/ assert(!_has_mutex()); + assert(commit_log_root.segment_num == -1); + long i; for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -113,6 +113,7 @@ object_t *_stm_enum_objects_pointing_to_nursery(long index); object_t *_stm_next_last_cl_entry(); void _stm_start_enum_last_cl_entry(); +long _stm_count_cl_entries(); uint64_t _stm_total_allocated(void); #endif diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -110,6 +110,7 @@ object_t *_stm_enum_objects_pointing_to_nursery(long index); object_t *_stm_next_last_cl_entry(); void _stm_start_enum_last_cl_entry(); +long _stm_count_cl_entries(); void *memset(void *s, int c, size_t n); @@ -435,6 +436,7 @@ def stm_major_collect(): res = lib._check_stm_collect(1) + assert count_commit_log_entries() == 0 if res == 1: raise Conflict() return res @@ -474,7 +476,7 @@ return None return map(lib._stm_enum_old_objects_with_cards, range(count)) -def last_commit_log_entries(): +def last_commit_log_entry_objs(): lib._stm_start_enum_last_cl_entry() res = [] obj = lib._stm_next_last_cl_entry() @@ -483,6 +485,9 @@ obj = lib._stm_next_last_cl_entry() return res +def count_commit_log_entries(): + return lib._stm_count_cl_entries() + SHADOWSTACK_LENGTH = 1000 diff --git a/c8/test/test_basic.py b/c8/test/test_basic.py --- a/c8/test/test_basic.py +++ b/c8/test/test_basic.py @@ -33,12 +33,14 @@ self.switch(1) self.start_transaction() self.commit_transaction() - assert last_commit_log_entries() == [] + assert last_commit_log_entry_objs() == [] + assert count_commit_log_entries() == 1 self.switch(0) self.commit_transaction() - assert last_commit_log_entries() == [] + assert last_commit_log_entry_objs() == [] + assert count_commit_log_entries() == 2 def test_simple_read(self): self.start_transaction() @@ -46,7 +48,7 @@ stm_read(lp1) assert stm_was_read(lp1) 
self.commit_transaction() - assert last_commit_log_entries() == [] + assert last_commit_log_entry_objs() == [] def test_simple_write(self): self.start_transaction() @@ -57,7 +59,7 @@ assert modified_old_objects() == [] # object not old assert objects_pointing_to_nursery() == [] # short transaction self.commit_transaction() - assert last_commit_log_entries() == [] + assert last_commit_log_entry_objs() == [] def test_allocate_old(self): lp1 = stm_allocate_old(16) @@ -106,7 +108,7 @@ # self.switch(1) self.commit_transaction() - assert last_commit_log_entries() == [lp1] + assert last_commit_log_entry_objs() == [lp1] # py.test.raises(Conflict, self.switch, 0) # detects rw conflict @@ -128,13 +130,13 @@ # self.switch(0) self.commit_transaction() - assert last_commit_log_entries() == [lp1] # commit '0' + assert last_commit_log_entry_objs() == [lp1] # commit '0' # py.test.raises(Conflict, self.switch, 1) self.start_transaction() # updates to '0' stm_set_char(lp1, 'x') self.commit_transaction() - assert last_commit_log_entries() == [lp1] # commit 'x' + assert last_commit_log_entry_objs() == [lp1] # commit 'x' # if only_bk: self.start_transaction() @@ -169,7 +171,7 @@ # self.switch(1) self.commit_transaction() - assert last_commit_log_entries() == [lp1] + assert last_commit_log_entry_objs() == [lp1] # '1' is committed # if only_bk: @@ -206,13 +208,13 @@ # self.switch(0) self.commit_transaction() - assert last_commit_log_entries() == [lp1] # commit '0' + assert last_commit_log_entry_objs() == [lp1] # commit '0' # py.test.raises(Conflict, self.switch, 1) self.start_transaction() # updates to '0' stm_set_char(lp1, 'x') self.commit_transaction() - assert last_commit_log_entries() == [lp1] # commit 'x' + assert last_commit_log_entry_objs() == [lp1] # commit 'x' # if only_bk: self.start_transaction() @@ -240,7 +242,7 @@ self.start_transaction() stm_set_char(lp1, 'C') self.commit_transaction() - assert last_commit_log_entries() == [lp1] # commit 'C' + assert 
last_commit_log_entry_objs() == [lp1] # commit 'C' # if only_bk: self.start_transaction() @@ -270,7 +272,7 @@ self.start_transaction() stm_set_char(lp2, 'C') self.commit_transaction() # R1 - assert last_commit_log_entries() == [lp2] # commit 'C' + assert last_commit_log_entry_objs() == [lp2] # commit 'C' if only_bk: self.start_transaction() stm_set_char(lp2, 'c') # R1.1 @@ -279,7 +281,7 @@ self.start_transaction() stm_set_char(lp1, 'D') self.commit_transaction() # R2 - assert last_commit_log_entries() == [lp1] # commit 'D' + assert last_commit_log_entry_objs() == [lp1] # commit 'D' if only_bk: self.start_transaction() stm_set_char(lp1, 'd') # R2.1 @@ -309,13 +311,13 @@ self.start_transaction() stm_set_char(lp2, 'C') self.commit_transaction() # R1 - assert last_commit_log_entries() == [lp2] # commit 'C' + assert last_commit_log_entry_objs() == [lp2] # commit 'C' # self.switch(2) self.start_transaction() stm_set_char(lp1, 'D') self.commit_transaction() # R2 - assert last_commit_log_entries() == [lp1] # commit 'D' + assert last_commit_log_entry_objs() == [lp1] # commit 'D' # self.switch(3) self.start_transaction() # stm_validate() -> R2 @@ -325,13 +327,13 @@ self.start_transaction() stm_set_char(lp1, 'I') self.commit_transaction() # R2 - assert last_commit_log_entries() == [lp1] # commit 'I' + assert last_commit_log_entry_objs() == [lp1] # commit 'I' # self.switch(1) self.start_transaction() stm_set_char(lp2, 'H') self.commit_transaction() # R3 - assert last_commit_log_entries() == [lp2] # commit 'H' + assert last_commit_log_entry_objs() == [lp2] # commit 'H' # self.switch(3, validate=False) # R2 again assert stm_get_char(lp1) == 'D' # R2 @@ -353,7 +355,7 @@ self.start_transaction() stm_set_char(lp1, 'C') self.commit_transaction() # R1 - assert last_commit_log_entries() == [lp1] # commit 'C' + assert last_commit_log_entry_objs() == [lp1] # commit 'C' self.start_transaction() stm_set_char(lp1, 'c') # bk_copy # @@ -384,7 +386,7 @@ assert (p2 - p1) % 4096 == 0 assert 
stm_get_char(lp) == 'u' self.commit_transaction() - assert last_commit_log_entries() == [lp] + assert last_commit_log_entry_objs() == [lp] def test_commit_fresh_objects2(self): self.switch(1) diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -409,3 +409,26 @@ self.switch(1, False) py.test.raises(Conflict, stm_major_collect) + + + def test_cleaning_of_cl(self): + self.start_transaction() + stm_major_collect() + self.commit_transaction() + + self.switch(1) + self.start_transaction() + self.commit_transaction() + + self.switch(0) + self.start_transaction() + assert count_commit_log_entries() == 2 + stm_major_collect() + assert count_commit_log_entries() == 0 + + stm_major_collect() + + self.become_inevitable() + stm_major_collect() + stm_major_collect() + self.commit_transaction() From noreply at buildbot.pypy.org Wed Jan 21 17:31:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Jan 2015 17:31:56 +0100 (CET) Subject: [pypy-commit] pypy default: Write a "Using Mercurial" section at the start of Message-ID: <20150121163156.357761C00B5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75461:e4d3cf3e0470 Date: 2015-01-21 17:31 +0100 http://bitbucket.org/pypy/pypy/changeset/e4d3cf3e0470/ Log: Write a "Using Mercurial" section at the start of getting-started- dev.rst. diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,93 @@ .. contents:: +Using Mercurial +--------------- + +PyPy development is based on Mercurial (hg). If you are not used to +version control, the cycle for a new PyPy contributor goes typically +like this: + +* Make an account on bitbucket_. + +* Go to https://bitbucket.org/pypy/pypy/ and click "fork" (left + icons). You get a fork of the repository, e.g. in + https://bitbucket.org/yourname/pypy/. + +* Clone this new repo (i.e. 
the fork) to your local machine with the command + ``hg clone ssh://hg at bitbucket.org/yourname/pypy``. It is a very slow + operation but only ever needs to be done once. If you already cloned + ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, + then you can reuse the same clone by editing the file ``.hg/hgrc`` in + your clone to contain the line ``default = + ssh://hg at bitbucket.org/yourname/pypy``, and then do ``hg pull && hg + up``. If you already have such a clone but don't want to change it, + you can clone that copy with ``hg clone /path/to/other/copy``, and + then edit ``.hg/hgrc`` as above and do ``hg pull && hg up``. + +* Now you have a complete copy of the PyPy repo. Make a branch + with a command like ``hg branch name_of_your_branch``. + +* Edit things. Use ``hg diff`` to see what you changed. Use ``hg add`` + to make Mercurial aware of new files you added, e.g. new test files. + Use ``hg status`` to see if there are such files. Run tests! (See + the rest of this page.) + +* Commit regularly with ``hg commit``. A one-line commit message is + fine. We love to have tons of commits; make one as soon as you have + some progress, even if it is only some new test that doesn't pass yet, + or fixing things even if not all tests pass. Step by step, you are + building the history of your changes, which is the point of a version + control system. (There are commands like ``hg log`` and ``hg up`` + that you should read about later, to learn how to navigate this + history.) + +* The commits stay on your machine until you do ``hg push`` to "push" + them back to the repo named in the file ``.hg/hgrc``. Repos are + basically just collections of commits (a commit is also called a + changeset): there is one repo per url, plus one for each local copy on + each local machine. The commands ``hg push`` and ``hg pull`` copy + commits around, with the goal that all repos in question end up with + the exact same set of commits. 
By opposition, ``hg up`` only updates + the "working copy" by reading the local repository, i.e. it makes the + files that you see correspond to the latest (or any other) commit + locally present. + +* You should push often; there is no real reason not to. Remember that + even if they are pushed, with the setup above, the commits are (1) + only in ``bitbucket.org/yourname/pypy``, and (2) in the branch you + named. Yes, they are publicly visible, but don't worry about someone + walking around the thousands of repos on bitbucket saying "hah, look + at the bad coding style of that guy". Try to get into the mindset + that your work is not secret and it's fine that way. We might not + accept it as is for PyPy, asking you instead to improve some things, + but we are not going to judge you. + +* The final step is to open a pull request, so that we know that you'd + like to merge that branch back to the original ``pypy/pypy`` repo. + This can also be done several times if you have interesting + intermediate states, but if you get there, then we're likely to + proceed to the next stage, which is... + +* Get a regular account for pushing directly to + ``bitbucket.org/pypy/pypy`` (just ask and you'll get it, basically). + Once you have it you can rewrite your file ``.hg/hgrc`` to contain + ``default = ssh://hg at bitbucket.org/pypy/pypy``. Your changes will + then be pushed directly to the official repo, but (if you follow these + rules) they are still on a branch, and we can still review the + branches you want to merge. + +* If you get closer to the regular day-to-day development, you'll notice + that we generally push small changes as one or a few commits directly + to the branch ``default``. Also, we often collaborate even if we are + on other branches, which do not really "belong" to anyone. At this + point you'll need ``hg merge`` and learn how to resolve conflicts that + sometimes occur when two people try to push different commits in + parallel on the same branch. 
But it is likely an issue for later ``:-)`` + +.. _bitbucket: https://bitbucket.org/ + + Running PyPy's unit tests ------------------------- From noreply at buildbot.pypy.org Wed Jan 21 18:23:09 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 21 Jan 2015 18:23:09 +0100 (CET) Subject: [pypy-commit] pypy default: remove unneeded funcarg Message-ID: <20150121172309.DB4C61C0305@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75462:56822c21b3fb Date: 2015-01-21 16:35 +0100 http://bitbucket.org/pypy/pypy/changeset/56822c21b3fb/ Log: remove unneeded funcarg diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -144,7 +144,7 @@ assert obj2.map.back.ever_mutated == True assert obj2.map is obj.map -def test_attr_immutability_delete(monkeypatch): +def test_attr_immutability_delete(): cls = Class() obj = cls.instantiate() obj.setdictvalue(space, "a", 10) From noreply at buildbot.pypy.org Wed Jan 21 18:23:22 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 21 Jan 2015 18:23:22 +0100 (CET) Subject: [pypy-commit] pypy default: merge default Message-ID: <20150121172322.0914B1C0305@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75463:eef53f795276 Date: 2015-01-21 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/eef53f795276/ Log: merge default diff too long, truncating to 2000 out of 23420 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -17,6 +17,10 @@ except ImportError: assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} +try: + from __pypy__ import reversed_dict +except ImportError: + reversed_dict = lambda d: reversed(d.keys()) try: from thread import get_ident as _get_ident @@ -29,142 +33,35 @@ ################################################################################ class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as regular dictionaries. + '''Dictionary that remembers insertion order. - # The internal self.__map dict maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + In PyPy all dicts are ordered anyway. This is mostly useful as a + placeholder to mean "this dict must be ordered even on CPython". - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. The signature is the same as - regular dictionaries, but keyword arguments are not recommended because - their insertion order is arbitrary. 
- - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link at the end of the linked list, - # and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - return dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which gets - # removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link_prev, link_next, _ = self.__map.pop(key) - link_prev[1] = link_next # update link_prev[NEXT] - link_next[0] = link_prev # update link_next[PREV] - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - # Traverse the linked list in order. - root = self.__root - curr = root[1] # start at the first node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[1] # move to next node + Known difference: iterating over an OrderedDict which is being + concurrently modified raises RuntimeError in PyPy. In CPython + instead we get some behavior that appears reasonable in some + cases but is nonsensical in other cases. This is officially + forbidden by the CPython docs, so we forbid it explicitly for now. + ''' def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - # Traverse the linked list in reverse order. 
- root = self.__root - curr = root[0] # start at the last node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[0] # move to previous node - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - dict.clear(self) - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) pairs in od' - for k in self: - yield (k, self[k]) - - update = MutableMapping.update - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding - value. If key is not found, d is returned if given, otherwise KeyError - is raised. - - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default + return reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' - if not self: - raise KeyError('dictionary is empty') - key = next(reversed(self) if last else iter(self)) - value = self.pop(key) - return key, value + if last: + return dict.popitem(self) + else: + it = dict.__iter__(self) + try: + k = it.next() + except StopIteration: + raise KeyError('dictionary is empty') + return (k, self.pop(k)) def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' @@ -183,8 +80,6 @@ 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) @@ -193,17 +88,6 @@ 'od.copy() -> a shallow copy of od' return self.__class__(self) - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. - If not specified, the value defaults to None. - - ''' - self = cls() - for key in iterable: - self[key] = value - return self - def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. 
diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -58,7 +58,7 @@ executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], + 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], diff --git a/lib-python/2.7/sqlite3/test/dbapi.py b/lib-python/2.7/sqlite3/test/dbapi.py --- a/lib-python/2.7/sqlite3/test/dbapi.py +++ b/lib-python/2.7/sqlite3/test/dbapi.py @@ -478,6 +478,29 @@ except TypeError: pass + def CheckCurDescription(self): + self.cu.execute("select * from test") + + actual = self.cu.description + expected = [ + ('id', None, None, None, None, None, None), + ('name', None, None, None, None, None, None), + ('income', None, None, None, None, None, None), + ] + self.assertEqual(expected, actual) + + def CheckCurDescriptionVoidStatement(self): + self.cu.execute("insert into test(name) values (?)", ("foo",)) + self.assertIsNone(self.cu.description) + + def CheckCurDescriptionWithoutStatement(self): + cu = self.cx.cursor() + try: + self.assertIsNone(cu.description) + finally: + cu.close() + + @unittest.skipUnless(threading, 'This test requires threading.') class ThreadTests(unittest.TestCase): def setUp(self): diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1589,7 +1589,7 @@ 'copyfile' in caller.f_globals): dest_dir = sys.pypy_resolvedirof(target_executable) src_dir = sys.pypy_resolvedirof(sys.executable) - for libname in ['libpypy-c.so']: + for libname in ['libpypy-c.so', 'libpypy-c.dylib']: dest_library = os.path.join(dest_dir, libname) src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): diff --git a/lib-python/2.7/test/test_collections.py 
b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -578,7 +578,12 @@ def __repr__(self): return "MySet(%s)" % repr(list(self)) s = MySet([5,43,2,1]) - self.assertEqual(s.pop(), 1) + # changed from CPython 2.7: it was "s.pop() == 1" but I see + # nothing that guarantees a particular order here. In the + # 'all_ordered_dicts' branch of PyPy (or with OrderedDict + # instead of sets), it consistently returns 5, but this test + # should not rely on this or any other order. + self.assert_(s.pop() in [5,43,2,1]) def test_issue8750(self): empty = WithSet() @@ -1010,8 +1015,9 @@ c=3, e=5).items()), pairs) # mixed input # make sure no positional args conflict with possible kwdargs - self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, - ['self']) + if '__init__' in OrderedDict.__dict__: # absent in PyPy + self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, + ['self']) # Make sure that direct calls to __init__ do not clear previous contents d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)]) @@ -1108,6 +1114,16 @@ od.popitem() self.assertEqual(len(od), 0) + def test_popitem_first(self): + pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] + shuffle(pairs) + od = OrderedDict(pairs) + while pairs: + self.assertEqual(od.popitem(last=False), pairs.pop(0)) + with self.assertRaises(KeyError): + od.popitem(last=False) + self.assertEqual(len(od), 0) + def test_pop(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) @@ -1179,7 +1195,11 @@ od = OrderedDict(pairs) # yaml.dump(od) --> # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n' - self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1])) + + # PyPy bug fix: added [0] at the end of this line, because the + # test is really about the 2-tuples that need to be 2-lists + # inside the list of 6 of 
them + self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1][0])) def test_reduce_not_too_fat(self): # do not save instance dictionary if not needed @@ -1189,6 +1209,16 @@ od.x = 10 self.assertEqual(len(od.__reduce__()), 3) + def test_reduce_exact_output(self): + # PyPy: test that __reduce__() produces the exact same answer as + # CPython does, even though in the 'all_ordered_dicts' branch we + # have to emulate it. + pairs = [['c', 1], ['b', 2], ['d', 4]] + od = OrderedDict(pairs) + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,))) + od.x = 10 + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,), {'x': 10})) + def test_repr(self): od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py --- a/lib-python/2.7/test/test_xml_etree.py +++ b/lib-python/2.7/test/test_xml_etree.py @@ -225,9 +225,9 @@ >>> element.remove(subelement) >>> serialize(element) # 5 '' - >>> element.remove(subelement) + >>> element.remove(subelement) # doctest: +ELLIPSIS Traceback (most recent call last): - ValueError: list.remove(x): x not in list + ValueError: list.remove(... >>> serialize(element) # 6 '' >>> element[0:0] = [subelement, subelement, subelement] diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -7,7 +7,7 @@ 1. check out the branch vendor/stdlib 2. upgrade the files there -3. update stdlib-versions.txt with the output of hg -id from the cpython repo +3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit 5. update to default/py3k 6. create a integration branch for the new stdlib diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -9,7 +9,10 @@ of the given arguments and keywords. 
""" - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1175,8 +1175,9 @@ try: return self.__description except AttributeError: - self.__description = self.__statement._get_description() - return self.__description + if self.__statement: + self.__description = self.__statement._get_description() + return self.__description description = property(__get_description) def __get_lastrowid(self): diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -6,3 +6,8 @@ __version__ = "0.8.6" __version_info__ = (0, 8, 6) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. 
+__version_verifier_modules__ = "0.8.6" diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -69,6 +69,7 @@ self._function_caches = [] self._libraries = [] self._cdefsources = [] + self._windows_unicode = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -77,6 +78,7 @@ # with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -189,13 +191,16 @@ cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) - def offsetof(self, cdecl, fieldname): + def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given - structure, which must be given as a C type name. + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. + You can also give numeric values which correspond to array + items, in case of an array type. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) - return self._backend.typeoffsetof(cdecl, fieldname)[1] + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and @@ -264,6 +269,16 @@ """ return self._backend.buffer(cdata, size) + def from_buffer(self, python_buffer): + """Return a that points to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types str, + unicode, or bytearray (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. 
+ """ + return self._backend.from_buffer(self.BCharA, python_buffer) + def callback(self, cdecl, python_callable=None, error=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. @@ -335,9 +350,23 @@ which requires binary compatibility in the signatures. """ from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations + if self._windows_unicode: + self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Veritier(). tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). self._libraries.append(lib) return lib @@ -356,15 +385,29 @@ with self._lock: return model.pointer_cache(self, ctype) - def addressof(self, cdata, field=None): + def addressof(self, cdata, *fields_or_indexes): """Return the address of a . - If 'field' is specified, return the address of this field. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. 
""" ctype = self._backend.typeof(cdata) - ctype, offset = self._backend.typeoffsetof(ctype, field) + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset + def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined in another FFI instance. Usage is similar to a #include in C, @@ -387,6 +430,44 @@ def from_handle(self, x): return self._backend.from_handle(x) + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'enabled_flag' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. 
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -169,6 +169,7 @@ class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False + kind = "pointer" @classmethod def _newp(cls, init): @@ -370,10 +371,12 @@ (CTypesPrimitive, type(source).__name__)) return source # + kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name + kind = kind1 def __init__(self, value): self._value = value @@ -703,12 +706,13 @@ class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) - _kind = kind + _kind = kind = kind1 # CTypesStructOrUnion._fix_class() 
return CTypesStructOrUnion @@ -994,27 +998,42 @@ def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) - def typeoffsetof(self, BType, fieldname): - if fieldname is not None and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - if fieldname is None: - return (BType, 0) - else: + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) - def rawaddressof(self, BTypePtr, cdata, offset): + def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a ") - if offset != 0: + if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ 
b/lib_pypy/cffi/commontypes.py @@ -29,6 +29,9 @@ result = model.PointerType(resolve_common_type(result[:-2])) elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) + elif result == 'set-unicode-needed': + raise api.FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) else: if commontype == result: raise api.FFIError("Unsupported type: %r. Please file a bug " @@ -86,8 +89,6 @@ "ULONGLONG": "unsigned long long", "WCHAR": "wchar_t", "SHORT": "short", - "TBYTE": "WCHAR", - "TCHAR": "WCHAR", "UCHAR": "unsigned char", "UINT": "unsigned int", "UINT8": "unsigned char", @@ -157,14 +158,12 @@ "LPCVOID": model.const_voidp_type, "LPCWSTR": "const WCHAR *", - "LPCTSTR": "LPCWSTR", "LPDWORD": "DWORD *", "LPHANDLE": "HANDLE *", "LPINT": "int *", "LPLONG": "long *", "LPSTR": "CHAR *", "LPWSTR": "WCHAR *", - "LPTSTR": "LPWSTR", "LPVOID": model.voidp_type, "LPWORD": "WORD *", "LRESULT": "LONG_PTR", @@ -173,7 +172,6 @@ "PBYTE": "BYTE *", "PCHAR": "CHAR *", "PCSTR": "const CHAR *", - "PCTSTR": "LPCWSTR", "PCWSTR": "const WCHAR *", "PDWORD": "DWORD *", "PDWORDLONG": "DWORDLONG *", @@ -200,9 +198,6 @@ "PSIZE_T": "SIZE_T *", "PSSIZE_T": "SSIZE_T *", "PSTR": "CHAR *", - "PTBYTE": "TBYTE *", - "PTCHAR": "TCHAR *", - "PTSTR": "LPWSTR", "PUCHAR": "UCHAR *", "PUHALF_PTR": "UHALF_PTR *", "PUINT": "UINT *", @@ -240,6 +235,15 @@ "USN": "LONGLONG", "VOID": model.void_type, "WPARAM": "UINT_PTR", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", }) return result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -1,4 +1,3 @@ - from . 
import api, model from .commontypes import COMMON_TYPES, resolve_common_type try: @@ -209,6 +208,8 @@ def _add_constants(self, key, val): if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations raise api.FFIError( "multiple declarations of constant: %s" % (key,)) self._int_constants[key] = val @@ -228,12 +229,18 @@ pyvalue = int(int_str, 0) self._add_constants(key, pyvalue) + self._declare('macro ' + key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: - raise api.CDefError('only supports the syntax "#define ' - '%s ..." (literally) or "#define ' - '%s 0x1FF" for now' % (key, key)) + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... (literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) def _parse_decl(self, decl): node = decl.type @@ -460,6 +467,8 @@ elif kind == 'union': tp = model.UnionType(explicit_name, None, None, None) elif kind == 'enum': + if explicit_name == '__dotdotdot__': + raise CDefError("Enums cannot be declared with ...") tp = self._build_enum_type(explicit_name, type.values) else: raise AssertionError("kind = %r" % (kind,)) @@ -532,9 +541,24 @@ def _parse_constant(self, exprnode, partial_length_ok=False): # for now, limited to expressions that are an immediate number - # or negative number + # or positive/negative number if isinstance(exprnode, pycparser.c_ast.Constant): - return int(exprnode.value, 0) + s = exprnode.value + if s.startswith('0'): + if s.startswith('0x') or s.startswith('0X'): + return int(s, 16) + return int(s, 8) + elif '1' <= s[0] <= '9': + return int(s, 10) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise api.CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + 
exprnode.op == '+'): + return self._parse_constant(exprnode.expr) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -11,6 +11,9 @@ """ +LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', + 'extra_objects', 'depends'] + def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -235,6 +235,8 @@ BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) +char_array_type = ArrayType(PrimitiveType('char'), None) + class StructOrUnionOrEnum(BaseTypeByIdentity): _attrs_ = ('name',) @@ -478,7 +480,7 @@ try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: - raise NotImplementedError("%r: %s" % (srctype, e)) + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -65,7 +65,7 @@ # The following two 'chained_list_constants' items contains # the head of these two chained lists, as a string that gives the # call to do, if any. - self._chained_list_constants = ['0', '0'] + self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] # prnt = self._prnt # first paste some standard set of lines that are mostly '#define' @@ -138,15 +138,22 @@ prnt() prnt('#endif') - def load_library(self): + def load_library(self, flags=None): # XXX review all usages of 'self' here! 
# import it as a new extension module + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) module = imp.load_dynamic(self.verifier.get_module_name(), self.verifier.modulefilename) except ImportError as e: error = "importing %r: %s" % (self.verifier.modulefilename, e) raise ffiplatform.VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler @@ -228,7 +235,8 @@ converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: - converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) errvalue = '-1' # elif isinstance(tp, model.PointerType): @@ -267,8 +275,8 @@ self._prnt(' if (datasize != 0) {') self._prnt(' if (datasize < 0)') self._prnt(' %s;' % errcode) - self._prnt(' %s = alloca(datasize);' % (tovar,)) - self._prnt(' memset((void *)%s, 0, datasize);' % (tovar,)) + self._prnt(' %s = alloca((size_t)datasize);' % (tovar,)) + self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,)) self._prnt(' if (_cffi_convert_array_from_object(' '(char *)%s, _cffi_type(%d), %s) < 0)' % ( tovar, self._gettypenum(tp), fromvar)) @@ -336,7 +344,7 @@ prnt = self._prnt numargs = len(tp.args) if numargs == 0: - argname = 'no_arg' + argname = 'noarg' elif numargs == 1: argname = 'arg0' else: @@ -386,6 +394,9 @@ prnt(' Py_END_ALLOW_THREADS') prnt() # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') if result_code: prnt(' return %s;' % self._convert_expr_from_c(tp.result, 'result', 'result type')) @@ -452,6 +463,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in 
tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -482,6 +494,8 @@ prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') prnt(' return _cffi_get_struct_layout(nums);') prnt(' /* the next line is not executed, but compiled */') prnt(' %s(0);' % (checkfuncname,)) @@ -578,7 +592,8 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True, size_too=False): + vartp=None, delayed=True, size_too=False, + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -590,6 +605,9 @@ else: assert category == 'const' # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # if not is_int: if category == 'var': realexpr = '&' + name @@ -637,6 +655,27 @@ # ---------- # enums + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -653,25 +692,8 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) 
{' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %s, ' - 'not %s",') - prnt(' "%s", "%s", buf, "%d");' % ( - name, enumerator, enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') @@ -695,8 +717,11 @@ # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): - assert tp == '...' - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing @@ -783,6 +808,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif 
@@ -828,12 +871,15 @@ PyLong_FromLongLong((long long)(x))) #define _cffi_from_c_int(x, type) \ - (((type)-1) > 0 ? /* unsigned */ \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) \ - : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x))) + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) #define _cffi_to_c_int(o, type) \ (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ @@ -844,7 +890,7 @@ : (type)_cffi_to_c_i32(o)) : \ sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ : (type)_cffi_to_c_i64(o)) : \ - (Py_FatalError("unsupported size for type " #type), 0)) + (Py_FatalError("unsupported size for type " #type), (type)0)) #define _cffi_to_c_i8 \ ((int(*)(PyObject *))_cffi_exports[1]) @@ -907,6 +953,7 @@ { PyObject *library; int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -58,12 +58,12 @@ modname = self.verifier.get_module_name() prnt("void %s%s(void) { }\n" % (prefix, modname)) - def load_library(self): + def load_library(self, flags=0): # import it with the CFFI backend backend = self.ffi._backend # needs to make a path that contains '/', on Posix filename = os.path.join(os.curdir, self.verifier.modulefilename) - module = backend.load_library(filename) + module = backend.load_library(filename, flags) # # call loading_gen_struct() to get the struct layout 
inferred by # the C compiler @@ -235,6 +235,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -354,11 +355,20 @@ # ---------- # constants, likely declared with '#define' - def _generate_gen_const(self, is_int, name, tp=None, category='const'): + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) - if is_int: + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') @@ -367,6 +377,7 @@ prnt('}') else: assert tp is not None + assert check_value is None prnt(tp.get_c_name(' %s(void)' % funcname, name),) prnt('{') if category == 'var': @@ -383,9 +394,13 @@ _loading_gen_constant = _loaded_noop - def _load_constant(self, is_int, tp, name, module): + def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name - if is_int: + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) @@ -396,6 +411,7 @@ BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: + assert check_value is None BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() @@ 
-410,6 +426,36 @@ # ---------- # enums + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise ffiplatform.VerificationError(error) + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -427,24 +473,7 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' sprintf(out_error,' - ' "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % ( - enumerator[:100], enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() @@ -456,16 +485,8 @@ 
tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = self._enum_funcname(prefix, name) - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise ffiplatform.VerificationError(error) + self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): @@ -476,13 +497,21 @@ # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): - assert tp == '...' - self._generate_gen_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): - value = self._load_constant(True, tp, name, module) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) @@ -565,6 +594,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned 
__int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -1,12 +1,23 @@ -import sys, os, binascii, imp, shutil -from . import __version__ +import sys, os, binascii, shutil +from . import __version_verifier_modules__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): def __init__(self, ffi, preamble, tmpdir=None, modulename=None, - ext_package=None, tag='', force_generic_engine=False, **kwds): + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): self.ffi = ffi self.preamble = preamble if not modulename: @@ -14,14 +25,15 @@ vengine_class = _locate_engine_class(ffi, force_generic_engine) self._vengine = vengine_class(self) self._vengine.patch_extension_kwds(kwds) - self.kwds = kwds + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) # if modulename: if tag: raise TypeError("can't specify both 'modulename' and 'tag'") else: - key = '\x00'.join([sys.version[:3], __version__, preamble, - flattened_kwds] + + key = '\x00'.join([sys.version[:3], __version_verifier_modules__, + preamble, flattened_kwds] + ffi._cdefsources) if sys.version_info >= (3,): key = key.encode('utf-8') @@ -33,7 +45,7 @@ k1, k2) suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() - self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) self.ext_package = ext_package 
self._has_source = False @@ -97,6 +109,20 @@ def generates_python_module(self): return self._vengine._gen_python_module + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + # ---------- def _locate_module(self): @@ -148,7 +174,10 @@ def _load_library(self): assert self._has_module - return self._vengine.load_library() + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() # ____________________________________________________________ @@ -181,6 +210,9 @@ def _caller_dir_pycache(): if _TMPDIR: return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result filename = sys._getframe(2).f_code.co_filename return os.path.abspath(os.path.join(os.path.dirname(filename), '__pycache__')) @@ -222,11 +254,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. Occurs on pypy without cpyext if sys.platform == 'win32': diff --git a/lib_pypy/readline.py b/lib_pypy/readline.py --- a/lib_pypy/readline.py +++ b/lib_pypy/readline.py @@ -6,4 +6,11 @@ are only stubs at the moment. 
""" -from pyrepl.readline import * +try: + from pyrepl.readline import * +except ImportError: + import sys + if sys.platform == 'win32': + raise ImportError("the 'readline' module is not available on Windows" + " (on either PyPy or CPython)") + raise diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -47,6 +47,11 @@ Install build-time dependencies ------------------------------- +(**Note**: for some hints on how to translate the Python interpreter under +Windows, see the `windows document`_) + +.. _`windows document`: windows.html + To build PyPy on Unix using the C translation backend, you need at least a C compiler and ``make`` installed. Further, some optional modules have additional diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,12 +30,10 @@ Initialize threads. Only need to be called if there are any threads involved -.. function:: long pypy_setup_home(char* home, int verbose); +.. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given - "PyPy home directory". It is not strictly necessary to execute it before - running Python code, but without it you will not be able to import any - non-builtin module from the standard library. The arguments are: + "PyPy home directory". 
The arguments are: * ``home``: NULL terminated path to an executable inside the pypy directory (can be a .so name, can be made up) @@ -84,25 +82,36 @@ const char source[] = "print 'hello from pypy'"; - int main() + int main(void) { - int res; + int res; - rpython_startup_code(); - // pypy_setup_home() is not needed in this trivial example - res = pypy_execute_source((char*)source); - if (res) { - printf("Error calling pypy_execute_source!\n"); - } - return res; + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; } -If we save it as ``x.c`` now, compile it and run it with:: +If we save it as ``x.c`` now, compile it and run it (on linux) with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy +on OSX it is necessary to set the rpath of the binary if one wants to link to it:: + + gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl, at executable_path + ./x + hello from pypy + Worked! .. note:: If the compilation fails because of missing PyPy.h header file, diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,93 @@ .. contents:: +Using Mercurial +--------------- + +PyPy development is based on Mercurial (hg). If you are not used to +version control, the cycle for a new PyPy contributor goes typically +like this: + +* Make an account on bitbucket_. + +* Go to https://bitbucket.org/pypy/pypy/ and click "fork" (left + icons). You get a fork of the repository, e.g. in + https://bitbucket.org/yourname/pypy/. + +* Clone this new repo (i.e. the fork) to your local machine with the command + ``hg clone ssh://hg at bitbucket.org/yourname/pypy``. 
It is a very slow + operation but only ever needs to be done once. If you already cloned + ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, + then you can reuse the same clone by editing the file ``.hg/hgrc`` in + your clone to contain the line ``default = + ssh://hg at bitbucket.org/yourname/pypy``, and then do ``hg pull && hg + up``. If you already have such a clone but don't want to change it, + you can clone that copy with ``hg clone /path/to/other/copy``, and + then edit ``.hg/hgrc`` as above and do ``hg pull && hg up``. + +* Now you have a complete copy of the PyPy repo. Make a branch + with a command like ``hg branch name_of_your_branch``. + +* Edit things. Use ``hg diff`` to see what you changed. Use ``hg add`` + to make Mercurial aware of new files you added, e.g. new test files. + Use ``hg status`` to see if there are such files. Run tests! (See + the rest of this page.) + +* Commit regularly with ``hg commit``. A one-line commit message is + fine. We love to have tons of commits; make one as soon as you have + some progress, even if it is only some new test that doesn't pass yet, + or fixing things even if not all tests pass. Step by step, you are + building the history of your changes, which is the point of a version + control system. (There are commands like ``hg log`` and ``hg up`` + that you should read about later, to learn how to navigate this + history.) + +* The commits stay on your machine until you do ``hg push`` to "push" + them back to the repo named in the file ``.hg/hgrc``. Repos are + basically just collections of commits (a commit is also called a + changeset): there is one repo per url, plus one for each local copy on + each local machine. The commands ``hg push`` and ``hg pull`` copy + commits around, with the goal that all repos in question end up with + the exact same set of commits. By opposition, ``hg up`` only updates + the "working copy" by reading the local repository, i.e. 
it makes the + files that you see correspond to the latest (or any other) commit + locally present. + +* You should push often; there is no real reason not to. Remember that + even if they are pushed, with the setup above, the commits are (1) + only in ``bitbucket.org/yourname/pypy``, and (2) in the branch you + named. Yes, they are publicly visible, but don't worry about someone + walking around the thousands of repos on bitbucket saying "hah, look + at the bad coding style of that guy". Try to get into the mindset + that your work is not secret and it's fine that way. We might not + accept it as is for PyPy, asking you instead to improve some things, + but we are not going to judge you. + +* The final step is to open a pull request, so that we know that you'd + like to merge that branch back to the original ``pypy/pypy`` repo. + This can also be done several times if you have interesting + intermediate states, but if you get there, then we're likely to + proceed to the next stage, which is... + +* Get a regular account for pushing directly to + ``bitbucket.org/pypy/pypy`` (just ask and you'll get it, basically). + Once you have it you can rewrite your file ``.hg/hgrc`` to contain + ``default = ssh://hg at bitbucket.org/pypy/pypy``. Your changes will + then be pushed directly to the official repo, but (if you follow these + rules) they are still on a branch, and we can still review the + branches you want to merge. + +* If you get closer to the regular day-to-day development, you'll notice + that we generally push small changes as one or a few commits directly + to the branch ``default``. Also, we often collaborate even if we are + on other branches, which do not really "belong" to anyone. At this + point you'll need ``hg merge`` and learn how to resolve conflicts that + sometimes occur when two people try to push different commits in + parallel on the same branch. But it is likely an issue for later ``:-)`` + +.. 
_bitbucket: https://bitbucket.org/ + + Running PyPy's unit tests ------------------------- diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -38,14 +38,13 @@ and not move the binary there, else PyPy would not be able to find its library. -If you want to install 3rd party libraries, the most convenient way is to -install distribute_ and pip_: +If you want to install 3rd party libraries, the most convenient way is +to install pip_ (unless you want to install virtualenv as explained +below; then you can directly use pip inside virtualenvs): .. code-block:: console - $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.1/bin/pypy distribute_setup.py + $ curl -O https://bootstrap.pypa.io/get-pip.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example @@ -69,7 +68,6 @@ Note that bin/python is now a symlink to bin/pypy. -.. _distribute: http://www.python-distribute.org/ .. _pip: http://pypi.python.org/pypi/pip diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py --- a/pypy/doc/test/test_whatsnew.py +++ b/pypy/doc/test/test_whatsnew.py @@ -78,9 +78,10 @@ def test_whatsnew(): doc = ROOT.join('pypy', 'doc') - whatsnew_list = doc.listdir('whatsnew-*.rst') - whatsnew_list.sort() - last_whatsnew = whatsnew_list[-1].read() + #whatsnew_list = doc.listdir('whatsnew-*.rst') + #whatsnew_list.sort() + #last_whatsnew = whatsnew_list[-1].read() + last_whatsnew = doc.join('whatsnew-head.rst').read() startrev, documented = parse_doc(last_whatsnew) merged, branch = get_merged_branches(ROOT, startrev, '') merged.discard('default') diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -51,3 +51,82 @@ .. 
branch: ssa-flow Use SSA form for flow graphs inside build_flow() and part of simplify_graph() + +.. branch: ufuncapi + +Implement most of the GenericUfunc api to support numpy linalg. The strategy is +to encourage use of pure python or cffi ufuncs by extending frompyfunc(). +See the docstring of frompyfunc for more details. This dovetails with a branch +of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in +python, calling lapack from cffi. The branch also support traditional use of +cpyext GenericUfunc definitions in c. + +.. branch: all_ordered_dicts + +This makes ordered dicts the default dictionary implementation in +RPython and in PyPy. It polishes the basic idea of rordereddict.py +and then fixes various things, up to simplifying +collections.OrderedDict. + +Note: Python programs can rely on the guaranteed dict order in PyPy +now, but for compatibility with other Python implementations they +should still use collections.OrderedDict where that really matters. +Also, support for reversed() was *not* added to the 'dict' class; +use OrderedDict. + +Benchmark results: in the noise. A few benchmarks see good speed +improvements but the average is very close to parity. + +.. branch: berkerpeksag/fix-broken-link-in-readmerst-1415127402066 +.. branch: bigint-with-int-ops +.. branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 +.. branch: float-opt +.. branch: gc-incminimark-pinning + +This branch adds an interface rgc.pin which would (very temporarily) +make object non-movable. That's used by rffi.alloc_buffer and +rffi.get_nonmovable_buffer and improves performance considerably for +IO operations. + +.. branch: gc_no_cleanup_nursery + +A branch started by Wenzhu Man (SoC'14) and then done by fijal. It +removes the clearing of the nursery. 
The drawback is that new objects +are not automatically filled with zeros any longer, which needs some +care, mostly for GC references (which the GC tries to follow, so they +must not contain garbage). The benefit is a quite large speed-up. + +.. branch: improve-gc-tracing-hooks +.. branch: improve-ptr-conv-error +.. branch: intern-not-immortal + +Fix intern() to return mortal strings, like in CPython. + +.. branch: issue1922-take2 +.. branch: kill-exported-symbols-list +.. branch: kill-rctime +.. branch: kill_ll_termios +.. branch: look-into-all-modules +.. branch: nditer-external_loop +.. branch: numpy-generic-item +.. branch: osx-shared + +``--shared`` support on OS/X (thanks wouter) + +.. branch: portable-threadlocal +.. branch: pypy-dont-copy-ops +.. branch: recursion_and_inlining +.. branch: slim-down-resumedescr +.. branch: squeaky/use-cflags-for-compiling-asm +.. branch: unicode-fix +.. branch: zlib_zdict + +.. branch: errno-again + +Changes how errno, GetLastError, and WSAGetLastError are handled. +The idea is to tie reading the error status as close as possible to +the external function call. This fixes some bugs, both of the very +rare kind (e.g. errno on Linux might in theory be overwritten by +mmap(), called rarely during major GCs, if such a major GC occurs at +exactly the wrong time), and some of the less rare kind +(particularly on Windows tests). 
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -78,6 +78,7 @@ Then you need to execute:: + \vc\vcvars.bat editbin /largeaddressaware translator.exe where ``translator.exe`` is the pypy.exe or cpython.exe you will use to @@ -96,7 +97,7 @@ Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------------------------------- Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local_2.4.zip @@ -110,7 +111,13 @@ set INCLUDE=\include;\tcltk\include;%INCLUDE% set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. If you choose this method, you do not need +to download/build anything else. + +Nonabrided method (building from scratch) +----------------------------------------- + +If you want to, you can rebuild everything from scratch by continuing. 
The Boehm garbage collector diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -101,7 +101,7 @@ if space.is_none(w_path): if verbose: debug("Failed to find library based on pypy_find_stdlib") - return 1 + return rffi.cast(rffi.INT, 1) space.startup() space.call_function(w_pathsetter, w_path) # import site @@ -109,13 +109,13 @@ import_ = space.getattr(space.getbuiltinmodule('__builtin__'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) - return 0 + return rffi.cast(rffi.INT, 0) except OperationError, e: if verbose: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 + return rffi.cast(rffi.INT, -1) @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): @@ -234,8 +234,7 @@ enable_translationmodules(config) config.translation.suggest(check_str_without_nul=True) - if sys.platform.startswith('linux'): - config.translation.suggest(shared=True) + config.translation.suggest(shared=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -83,17 +83,16 @@ class __extend__(ast.BoolOp): - def _accept_jump_if_any_is(self, gen, condition, target): - self.values[0].accept_jump_if(gen, condition, target) - for i in range(1, len(self.values)): + def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0): + for i in range(len(self.values) - skip_last): self.values[i].accept_jump_if(gen, condition, target) def accept_jump_if(self, gen, condition, target): if condition and self.op == ast.And or \ (not condition and self.op == ast.Or): end = gen.new_block() - 
self._accept_jump_if_any_is(gen, not condition, end) - gen.emit_jump(ops.JUMP_FORWARD, target) + self._accept_jump_if_any_is(gen, not condition, end, skip_last=1) + self.values[-1].accept_jump_if(gen, condition, target) gen.use_next_block(end) else: self._accept_jump_if_any_is(gen, condition, target) @@ -255,11 +254,15 @@ return rep def visit_Name(self, name): - # Turn loading None into a constant lookup. Eventaully, we can do this - # for True and False, too. + # Turn loading None into a constant lookup. We cannot do this + # for True and False, because rebinding them is allowed (2.7). if name.id == "None": - assert name.ctx == ast.Load - return ast.Const(self.space.w_None, name.lineno, name.col_offset) + # The compiler refuses to parse "None = ...", but "del None" + # is allowed (if pointless). Check anyway: custom asts that + # correspond to "None = ..." can be made by hand. + if name.ctx == ast.Load: + return ast.Const(self.space.w_None, name.lineno, + name.col_offset) return name def visit_Tuple(self, tup): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -486,10 +486,10 @@ w_exception_class=w_exception_class) wrap_oserror._annspecialcase_ = 'specialize:arg(3)' -def exception_from_errno(space, w_type): - from rpython.rlib.rposix import get_errno +def exception_from_saved_errno(space, w_type): + from rpython.rlib.rposix import get_saved_errno - errno = get_errno() + errno = get_saved_errno() msg = os.strerror(errno) w_error = space.call_function(w_type, space.wrap(errno), space.wrap(msg)) return OperationError(w_type, w_error) diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -654,6 +654,18 @@ assert ex.match(space, space.w_SyntaxError) assert 'hello_world' in space.str_w(space.str(ex.get_w_value(space))) + def test_del_None(self): + snippet = 
'''if 1: + try: + del None + except NameError: + pass + ''' + code = self.compiler.compile(snippet, '', 'exec', 0) + space = self.space + w_d = space.newdict() + space.exec_(code, w_d, w_d) + class TestPythonAstCompiler_25_grammar(BaseTestCompiler): def setup_method(self, method): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -27,6 +27,6 @@ pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) - assert lltype.typeOf(res) == rffi.LONG + assert lltype.typeOf(res) == rffi.INT assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -86,9 +86,11 @@ def print_(*args, **kwargs): """The new-style print function from py3k.""" - fp = kwargs.pop("file", sys.stdout) + fp = kwargs.pop("file", None) if fp is None: - return + fp = sys.stdout + if fp is None: + return def write(data): if not isinstance(data, basestring): data = str(data) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -651,9 +651,12 @@ out = sys.stdout = StringIO.StringIO() try: pr("Hello,", "person!") + pr("2nd line", file=None) + sys.stdout = None + pr("nowhere") finally: sys.stdout = save - assert out.getvalue() == "Hello, person!\n" + assert out.getvalue() == "Hello, person!\n2nd line\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" @@ -668,7 +671,6 @@ result = out.getvalue() assert isinstance(result, unicode) assert result == u"Hello, person!\n" - pr("Hello", file=None) # This works. 
out = StringIO.StringIO() pr(None, file=out) assert out.getvalue() == "None\n" diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -78,6 +78,7 @@ 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', + 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -30,3 +30,17 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) + +def reversed_dict(space, w_obj): + """Enumerate the keys in a dictionary object in reversed order. + + This is a __pypy__ function instead of being simply done by calling + reversed(), for CPython compatibility: dictionaries are only ordered + on PyPy. You should use the collections.OrderedDict class for cases + where ordering is important. That class implements __reversed__ by + calling __pypy__.reversed_dict(). 
+ """ + from pypy.objspace.std.dictmultiobject import W_DictMultiObject + if not isinstance(w_obj, W_DictMultiObject): + raise OperationError(space.w_TypeError, space.w_None) + return w_obj.nondescr_reversed_dict(space) diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,7 +1,7 @@ from __future__ import with_statement import sys -from pypy.interpreter.error import exception_from_errno +from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform From noreply at buildbot.pypy.org Wed Jan 21 18:45:52 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Jan 2015 18:45:52 +0100 (CET) Subject: [pypy-commit] pypy default: Add objectmodel.likely(), .unlikely() Message-ID: <20150121174552.85B8F1C04CF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75464:5fc2a9b77793 Date: 2015-01-21 18:44 +0100 http://bitbucket.org/pypy/pypy/changeset/5fc2a9b77793/ Log: Add objectmodel.likely(), .unlikely() diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -636,6 +636,30 @@ # ____________________________________________________________ +def likely(condition): + assert isinstance(condition, bool) + return condition + +def unlikely(condition): + assert isinstance(condition, bool) + return condition + +class Entry(ExtRegistryEntry): + _about_ = (likely, unlikely) + + def compute_result_annotation(self, s_x): + from rpython.annotator import model as annmodel + return annmodel.SomeBool() + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + vlist = hop.inputargs(lltype.Bool) + hop.exception_cannot_occur() + return hop.genop(self.instance.__name__, vlist, + 
resulttype=lltype.Bool) + +# ____________________________________________________________ + class r_dict(object): """An RPython dict-like object. diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -365,6 +365,9 @@ 'convert_float_bytes_to_longlong': LLOp(canfold=True), 'convert_longlong_bytes_to_float': LLOp(canfold=True), + 'likely': LLOp(canfold=True), + 'unlikely': LLOp(canfold=True), + # __________ pointer operations __________ 'malloc': LLOp(canmallocgc=True), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -699,6 +699,14 @@ return p[0] op_raw_load.need_result_type = True +def op_likely(x): + assert isinstance(x, bool) + return x + +def op_unlikely(x): + assert isinstance(x, bool) + return x + # ____________________________________________________________ def get_op_impl(opname): diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h --- a/rpython/translator/c/src/int.h +++ b/rpython/translator/c/src/int.h @@ -238,6 +238,14 @@ #define OP_BOOL_NOT(x, r) r = !(x) +#ifdef __GNUC__ +# define OP_LIKELY(x, r) r = __builtin_expect((x), 1) +# define OP_UNLIKELY(x, r) r = __builtin_expect((x), 0) +#else +# define OP_LIKELY(x, r) r = (x) +# define OP_UNLIKELY(x, r) r = (x) +#endif + RPY_EXTERN long long op_llong_mul_ovf(long long a, long long b); /* The definitions above can be used with various types */ diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -956,3 +956,18 @@ fn = self.getcompiled(f, [int]) assert fn(0) == 9 + + def test_likely_unlikely(self): + from rpython.rlib.objectmodel import likely, unlikely + + def f(n): 
+ if unlikely(n > 50): + return -10 + if likely(n > 5): + return 42 + return 3 + + fn = self.getcompiled(f, [int]) + assert fn(0) == 3 + assert fn(10) == 42 + assert fn(100) == -10 From noreply at buildbot.pypy.org Wed Jan 21 18:45:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Jan 2015 18:45:53 +0100 (CET) Subject: [pypy-commit] pypy default: Use likely() here, as motivated by benchmarks Message-ID: <20150121174553.E25481C04CF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75465:2d569e13232a Date: 2015-01-21 18:45 +0100 http://bitbucket.org/pypy/pypy/changeset/2d569e13232a/ Log: Use likely() here, as motivated by benchmarks diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -4,7 +4,7 @@ from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib import objectmodel, jit, rgc -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, likely from rpython.rlib.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rtyper import rmodel @@ -46,7 +46,7 @@ @jit.oopspec('ordereddict.lookup(d, key, hash, flag)') def ll_call_lookup_function(d, key, hash, flag): fun = d.lookup_function_no & FUNC_MASK - if fun == FUNC_BYTE: + if likely(fun == FUNC_BYTE): return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: return ll_dict_lookup(d, key, hash, flag, TYPE_SHORT) From noreply at buildbot.pypy.org Wed Jan 21 18:45:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Jan 2015 18:45:55 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20150121174555.52B7C1C04CF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75466:3a5aa32cbc44 Date: 2015-01-21 
18:45 +0100 http://bitbucket.org/pypy/pypy/changeset/3a5aa32cbc44/ Log: merge heads diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -144,7 +144,7 @@ assert obj2.map.back.ever_mutated == True assert obj2.map is obj.map -def test_attr_immutability_delete(monkeypatch): +def test_attr_immutability_delete(): cls = Class() obj = cls.instantiate() obj.setdictvalue(space, "a", 10) From noreply at buildbot.pypy.org Wed Jan 21 18:54:36 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 21 Jan 2015 18:54:36 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: try to reduce the overhead of writing ints into types (and eventually modules) Message-ID: <20150121175436.82E521C04CF@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75467:8ff346f1fd62 Date: 2015-01-21 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/8ff346f1fd62/ Log: try to reduce the overhead of writing ints into types (and eventually modules) even further, by storing them unboxed bit experimental diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -210,6 +210,56 @@ assert w_A.version_tag() is atag assert space.int_w(space.getattr(w_A, w_x)) == 4 + def test_no_cell_when_writing_same_value(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + w_val = space.newint(1) + space.setattr(w_A, w_x, w_val) + space.setattr(w_A, w_x, w_val) + w_val1 = w_A._getdictvalue_no_unwrapping(space, "x") + assert w_val1 is w_val + + def test_int_cells(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(1)) + assert 
w_A.version_tag() is not atag + assert space.int_w(space.getattr(w_A, w_x)) == 1 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(2)) + assert w_A.version_tag() is not atag + assert space.int_w(space.getattr(w_A, w_x)) == 2 + cell = w_A._getdictvalue_no_unwrapping(space, "x") + assert cell.intvalue == 2 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(3)) + assert w_A.version_tag() is atag + assert space.int_w(space.getattr(w_A, w_x)) == 3 + assert cell.intvalue == 3 + + space.setattr(w_A, w_x, space.newint(4)) + assert w_A.version_tag() is atag + assert space.int_w(space.getattr(w_A, w_x)) == 4 + assert cell.intvalue == 4 + + def test_int_cell_turns_into_cell(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(1)) + space.setattr(w_A, w_x, space.newint(2)) + space.setattr(w_A, w_x, space.newfloat(2.2)) + cell = w_A._getdictvalue_no_unwrapping(space, "x") + assert space.float_w(cell.w_value) == 2.2 + + class AppTestVersionedType(test_typeobject.AppTestTypeObject): spaceconfig = {"objspace.std.withtypeversion": True} diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -11,18 +11,47 @@ from rpython.rlib.objectmodel import current_object_addr_as_int, compute_hash from rpython.rlib.rarithmetic import intmask, r_uint +class BaseTypeCell(W_Root): + def unwrap_cell(self, space): + raise NotImplementedError("abstract base") -class TypeCell(W_Root): +class TypeCell(BaseTypeCell): def __init__(self, w_value=None): self.w_value = w_value + def unwrap_cell(self, space): + return self.w_value + +class IntTypeCell(BaseTypeCell): + def __init__(self, intvalue): + self.intvalue = intvalue + + def unwrap_cell(self, space): + return space.wrap(self.intvalue) + def unwrap_cell(space, w_value): - if 
(space.config.objspace.std.withtypeversion and - isinstance(w_value, TypeCell)): - return w_value.w_value + if space.config.objspace.std.withtypeversion: + if isinstance(w_value, BaseTypeCell): + return w_value.unwrap_cell(space) return w_value +def write_cell(space, w_cell, w_value): + from pypy.objspace.std.intobject import W_IntObject + if isinstance(w_cell, TypeCell): + w_cell.w_value = w_value + return None + elif isinstance(w_cell, IntTypeCell) and type(w_value) is W_IntObject: + w_cell.intvalue = w_value.intval + return None + elif space.is_w(w_cell, w_value): + # If the new value and the current value are the same, don't + # create a level of indirection, or mutate the version. + return None + if type(w_value) is W_IntObject: + return IntTypeCell(w_value.intval) + else: + return TypeCell(w_value) class VersionTag(object): pass @@ -275,10 +304,9 @@ w_curr = w_self._pure_getdictvalue_no_unwrapping( space, version_tag, name) if w_curr is not None: - if isinstance(w_curr, TypeCell): - w_curr.w_value = w_value + w_value = write_cell(space, w_curr, w_value) + if w_value is None: return True - w_value = TypeCell(w_value) w_self.mutated(name) w_self.dict_w[name] = w_value return True @@ -369,8 +397,8 @@ tup_w = w_self._pure_lookup_where_with_method_cache(name, version_tag) w_class, w_value = tup_w if (space.config.objspace.std.withtypeversion and - isinstance(w_value, TypeCell)): - return w_class, w_value.w_value + isinstance(w_value, BaseTypeCell)): + return w_class, w_value.unwrap_cell(space) return tup_w # don't make a new tuple, reuse the old one def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): From noreply at buildbot.pypy.org Wed Jan 21 18:54:49 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 21 Jan 2015 18:54:49 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: merge default Message-ID: <20150121175449.E9C0D1C04CF@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: 
r75468:50b80a2378f4 Date: 2015-01-21 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/50b80a2378f4/ Log: merge default diff too long, truncating to 2000 out of 23420 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2014 +PyPy Copyright holders 2003-2015 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -17,6 +17,10 @@ except ImportError: assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} +try: + from __pypy__ import reversed_dict +except ImportError: + reversed_dict = lambda d: reversed(d.keys()) try: from thread import get_ident as _get_ident @@ -29,142 +33,35 @@ ################################################################################ class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as regular dictionaries. + '''Dictionary that remembers insertion order. - # The internal self.__map dict maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + In PyPy all dicts are ordered anyway. This is mostly useful as a + placeholder to mean "this dict must be ordered even on CPython". - def __init__(self, *args, **kwds): - '''Initialize an ordered dictionary. The signature is the same as - regular dictionaries, but keyword arguments are not recommended because - their insertion order is arbitrary. 
- - ''' - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__root = root = [] # sentinel node - root[:] = [root, root, None] - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, dict_setitem=dict.__setitem__): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link at the end of the linked list, - # and the inherited dictionary is updated with the new key/value pair. - if key not in self: - root = self.__root - last = root[0] - last[1] = root[0] = self.__map[key] = [last, root, key] - return dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which gets - # removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link_prev, link_next, _ = self.__map.pop(key) - link_prev[1] = link_next # update link_prev[NEXT] - link_next[0] = link_prev # update link_next[PREV] - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - # Traverse the linked list in order. - root = self.__root - curr = root[1] # start at the first node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[1] # move to next node + Known difference: iterating over an OrderedDict which is being + concurrently modified raises RuntimeError in PyPy. In CPython + instead we get some behavior that appears reasonable in some + cases but is nonsensical in other cases. This is officially + forbidden by the CPython docs, so we forbid it explicitly for now. + ''' def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - # Traverse the linked list in reverse order. 
- root = self.__root - curr = root[0] # start at the last node - while curr is not root: - yield curr[2] # yield the curr[KEY] - curr = curr[0] # move to previous node - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - root = self.__root - root[:] = [root, root, None] - self.__map.clear() - dict.clear(self) - - # -- the following methods do not depend on the internal structure -- - - def keys(self): - 'od.keys() -> list of keys in od' - return list(self) - - def values(self): - 'od.values() -> list of values in od' - return [self[key] for key in self] - - def items(self): - 'od.items() -> list of (key, value) pairs in od' - return [(key, self[key]) for key in self] - - def iterkeys(self): - 'od.iterkeys() -> an iterator over the keys in od' - return iter(self) - - def itervalues(self): - 'od.itervalues -> an iterator over the values in od' - for k in self: - yield self[k] - - def iteritems(self): - 'od.iteritems -> an iterator over the (key, value) pairs in od' - for k in self: - yield (k, self[k]) - - update = MutableMapping.update - - __update = update # let subclasses override update without breaking __init__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding - value. If key is not found, d is returned if given, otherwise KeyError - is raised. - - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default + return reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' - if not self: - raise KeyError('dictionary is empty') - key = next(reversed(self) if last else iter(self)) - value = self.pop(key) - return key, value + if last: + return dict.popitem(self) + else: + it = dict.__iter__(self) + try: + k = it.next() + except StopIteration: + raise KeyError('dictionary is empty') + return (k, self.pop(k)) def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' @@ -183,8 +80,6 @@ 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) @@ -193,17 +88,6 @@ 'od.copy() -> a shallow copy of od' return self.__class__(self) - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. - If not specified, the value defaults to None. - - ''' - self = cls() - for key in iterable: - self[key] = value - return self - def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. 
diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -58,7 +58,7 @@ executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], + 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], diff --git a/lib-python/2.7/sqlite3/test/dbapi.py b/lib-python/2.7/sqlite3/test/dbapi.py --- a/lib-python/2.7/sqlite3/test/dbapi.py +++ b/lib-python/2.7/sqlite3/test/dbapi.py @@ -478,6 +478,29 @@ except TypeError: pass + def CheckCurDescription(self): + self.cu.execute("select * from test") + + actual = self.cu.description + expected = [ + ('id', None, None, None, None, None, None), + ('name', None, None, None, None, None, None), + ('income', None, None, None, None, None, None), + ] + self.assertEqual(expected, actual) + + def CheckCurDescriptionVoidStatement(self): + self.cu.execute("insert into test(name) values (?)", ("foo",)) + self.assertIsNone(self.cu.description) + + def CheckCurDescriptionWithoutStatement(self): + cu = self.cx.cursor() + try: + self.assertIsNone(cu.description) + finally: + cu.close() + + @unittest.skipUnless(threading, 'This test requires threading.') class ThreadTests(unittest.TestCase): def setUp(self): diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1589,7 +1589,7 @@ 'copyfile' in caller.f_globals): dest_dir = sys.pypy_resolvedirof(target_executable) src_dir = sys.pypy_resolvedirof(sys.executable) - for libname in ['libpypy-c.so']: + for libname in ['libpypy-c.so', 'libpypy-c.dylib']: dest_library = os.path.join(dest_dir, libname) src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): diff --git a/lib-python/2.7/test/test_collections.py 
b/lib-python/2.7/test/test_collections.py --- a/lib-python/2.7/test/test_collections.py +++ b/lib-python/2.7/test/test_collections.py @@ -578,7 +578,12 @@ def __repr__(self): return "MySet(%s)" % repr(list(self)) s = MySet([5,43,2,1]) - self.assertEqual(s.pop(), 1) + # changed from CPython 2.7: it was "s.pop() == 1" but I see + # nothing that guarantees a particular order here. In the + # 'all_ordered_dicts' branch of PyPy (or with OrderedDict + # instead of sets), it consistently returns 5, but this test + # should not rely on this or any other order. + self.assert_(s.pop() in [5,43,2,1]) def test_issue8750(self): empty = WithSet() @@ -1010,8 +1015,9 @@ c=3, e=5).items()), pairs) # mixed input # make sure no positional args conflict with possible kwdargs - self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, - ['self']) + if '__init__' in OrderedDict.__dict__: # absent in PyPy + self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args, + ['self']) # Make sure that direct calls to __init__ do not clear previous contents d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)]) @@ -1108,6 +1114,16 @@ od.popitem() self.assertEqual(len(od), 0) + def test_popitem_first(self): + pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] + shuffle(pairs) + od = OrderedDict(pairs) + while pairs: + self.assertEqual(od.popitem(last=False), pairs.pop(0)) + with self.assertRaises(KeyError): + od.popitem(last=False) + self.assertEqual(len(od), 0) + def test_pop(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) @@ -1179,7 +1195,11 @@ od = OrderedDict(pairs) # yaml.dump(od) --> # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n' - self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1])) + + # PyPy bug fix: added [0] at the end of this line, because the + # test is really about the 2-tuples that need to be 2-lists + # inside the list of 6 of 
them + self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1][0])) def test_reduce_not_too_fat(self): # do not save instance dictionary if not needed @@ -1189,6 +1209,16 @@ od.x = 10 self.assertEqual(len(od.__reduce__()), 3) + def test_reduce_exact_output(self): + # PyPy: test that __reduce__() produces the exact same answer as + # CPython does, even though in the 'all_ordered_dicts' branch we + # have to emulate it. + pairs = [['c', 1], ['b', 2], ['d', 4]] + od = OrderedDict(pairs) + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,))) + od.x = 10 + self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,), {'x': 10})) + def test_repr(self): od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py --- a/lib-python/2.7/test/test_xml_etree.py +++ b/lib-python/2.7/test/test_xml_etree.py @@ -225,9 +225,9 @@ >>> element.remove(subelement) >>> serialize(element) # 5 '' - >>> element.remove(subelement) + >>> element.remove(subelement) # doctest: +ELLIPSIS Traceback (most recent call last): - ValueError: list.remove(x): x not in list + ValueError: list.remove(... >>> serialize(element) # 6 '' >>> element[0:0] = [subelement, subelement, subelement] diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -7,7 +7,7 @@ 1. check out the branch vendor/stdlib 2. upgrade the files there -3. update stdlib-versions.txt with the output of hg -id from the cpython repo +3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit 5. update to default/py3k 6. create a integration branch for the new stdlib diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -9,7 +9,10 @@ of the given arguments and keywords. 
""" - def __init__(self, func, *args, **keywords): + def __init__(self, *args, **keywords): + if not args: + raise TypeError('__init__() takes at least 2 arguments (1 given)') + func, args = args[0], args[1:] if not callable(func): raise TypeError("the first argument must be callable") self._func = func diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1175,8 +1175,9 @@ try: return self.__description except AttributeError: - self.__description = self.__statement._get_description() - return self.__description + if self.__statement: + self.__description = self.__statement._get_description() + return self.__description description = property(__get_description) def __get_lastrowid(self): diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -6,3 +6,8 @@ __version__ = "0.8.6" __version_info__ = (0, 8, 6) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. 
+__version_verifier_modules__ = "0.8.6" diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -69,6 +69,7 @@ self._function_caches = [] self._libraries = [] self._cdefsources = [] + self._windows_unicode = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -77,6 +78,7 @@ # with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): @@ -189,13 +191,16 @@ cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) - def offsetof(self, cdecl, fieldname): + def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given - structure, which must be given as a C type name. + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. + You can also give numeric values which correspond to array + items, in case of an array type. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) - return self._backend.typeoffsetof(cdecl, fieldname)[1] + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and @@ -264,6 +269,16 @@ """ return self._backend.buffer(cdata, size) + def from_buffer(self, python_buffer): + """Return a that points to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types str, + unicode, or bytearray (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. 
+ """ + return self._backend.from_buffer(self.BCharA, python_buffer) + def callback(self, cdecl, python_callable=None, error=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. @@ -335,9 +350,23 @@ which requires binary compatibility in the signatures. """ from .verifier import Verifier, _caller_dir_pycache + # + # If set_unicode(True) was called, insert the UNICODE and + # _UNICODE macro declarations + if self._windows_unicode: + self._apply_windows_unicode(kwargs) + # + # Set the tmpdir here, and not in Verifier.__init__: it picks + # up the caller's directory, which we want to be the caller of + # ffi.verify(), as opposed to the caller of Veritier(). tmpdir = tmpdir or _caller_dir_pycache() + # + # Make a Verifier() and use it to load the library. self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() + # + # Save the loaded library for keep-alive purposes, even + # if the caller doesn't keep it alive itself (it should). self._libraries.append(lib) return lib @@ -356,15 +385,29 @@ with self._lock: return model.pointer_cache(self, ctype) - def addressof(self, cdata, field=None): + def addressof(self, cdata, *fields_or_indexes): """Return the address of a . - If 'field' is specified, return the address of this field. + If 'fields_or_indexes' are given, returns the address of that + field or array item in the structure or array, recursively in + case of nested structures. 
""" ctype = self._backend.typeof(cdata) - ctype, offset = self._backend.typeoffsetof(ctype, field) + if fields_or_indexes: + ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) + else: + if ctype.kind == "pointer": + raise TypeError("addressof(pointer)") + offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) + def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): + ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) + for field1 in fields_or_indexes: + ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) + offset += offset1 + return ctype, offset + def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined in another FFI instance. Usage is similar to a #include in C, @@ -387,6 +430,44 @@ def from_handle(self, x): return self._backend.from_handle(x) + def set_unicode(self, enabled_flag): + """Windows: if 'enabled_flag' is True, enable the UNICODE and + _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR + to be (pointers to) wchar_t. If 'enabled_flag' is False, + declare these types to be (pointers to) plain 8-bit characters. + This is mostly for backward compatibility; you usually want True. 
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -169,6 +169,7 @@ class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False + kind = "pointer" @classmethod def _newp(cls, init): @@ -370,10 +371,12 @@ (CTypesPrimitive, type(source).__name__)) return source # + kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name + kind = kind1 def __init__(self, value): self._value = value @@ -703,12 +706,13 @@ class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) - _kind = kind + _kind = kind = kind1 # CTypesStructOrUnion._fix_class() 
return CTypesStructOrUnion @@ -994,27 +998,42 @@ def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) - def typeoffsetof(self, BType, fieldname): - if fieldname is not None and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - if fieldname is None: - return (BType, 0) - else: + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) - def rawaddressof(self, BTypePtr, cdata, offset): + def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a ") - if offset != 0: + if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py --- a/lib_pypy/cffi/commontypes.py +++ 
b/lib_pypy/cffi/commontypes.py @@ -29,6 +29,9 @@ result = model.PointerType(resolve_common_type(result[:-2])) elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) + elif result == 'set-unicode-needed': + raise api.FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) else: if commontype == result: raise api.FFIError("Unsupported type: %r. Please file a bug " @@ -86,8 +89,6 @@ "ULONGLONG": "unsigned long long", "WCHAR": "wchar_t", "SHORT": "short", - "TBYTE": "WCHAR", - "TCHAR": "WCHAR", "UCHAR": "unsigned char", "UINT": "unsigned int", "UINT8": "unsigned char", @@ -157,14 +158,12 @@ "LPCVOID": model.const_voidp_type, "LPCWSTR": "const WCHAR *", - "LPCTSTR": "LPCWSTR", "LPDWORD": "DWORD *", "LPHANDLE": "HANDLE *", "LPINT": "int *", "LPLONG": "long *", "LPSTR": "CHAR *", "LPWSTR": "WCHAR *", - "LPTSTR": "LPWSTR", "LPVOID": model.voidp_type, "LPWORD": "WORD *", "LRESULT": "LONG_PTR", @@ -173,7 +172,6 @@ "PBYTE": "BYTE *", "PCHAR": "CHAR *", "PCSTR": "const CHAR *", - "PCTSTR": "LPCWSTR", "PCWSTR": "const WCHAR *", "PDWORD": "DWORD *", "PDWORDLONG": "DWORDLONG *", @@ -200,9 +198,6 @@ "PSIZE_T": "SIZE_T *", "PSSIZE_T": "SSIZE_T *", "PSTR": "CHAR *", - "PTBYTE": "TBYTE *", - "PTCHAR": "TCHAR *", - "PTSTR": "LPWSTR", "PUCHAR": "UCHAR *", "PUHALF_PTR": "UHALF_PTR *", "PUINT": "UINT *", @@ -240,6 +235,15 @@ "USN": "LONGLONG", "VOID": model.void_type, "WPARAM": "UINT_PTR", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", }) return result diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -1,4 +1,3 @@ - from . 
import api, model from .commontypes import COMMON_TYPES, resolve_common_type try: @@ -209,6 +208,8 @@ def _add_constants(self, key, val): if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations raise api.FFIError( "multiple declarations of constant: %s" % (key,)) self._int_constants[key] = val @@ -228,12 +229,18 @@ pyvalue = int(int_str, 0) self._add_constants(key, pyvalue) + self._declare('macro ' + key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: - raise api.CDefError('only supports the syntax "#define ' - '%s ..." (literally) or "#define ' - '%s 0x1FF" for now' % (key, key)) + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... (literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) def _parse_decl(self, decl): node = decl.type @@ -460,6 +467,8 @@ elif kind == 'union': tp = model.UnionType(explicit_name, None, None, None) elif kind == 'enum': + if explicit_name == '__dotdotdot__': + raise CDefError("Enums cannot be declared with ...") tp = self._build_enum_type(explicit_name, type.values) else: raise AssertionError("kind = %r" % (kind,)) @@ -532,9 +541,24 @@ def _parse_constant(self, exprnode, partial_length_ok=False): # for now, limited to expressions that are an immediate number - # or negative number + # or positive/negative number if isinstance(exprnode, pycparser.c_ast.Constant): - return int(exprnode.value, 0) + s = exprnode.value + if s.startswith('0'): + if s.startswith('0x') or s.startswith('0X'): + return int(s, 16) + return int(s, 8) + elif '1' <= s[0] <= '9': + return int(s, 10) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise api.CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + 
exprnode.op == '+'): + return self._parse_constant(exprnode.expr) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -11,6 +11,9 @@ """ +LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', + 'extra_objects', 'depends'] + def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -235,6 +235,8 @@ BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) +char_array_type = ArrayType(PrimitiveType('char'), None) + class StructOrUnionOrEnum(BaseTypeByIdentity): _attrs_ = ('name',) @@ -478,7 +480,7 @@ try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: - raise NotImplementedError("%r: %s" % (srctype, e)) + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -65,7 +65,7 @@ # The following two 'chained_list_constants' items contains # the head of these two chained lists, as a string that gives the # call to do, if any. - self._chained_list_constants = ['0', '0'] + self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] # prnt = self._prnt # first paste some standard set of lines that are mostly '#define' @@ -138,15 +138,22 @@ prnt() prnt('#endif') - def load_library(self): + def load_library(self, flags=None): # XXX review all usages of 'self' here! 
# import it as a new extension module + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) module = imp.load_dynamic(self.verifier.get_module_name(), self.verifier.modulefilename) except ImportError as e: error = "importing %r: %s" % (self.verifier.modulefilename, e) raise ffiplatform.VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler @@ -228,7 +235,8 @@ converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: - converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) errvalue = '-1' # elif isinstance(tp, model.PointerType): @@ -267,8 +275,8 @@ self._prnt(' if (datasize != 0) {') self._prnt(' if (datasize < 0)') self._prnt(' %s;' % errcode) - self._prnt(' %s = alloca(datasize);' % (tovar,)) - self._prnt(' memset((void *)%s, 0, datasize);' % (tovar,)) + self._prnt(' %s = alloca((size_t)datasize);' % (tovar,)) + self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,)) self._prnt(' if (_cffi_convert_array_from_object(' '(char *)%s, _cffi_type(%d), %s) < 0)' % ( tovar, self._gettypenum(tp), fromvar)) @@ -336,7 +344,7 @@ prnt = self._prnt numargs = len(tp.args) if numargs == 0: - argname = 'no_arg' + argname = 'noarg' elif numargs == 1: argname = 'arg0' else: @@ -386,6 +394,9 @@ prnt(' Py_END_ALLOW_THREADS') prnt() # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') if result_code: prnt(' return %s;' % self._convert_expr_from_c(tp.result, 'result', 'result type')) @@ -452,6 +463,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in 
tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -482,6 +494,8 @@ prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') prnt(' return _cffi_get_struct_layout(nums);') prnt(' /* the next line is not executed, but compiled */') prnt(' %s(0);' % (checkfuncname,)) @@ -578,7 +592,8 @@ # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', - vartp=None, delayed=True, size_too=False): + vartp=None, delayed=True, size_too=False, + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) @@ -590,6 +605,9 @@ else: assert category == 'const' # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # if not is_int: if category == 'var': realexpr = '&' + name @@ -637,6 +655,27 @@ # ---------- # enums + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -653,25 +692,8 @@ prnt('static int %s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) 
{' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' PyErr_Format(_cffi_VerificationError,') - prnt(' "enum %s: %s has the real value %s, ' - 'not %s",') - prnt(' "%s", "%s", buf, "%d");' % ( - name, enumerator, enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') @@ -695,8 +717,11 @@ # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): - assert tp == '...' - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing @@ -783,6 +808,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif 
@@ -828,12 +871,15 @@ PyLong_FromLongLong((long long)(x))) #define _cffi_from_c_int(x, type) \ - (((type)-1) > 0 ? /* unsigned */ \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) \ - : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x))) + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) #define _cffi_to_c_int(o, type) \ (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ @@ -844,7 +890,7 @@ : (type)_cffi_to_c_i32(o)) : \ sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ : (type)_cffi_to_c_i64(o)) : \ - (Py_FatalError("unsupported size for type " #type), 0)) + (Py_FatalError("unsupported size for type " #type), (type)0)) #define _cffi_to_c_i8 \ ((int(*)(PyObject *))_cffi_exports[1]) @@ -907,6 +953,7 @@ { PyObject *library; int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -58,12 +58,12 @@ modname = self.verifier.get_module_name() prnt("void %s%s(void) { }\n" % (prefix, modname)) - def load_library(self): + def load_library(self, flags=0): # import it with the CFFI backend backend = self.ffi._backend # needs to make a path that contains '/', on Posix filename = os.path.join(os.curdir, self.verifier.modulefilename) - module = backend.load_library(filename) + module = backend.load_library(filename, flags) # # call loading_gen_struct() to get the struct layout 
inferred by # the C compiler @@ -235,6 +235,7 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: @@ -354,11 +355,20 @@ # ---------- # constants, likely declared with '#define' - def _generate_gen_const(self, is_int, name, tp=None, category='const'): + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) - if is_int: + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') @@ -367,6 +377,7 @@ prnt('}') else: assert tp is not None + assert check_value is None prnt(tp.get_c_name(' %s(void)' % funcname, name),) prnt('{') if category == 'var': @@ -383,9 +394,13 @@ _loading_gen_constant = _loaded_noop - def _load_constant(self, is_int, tp, name, module): + def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name - if is_int: + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) @@ -396,6 +411,7 @@ BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: + assert check_value is None BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] function = module.load_function(BFunc, funcname) value = function() @@ 
-410,6 +426,36 @@ # ---------- # enums + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise ffiplatform.VerificationError(error) + def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') @@ -427,24 +473,7 @@ prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - if enumvalue < 0: - prnt(' if ((%s) >= 0 || (long)(%s) != %dL) {' % ( - enumerator, enumerator, enumvalue)) - else: - prnt(' if ((%s) < 0 || (unsigned long)(%s) != %dUL) {' % ( - enumerator, enumerator, enumvalue)) - prnt(' char buf[64];') - prnt(' if ((%s) < 0)' % enumerator) - prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) - prnt(' else') - prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % - enumerator) - prnt(' sprintf(out_error,' - ' "%s has the real value %s, not %s",') - prnt(' "%s", buf, "%d");' % ( - enumerator[:100], enumvalue)) - prnt(' return -1;') - prnt(' }') + self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() @@ -456,16 +485,8 @@ 
tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: - BType = self.ffi._typeof_locked("char[]")[0] - BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] funcname = self._enum_funcname(prefix, name) - function = module.load_function(BFunc, funcname) - p = self.ffi.new(BType, 256) - if function(p) < 0: - error = self.ffi.string(p) - if sys.version_info >= (3,): - error = str(error, 'utf-8') - raise ffiplatform.VerificationError(error) + self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): @@ -476,13 +497,21 @@ # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): - assert tp == '...' - self._generate_gen_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): - value = self._load_constant(True, tp, name, module) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) @@ -565,6 +594,24 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned 
__int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; # else # include # endif diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -1,12 +1,23 @@ -import sys, os, binascii, imp, shutil -from . import __version__ +import sys, os, binascii, shutil +from . import __version_verifier_modules__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): def __init__(self, ffi, preamble, tmpdir=None, modulename=None, - ext_package=None, tag='', force_generic_engine=False, **kwds): + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): self.ffi = ffi self.preamble = preamble if not modulename: @@ -14,14 +25,15 @@ vengine_class = _locate_engine_class(ffi, force_generic_engine) self._vengine = vengine_class(self) self._vengine.patch_extension_kwds(kwds) - self.kwds = kwds + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) # if modulename: if tag: raise TypeError("can't specify both 'modulename' and 'tag'") else: - key = '\x00'.join([sys.version[:3], __version__, preamble, - flattened_kwds] + + key = '\x00'.join([sys.version[:3], __version_verifier_modules__, + preamble, flattened_kwds] + ffi._cdefsources) if sys.version_info >= (3,): key = key.encode('utf-8') @@ -33,7 +45,7 @@ k1, k2) suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() - self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c') + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) self.ext_package = ext_package 
self._has_source = False @@ -97,6 +109,20 @@ def generates_python_module(self): return self._vengine._gen_python_module + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + # ---------- def _locate_module(self): @@ -148,7 +174,10 @@ def _load_library(self): assert self._has_module - return self._vengine.load_library() + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() # ____________________________________________________________ @@ -181,6 +210,9 @@ def _caller_dir_pycache(): if _TMPDIR: return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result filename = sys._getframe(2).f_code.co_filename return os.path.abspath(os.path.join(os.path.dirname(filename), '__pycache__')) @@ -222,11 +254,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. Occurs on pypy without cpyext if sys.platform == 'win32': diff --git a/lib_pypy/readline.py b/lib_pypy/readline.py --- a/lib_pypy/readline.py +++ b/lib_pypy/readline.py @@ -6,4 +6,11 @@ are only stubs at the moment. 
""" -from pyrepl.readline import * +try: + from pyrepl.readline import * +except ImportError: + import sys + if sys.platform == 'win32': + raise ImportError("the 'readline' module is not available on Windows" + " (on either PyPy or CPython)") + raise diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -47,6 +47,11 @@ Install build-time dependencies ------------------------------- +(**Note**: for some hints on how to translate the Python interpreter under +Windows, see the `windows document`_) + +.. _`windows document`: windows.html + To build PyPy on Unix using the C translation backend, you need at least a C compiler and ``make`` installed. Further, some optional modules have additional diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,12 +30,10 @@ Initialize threads. Only need to be called if there are any threads involved -.. function:: long pypy_setup_home(char* home, int verbose); +.. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given - "PyPy home directory". It is not strictly necessary to execute it before - running Python code, but without it you will not be able to import any - non-builtin module from the standard library. The arguments are: + "PyPy home directory". 
The arguments are: * ``home``: NULL terminated path to an executable inside the pypy directory (can be a .so name, can be made up) @@ -84,25 +82,36 @@ const char source[] = "print 'hello from pypy'"; - int main() + int main(void) { - int res; + int res; - rpython_startup_code(); - // pypy_setup_home() is not needed in this trivial example - res = pypy_execute_source((char*)source); - if (res) { - printf("Error calling pypy_execute_source!\n"); - } - return res; + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; } -If we save it as ``x.c`` now, compile it and run it with:: +If we save it as ``x.c`` now, compile it and run it (on linux) with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy +on OSX it is necessary to set the rpath of the binary if one wants to link to it:: + + gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl, at executable_path + ./x + hello from pypy + Worked! .. note:: If the compilation fails because of missing PyPy.h header file, diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,93 @@ .. contents:: +Using Mercurial +--------------- + +PyPy development is based on Mercurial (hg). If you are not used to +version control, the cycle for a new PyPy contributor goes typically +like this: + +* Make an account on bitbucket_. + +* Go to https://bitbucket.org/pypy/pypy/ and click "fork" (left + icons). You get a fork of the repository, e.g. in + https://bitbucket.org/yourname/pypy/. + +* Clone this new repo (i.e. the fork) to your local machine with the command + ``hg clone ssh://hg at bitbucket.org/yourname/pypy``. 
It is a very slow + operation but only ever needs to be done once. If you already cloned + ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, + then you can reuse the same clone by editing the file ``.hg/hgrc`` in + your clone to contain the line ``default = + ssh://hg at bitbucket.org/yourname/pypy``, and then do ``hg pull && hg + up``. If you already have such a clone but don't want to change it, + you can clone that copy with ``hg clone /path/to/other/copy``, and + then edit ``.hg/hgrc`` as above and do ``hg pull && hg up``. + +* Now you have a complete copy of the PyPy repo. Make a branch + with a command like ``hg branch name_of_your_branch``. + +* Edit things. Use ``hg diff`` to see what you changed. Use ``hg add`` + to make Mercurial aware of new files you added, e.g. new test files. + Use ``hg status`` to see if there are such files. Run tests! (See + the rest of this page.) + +* Commit regularly with ``hg commit``. A one-line commit message is + fine. We love to have tons of commits; make one as soon as you have + some progress, even if it is only some new test that doesn't pass yet, + or fixing things even if not all tests pass. Step by step, you are + building the history of your changes, which is the point of a version + control system. (There are commands like ``hg log`` and ``hg up`` + that you should read about later, to learn how to navigate this + history.) + +* The commits stay on your machine until you do ``hg push`` to "push" + them back to the repo named in the file ``.hg/hgrc``. Repos are + basically just collections of commits (a commit is also called a + changeset): there is one repo per url, plus one for each local copy on + each local machine. The commands ``hg push`` and ``hg pull`` copy + commits around, with the goal that all repos in question end up with + the exact same set of commits. By opposition, ``hg up`` only updates + the "working copy" by reading the local repository, i.e. 
it makes the + files that you see correspond to the latest (or any other) commit + locally present. + +* You should push often; there is no real reason not to. Remember that + even if they are pushed, with the setup above, the commits are (1) + only in ``bitbucket.org/yourname/pypy``, and (2) in the branch you + named. Yes, they are publicly visible, but don't worry about someone + walking around the thousands of repos on bitbucket saying "hah, look + at the bad coding style of that guy". Try to get into the mindset + that your work is not secret and it's fine that way. We might not + accept it as is for PyPy, asking you instead to improve some things, + but we are not going to judge you. + +* The final step is to open a pull request, so that we know that you'd + like to merge that branch back to the original ``pypy/pypy`` repo. + This can also be done several times if you have interesting + intermediate states, but if you get there, then we're likely to + proceed to the next stage, which is... + +* Get a regular account for pushing directly to + ``bitbucket.org/pypy/pypy`` (just ask and you'll get it, basically). + Once you have it you can rewrite your file ``.hg/hgrc`` to contain + ``default = ssh://hg at bitbucket.org/pypy/pypy``. Your changes will + then be pushed directly to the official repo, but (if you follow these + rules) they are still on a branch, and we can still review the + branches you want to merge. + +* If you get closer to the regular day-to-day development, you'll notice + that we generally push small changes as one or a few commits directly + to the branch ``default``. Also, we often collaborate even if we are + on other branches, which do not really "belong" to anyone. At this + point you'll need ``hg merge`` and learn how to resolve conflicts that + sometimes occur when two people try to push different commits in + parallel on the same branch. But it is likely an issue for later ``:-)`` + +.. 
_bitbucket: https://bitbucket.org/ + + Running PyPy's unit tests ------------------------- diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -38,14 +38,13 @@ and not move the binary there, else PyPy would not be able to find its library. -If you want to install 3rd party libraries, the most convenient way is to -install distribute_ and pip_: +If you want to install 3rd party libraries, the most convenient way is +to install pip_ (unless you want to install virtualenv as explained +below; then you can directly use pip inside virtualenvs): .. code-block:: console - $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.1/bin/pypy distribute_setup.py + $ curl -O https://bootstrap.pypa.io/get-pip.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example @@ -69,7 +68,6 @@ Note that bin/python is now a symlink to bin/pypy. -.. _distribute: http://www.python-distribute.org/ .. _pip: http://pypi.python.org/pypi/pip diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py --- a/pypy/doc/test/test_whatsnew.py +++ b/pypy/doc/test/test_whatsnew.py @@ -78,9 +78,10 @@ def test_whatsnew(): doc = ROOT.join('pypy', 'doc') - whatsnew_list = doc.listdir('whatsnew-*.rst') - whatsnew_list.sort() - last_whatsnew = whatsnew_list[-1].read() + #whatsnew_list = doc.listdir('whatsnew-*.rst') + #whatsnew_list.sort() + #last_whatsnew = whatsnew_list[-1].read() + last_whatsnew = doc.join('whatsnew-head.rst').read() startrev, documented = parse_doc(last_whatsnew) merged, branch = get_merged_branches(ROOT, startrev, '') merged.discard('default') diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -51,3 +51,82 @@ .. 
branch: ssa-flow Use SSA form for flow graphs inside build_flow() and part of simplify_graph() + +.. branch: ufuncapi + +Implement most of the GenericUfunc api to support numpy linalg. The strategy is +to encourage use of pure python or cffi ufuncs by extending frompyfunc(). +See the docstring of frompyfunc for more details. This dovetails with a branch +of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in +python, calling lapack from cffi. The branch also support traditional use of +cpyext GenericUfunc definitions in c. + +.. branch: all_ordered_dicts + +This makes ordered dicts the default dictionary implementation in +RPython and in PyPy. It polishes the basic idea of rordereddict.py +and then fixes various things, up to simplifying +collections.OrderedDict. + +Note: Python programs can rely on the guaranteed dict order in PyPy +now, but for compatibility with other Python implementations they +should still use collections.OrderedDict where that really matters. +Also, support for reversed() was *not* added to the 'dict' class; +use OrderedDict. + +Benchmark results: in the noise. A few benchmarks see good speed +improvements but the average is very close to parity. + +.. branch: berkerpeksag/fix-broken-link-in-readmerst-1415127402066 +.. branch: bigint-with-int-ops +.. branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 +.. branch: float-opt +.. branch: gc-incminimark-pinning + +This branch adds an interface rgc.pin which would (very temporarily) +make object non-movable. That's used by rffi.alloc_buffer and +rffi.get_nonmovable_buffer and improves performance considerably for +IO operations. + +.. branch: gc_no_cleanup_nursery + +A branch started by Wenzhu Man (SoC'14) and then done by fijal. It +removes the clearing of the nursery. 
The drawback is that new objects +are not automatically filled with zeros any longer, which needs some +care, mostly for GC references (which the GC tries to follow, so they +must not contain garbage). The benefit is a quite large speed-up. + +.. branch: improve-gc-tracing-hooks +.. branch: improve-ptr-conv-error +.. branch: intern-not-immortal + +Fix intern() to return mortal strings, like in CPython. + +.. branch: issue1922-take2 +.. branch: kill-exported-symbols-list +.. branch: kill-rctime +.. branch: kill_ll_termios +.. branch: look-into-all-modules +.. branch: nditer-external_loop +.. branch: numpy-generic-item +.. branch: osx-shared + +``--shared`` support on OS/X (thanks wouter) + +.. branch: portable-threadlocal +.. branch: pypy-dont-copy-ops +.. branch: recursion_and_inlining +.. branch: slim-down-resumedescr +.. branch: squeaky/use-cflags-for-compiling-asm +.. branch: unicode-fix +.. branch: zlib_zdict + +.. branch: errno-again + +Changes how errno, GetLastError, and WSAGetLastError are handled. +The idea is to tie reading the error status as close as possible to +the external function call. This fixes some bugs, both of the very +rare kind (e.g. errno on Linux might in theory be overwritten by +mmap(), called rarely during major GCs, if such a major GC occurs at +exactly the wrong time), and some of the less rare kind +(particularly on Windows tests). 
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -78,6 +78,7 @@ Then you need to execute:: + \vc\vcvars.bat editbin /largeaddressaware translator.exe where ``translator.exe`` is the pypy.exe or cpython.exe you will use to @@ -96,7 +97,7 @@ Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------------------------------- Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local_2.4.zip @@ -110,7 +111,13 @@ set INCLUDE=\include;\tcltk\include;%INCLUDE% set LIB=\lib;\tcltk\lib;%LIB% -Now you should be good to go. Read on for more information. +Now you should be good to go. If you choose this method, you do not need +to download/build anything else. + +Nonabrided method (building from scratch) +----------------------------------------- + +If you want to, you can rebuild everything from scratch by continuing. 
The Boehm garbage collector diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -101,7 +101,7 @@ if space.is_none(w_path): if verbose: debug("Failed to find library based on pypy_find_stdlib") - return 1 + return rffi.cast(rffi.INT, 1) space.startup() space.call_function(w_pathsetter, w_path) # import site @@ -109,13 +109,13 @@ import_ = space.getattr(space.getbuiltinmodule('__builtin__'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) - return 0 + return rffi.cast(rffi.INT, 0) except OperationError, e: if verbose: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 + return rffi.cast(rffi.INT, -1) @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): @@ -234,8 +234,7 @@ enable_translationmodules(config) config.translation.suggest(check_str_without_nul=True) - if sys.platform.startswith('linux'): - config.translation.suggest(shared=True) + config.translation.suggest(shared=True) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -83,17 +83,16 @@ class __extend__(ast.BoolOp): - def _accept_jump_if_any_is(self, gen, condition, target): - self.values[0].accept_jump_if(gen, condition, target) - for i in range(1, len(self.values)): + def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0): + for i in range(len(self.values) - skip_last): self.values[i].accept_jump_if(gen, condition, target) def accept_jump_if(self, gen, condition, target): if condition and self.op == ast.And or \ (not condition and self.op == ast.Or): end = gen.new_block() - 
self._accept_jump_if_any_is(gen, not condition, end) - gen.emit_jump(ops.JUMP_FORWARD, target) + self._accept_jump_if_any_is(gen, not condition, end, skip_last=1) + self.values[-1].accept_jump_if(gen, condition, target) gen.use_next_block(end) else: self._accept_jump_if_any_is(gen, condition, target) @@ -255,11 +254,15 @@ return rep def visit_Name(self, name): - # Turn loading None into a constant lookup. Eventaully, we can do this - # for True and False, too. + # Turn loading None into a constant lookup. We cannot do this + # for True and False, because rebinding them is allowed (2.7). if name.id == "None": - assert name.ctx == ast.Load - return ast.Const(self.space.w_None, name.lineno, name.col_offset) + # The compiler refuses to parse "None = ...", but "del None" + # is allowed (if pointless). Check anyway: custom asts that + # correspond to "None = ..." can be made by hand. + if name.ctx == ast.Load: + return ast.Const(self.space.w_None, name.lineno, + name.col_offset) return name def visit_Tuple(self, tup): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -486,10 +486,10 @@ w_exception_class=w_exception_class) wrap_oserror._annspecialcase_ = 'specialize:arg(3)' -def exception_from_errno(space, w_type): - from rpython.rlib.rposix import get_errno +def exception_from_saved_errno(space, w_type): + from rpython.rlib.rposix import get_saved_errno - errno = get_errno() + errno = get_saved_errno() msg = os.strerror(errno) w_error = space.call_function(w_type, space.wrap(errno), space.wrap(msg)) return OperationError(w_type, w_error) diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -654,6 +654,18 @@ assert ex.match(space, space.w_SyntaxError) assert 'hello_world' in space.str_w(space.str(ex.get_w_value(space))) + def test_del_None(self): + snippet = 
'''if 1: + try: + del None + except NameError: + pass + ''' + code = self.compiler.compile(snippet, '', 'exec', 0) + space = self.space + w_d = space.newdict() + space.exec_(code, w_d, w_d) + class TestPythonAstCompiler_25_grammar(BaseTestCompiler): def setup_method(self, method): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -27,6 +27,6 @@ pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) - assert lltype.typeOf(res) == rffi.LONG + assert lltype.typeOf(res) == rffi.INT assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -86,9 +86,11 @@ def print_(*args, **kwargs): """The new-style print function from py3k.""" - fp = kwargs.pop("file", sys.stdout) + fp = kwargs.pop("file", None) if fp is None: - return + fp = sys.stdout + if fp is None: + return def write(data): if not isinstance(data, basestring): data = str(data) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -651,9 +651,12 @@ out = sys.stdout = StringIO.StringIO() try: pr("Hello,", "person!") + pr("2nd line", file=None) + sys.stdout = None + pr("nowhere") finally: sys.stdout = save - assert out.getvalue() == "Hello, person!\n" + assert out.getvalue() == "Hello, person!\n2nd line\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" @@ -668,7 +671,6 @@ result = out.getvalue() assert isinstance(result, unicode) assert result == u"Hello, person!\n" - pr("Hello", file=None) # This works. 
out = StringIO.StringIO() pr(None, file=out) assert out.getvalue() == "None\n" diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -78,6 +78,7 @@ 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', + 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -30,3 +30,17 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) + +def reversed_dict(space, w_obj): + """Enumerate the keys in a dictionary object in reversed order. + + This is a __pypy__ function instead of being simply done by calling + reversed(), for CPython compatibility: dictionaries are only ordered + on PyPy. You should use the collections.OrderedDict class for cases + where ordering is important. That class implements __reversed__ by + calling __pypy__.reversed_dict(). 
+ """ + from pypy.objspace.std.dictmultiobject import W_DictMultiObject + if not isinstance(w_obj, W_DictMultiObject): + raise OperationError(space.w_TypeError, space.w_None) + return w_obj.nondescr_reversed_dict(space) diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py --- a/pypy/module/__pypy__/interp_time.py +++ b/pypy/module/__pypy__/interp_time.py @@ -1,7 +1,7 @@ from __future__ import with_statement import sys -from pypy.interpreter.error import exception_from_errno +from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform From noreply at buildbot.pypy.org Wed Jan 21 18:54:51 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 21 Jan 2015 18:54:51 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: rename the classes Message-ID: <20150121175451.C618D1C04CF@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75469:43929ae70123 Date: 2015-01-21 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/43929ae70123/ Log: rename the classes diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -8,7 +8,7 @@ W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, BaseValueIterator, BaseItemIterator, _never_equal_to_string ) -from pypy.objspace.std.typeobject import TypeCell +from pypy.objspace.std.typeobject import MutableCell # ____________________________________________________________ @@ -872,15 +872,15 @@ if version_tag is not None: name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is - # a TypeCell, which may change without changing the version_tag + # a MutableCell, which may change without changing the version_tag _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, 
version_tag) # selector = ("", INVALID) if w_descr is None: selector = (name, DICT) # common case: no such attr in the class - elif isinstance(w_descr, TypeCell): - pass # we have a TypeCell in the class: give up + elif isinstance(w_descr, MutableCell): + pass # we have a MutableCell in the class: give up elif space.is_data_descr(w_descr): # we have a data descriptor, which means the dictionary value # (if any) has no relevance. @@ -929,11 +929,11 @@ # We know here that w_obj.getdictvalue(space, name) just returned None, # so the 'name' is not in the instance. We repeat the lookup to find it # in the class, this time taking care of the result: it can be either a - # quasi-constant class attribute, or actually a TypeCell --- which we + # quasi-constant class attribute, or actually a MutableCell --- which we # must not cache. (It should not be None here, but you never know...) _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) - if w_method is None or isinstance(w_method, TypeCell): + if w_method is None or isinstance(w_method, MutableCell): return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -11,18 +11,18 @@ from rpython.rlib.objectmodel import current_object_addr_as_int, compute_hash from rpython.rlib.rarithmetic import intmask, r_uint -class BaseTypeCell(W_Root): +class MutableCell(W_Root): def unwrap_cell(self, space): raise NotImplementedError("abstract base") -class TypeCell(BaseTypeCell): +class ObjectMutableCell(MutableCell): def __init__(self, w_value=None): self.w_value = w_value def unwrap_cell(self, space): return self.w_value -class IntTypeCell(BaseTypeCell): +class IntMutableCell(MutableCell): def __init__(self, intvalue): self.intvalue = intvalue @@ -32,16 +32,16 @@ def unwrap_cell(space, w_value): if space.config.objspace.std.withtypeversion: 
- if isinstance(w_value, BaseTypeCell): + if isinstance(w_value, MutableCell): return w_value.unwrap_cell(space) return w_value def write_cell(space, w_cell, w_value): from pypy.objspace.std.intobject import W_IntObject - if isinstance(w_cell, TypeCell): + if isinstance(w_cell, ObjectMutableCell): w_cell.w_value = w_value return None - elif isinstance(w_cell, IntTypeCell) and type(w_value) is W_IntObject: + elif isinstance(w_cell, IntMutableCell) and type(w_value) is W_IntObject: w_cell.intvalue = w_value.intval return None elif space.is_w(w_cell, w_value): @@ -49,9 +49,9 @@ # create a level of indirection, or mutate the version. return None if type(w_value) is W_IntObject: - return IntTypeCell(w_value.intval) + return IntMutableCell(w_value.intval) else: - return TypeCell(w_value) + return ObjectMutableCell(w_value) class VersionTag(object): pass @@ -397,7 +397,7 @@ tup_w = w_self._pure_lookup_where_with_method_cache(name, version_tag) w_class, w_value = tup_w if (space.config.objspace.std.withtypeversion and - isinstance(w_value, BaseTypeCell)): + isinstance(w_value, MutableCell)): return w_class, w_value.unwrap_cell(space) return tup_w # don't make a new tuple, reuse the old one From noreply at buildbot.pypy.org Wed Jan 21 18:54:53 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 21 Jan 2015 18:54:53 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: also use mutable int cells for celldict by sharing the code Message-ID: <20150121175453.156801C04CF@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75470:190ff2945d6d Date: 2015-01-21 18:54 +0100 http://bitbucket.org/pypy/pypy/changeset/190ff2945d6d/ Log: also use mutable int cells for celldict by sharing the code diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -9,23 +9,16 @@ from pypy.objspace.std.dictmultiobject import ( DictStrategy, ObjectDictStrategy, 
_never_equal_to_string, create_iterator_classes) +from pypy.objspace.std.typeobject import ( + MutableCell, IntMutableCell, ObjectMutableCell, write_cell) class VersionTag(object): pass - -class ModuleCell(W_Root): - def __init__(self, w_value=None): - self.w_value = w_value - - def __repr__(self): - return "" % (self.w_value, ) - - -def unwrap_cell(w_value): - if isinstance(w_value, ModuleCell): - return w_value.w_value +def unwrap_cell(space, w_value): + if isinstance(w_value, MutableCell): + return w_value.unwrap_cell(space) return w_value @@ -71,15 +64,9 @@ def setitem_str(self, w_dict, key, w_value): cell = self.getdictvalue_no_unwrapping(w_dict, key) - if isinstance(cell, ModuleCell): - cell.w_value = w_value + w_value = write_cell(self.space, cell, w_value) + if w_value is None: return - if cell is not None: - # If the new value and the current value are the same, don't - # create a level of indirection, or mutate the version. - if self.space.is_w(w_value, cell): - return - w_value = ModuleCell(w_value) self.mutated() self.unerase(w_dict.dstorage)[key] = w_value @@ -131,7 +118,7 @@ def getitem_str(self, w_dict, key): cell = self.getdictvalue_no_unwrapping(w_dict, key) - return unwrap_cell(cell) + return unwrap_cell(self.space, cell) def w_keys(self, w_dict): space = self.space @@ -140,12 +127,12 @@ def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues - return [unwrap_cell(cell) for cell in iterator()] + return [unwrap_cell(self.space, cell) for cell in iterator()] def items(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.newtuple([_wrapkey(space, key), unwrap_cell(cell)]) + return [space.newtuple([_wrapkey(space, key), unwrap_cell(self.space, cell)]) for key, cell in iterator()] def clear(self, w_dict): @@ -157,7 +144,7 @@ d = self.unerase(w_dict.dstorage) key, cell = d.popitem() self.mutated() - return _wrapkey(space, key), unwrap_cell(cell) + return _wrapkey(space, key), 
unwrap_cell(self.space, cell) def switch_to_object_strategy(self, w_dict): space = self.space @@ -165,7 +152,7 @@ strategy = space.fromcache(ObjectDictStrategy) d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): - d_new[_wrapkey(space, key)] = unwrap_cell(cell) + d_new[_wrapkey(space, key)] = unwrap_cell(self.space, cell) w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) @@ -181,7 +168,7 @@ wrapkey = _wrapkey def wrapvalue(space, value): - return unwrap_cell(value) + return unwrap_cell(space, value) create_iterator_classes(ModuleDictStrategy) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -22,6 +22,10 @@ def unwrap_cell(self, space): return self.w_value + def __repr__(self): + return "" % (self.w_value, ) + + class IntMutableCell(MutableCell): def __init__(self, intvalue): self.intvalue = intvalue @@ -29,6 +33,9 @@ def unwrap_cell(self, space): return space.wrap(self.intvalue) + def __repr__(self): + return "" % (self.intvalue, ) + def unwrap_cell(space, w_value): if space.config.objspace.std.withtypeversion: @@ -38,6 +45,9 @@ def write_cell(space, w_cell, w_value): from pypy.objspace.std.intobject import W_IntObject + if w_cell is None: + # attribute does not exist at all, write it without a cell first + return w_value if isinstance(w_cell, ObjectMutableCell): w_cell.w_value = w_value return None @@ -303,10 +313,9 @@ if version_tag is not None: w_curr = w_self._pure_getdictvalue_no_unwrapping( space, version_tag, name) - if w_curr is not None: - w_value = write_cell(space, w_curr, w_value) - if w_value is None: - return True + w_value = write_cell(space, w_curr, w_value) + if w_value is None: + return True w_self.mutated(name) w_self.dict_w[name] = w_value return True From noreply at buildbot.pypy.org Wed Jan 21 19:26:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Jan 2015 19:26:21 
+0100 (CET) Subject: [pypy-commit] pypy default: Trying to add another likely() here for the common path where no exception occurred Message-ID: <20150121182621.A6B2D1C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75471:d6a21b1c7ca8 Date: 2015-01-21 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/d6a21b1c7ca8/ Log: Trying to add another likely() here for the common path where no exception occurred diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -398,6 +398,7 @@ else: v_exc_type = self.gen_getfield('exc_type', llops) var_no_exc = self.gen_isnull(v_exc_type, llops) + var_no_exc = llops.genop('likely', [var_no_exc], lltype.Bool) block.operations.extend(llops) From noreply at buildbot.pypy.org Wed Jan 21 19:26:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Jan 2015 19:26:23 +0100 (CET) Subject: [pypy-commit] pypy default: Support likely and unlikely here Message-ID: <20150121182623.27C7B1C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75472:da66d668de34 Date: 2015-01-21 19:25 +0100 http://bitbucket.org/pypy/pypy/changeset/da66d668de34/ Log: Support likely and unlikely here diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -253,6 +253,12 @@ return [None, # hack, do the right renaming from op.args[0] to op.result SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_likely(self, op): + return None # "no real effect" + + def rewrite_op_unlikely(self, op): + return None # "no real effect" + def rewrite_op_raw_malloc_usage(self, op): if self.cpu.translate_support_code or isinstance(op.args[0], Variable): return # the operation disappears diff --git a/rpython/jit/codewriter/test/test_jtransform.py 
b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1376,3 +1376,12 @@ tr.rewrite_operation(op) except Exception, e: assert 'foobar' in str(e) + +def test_likely_unlikely(): + v1 = varoftype(lltype.Bool) + v2 = varoftype(lltype.Bool) + op = SpaceOperation('likely', [v1], v2) + tr = Transformer() + assert tr.rewrite_operation(op) is None + op = SpaceOperation('unlikely', [v1], v2) + assert tr.rewrite_operation(op) is None From noreply at buildbot.pypy.org Thu Jan 22 00:45:19 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 22 Jan 2015 00:45:19 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: update test_pypy_c Message-ID: <20150121234519.2D4CF1C00B3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75473:0e13cfc290da Date: 2015-01-22 00:44 +0100 http://bitbucket.org/pypy/pypy/changeset/0e13cfc290da/ Log: update test_pypy_c diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -83,7 +83,7 @@ loops = log.loops_by_filename(self.filepath) assert len(loops) == 1 - def test_mutate_class(self): + def test_mutate_class_int(self): def fn(n): class A(object): count = 1 @@ -106,7 +106,7 @@ entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', - 'getfield_gc', 'guard_nonnull_class'] + 'getfield_gc'] # the STORE_ATTR is folded away assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] # @@ -114,19 +114,77 @@ # ---------------------- loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i8 = getfield_gc_pure(p5, descr=...) 
- i9 = int_lt(i8, i7) - guard_true(i9, descr=.*) - guard_not_invalidated(descr=.*) - i82 = getfield_gc_pure(p8, descr=...) - i11 = int_add_ovf(i82, 1) + i58 = int_lt(i38, i31) + guard_true(i58, descr=...) + guard_not_invalidated(descr=...) + i59 = int_add_ovf(i57, 1) guard_no_overflow(descr=...) - i12 = force_token() - --TICK-- - p20 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p20, i11, descr=) - setfield_gc(ConstPtr(ptr21), p20, descr=) - jump(..., descr=...) + p60 = force_token() + i61 = getfield_raw(..., descr=...) + setfield_gc(ConstPtr(ptr39), i59, descr=...) + i62 = int_lt(i61, 0) + guard_false(i62, descr=...) + jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, descr=...) + """) + + def test_mutate_class(self): + def fn(n): + class LL(object): + def __init__(self, n): + self.n = n + class A(object): + count = None + def __init__(self, a): + self.a = a + def f(self): + return self.count + i = 0 + a = A(1) + while i < n: + A.count = LL(A.count) # ID: mutate + a.f() # ID: meth1 + i += 1 + return i + # + log = self.run(fn, [1000], threshold=10) + assert log.result == 1000 + # + # first, we test the entry bridge + # ------------------------------- + entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) + ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') + assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', + 'getfield_gc', 'guard_nonnull_class', + 'getfield_gc', 'guard_value', # type check on the attribute + ] + # the STORE_ATTR is folded away + assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] + # + # then, the actual loop + # ---------------------- + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i70 = int_lt(i58, i33) + guard_true(i70, descr=...) + guard_not_invalidated(descr=...) + p71 = getfield_gc(p64, descr=...) + guard_value(p71, ConstPtr(ptr42), descr=...) 
+ p72 = force_token() + p73 = force_token() + i74 = int_add(i58, 1) + i75 = getfield_raw(..., descr=...) + i76 = int_lt(i75, 0) + guard_false(i76, descr=...) + p77 = new_with_vtable(...) + setfield_gc(p77, p64, descr=...) + setfield_gc(p77, ConstPtr(null), descr=...) + setfield_gc(p77, ConstPtr(null), descr=...) + setfield_gc(p77, ConstPtr(null), descr=...) + setfield_gc(p77, ConstPtr(null), descr=...) + setfield_gc(p77, ConstPtr(ptr42), descr=...) + setfield_gc(ConstPtr(ptr69), p77, descr=...) + jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, descr=...) + """) def test_oldstyle_newstyle_mix(self): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -407,6 +407,7 @@ w_class, w_value = tup_w if (space.config.objspace.std.withtypeversion and isinstance(w_value, MutableCell)): + import pdb; pdb.set_trace() return w_class, w_value.unwrap_cell(space) return tup_w # don't make a new tuple, reuse the old one From noreply at buildbot.pypy.org Thu Jan 22 09:32:23 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 22 Jan 2015 09:32:23 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: remove pdb :-( Message-ID: <20150122083223.3A0AD1C04DA@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75474:acf5d7f51ac8 Date: 2015-01-22 09:32 +0100 http://bitbucket.org/pypy/pypy/changeset/acf5d7f51ac8/ Log: remove pdb :-( diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -407,7 +407,6 @@ w_class, w_value = tup_w if (space.config.objspace.std.withtypeversion and isinstance(w_value, MutableCell)): - import pdb; pdb.set_trace() return w_class, w_value.unwrap_cell(space) return tup_w # don't make a new tuple, reuse the old one From noreply at buildbot.pypy.org Thu Jan 22 10:03:55 2015 From: noreply at buildbot.pypy.org (arigo) 
Date: Thu, 22 Jan 2015 10:03:55 +0100 (CET) Subject: [pypy-commit] pypy default: fix test, 2nd step Message-ID: <20150122090355.26D191C0532@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75475:6bfb35a7371e Date: 2015-01-22 10:03 +0100 http://bitbucket.org/pypy/pypy/changeset/6bfb35a7371e/ Log: fix test, 2nd step diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -78,7 +78,7 @@ {{{ setfield_gc(p13, 0, descr=) setfield_gc(p13, 0, descr=) - setfield_gc(p13, 16, descr=) + setfield_gc(p13, 32, descr=) }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) From noreply at buildbot.pypy.org Thu Jan 22 10:05:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Jan 2015 10:05:12 +0100 (CET) Subject: [pypy-commit] pypy default: add merged branches Message-ID: <20150122090512.31C9C1C0532@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75476:28167a7092ca Date: 2015-01-22 10:04 +0100 http://bitbucket.org/pypy/pypy/changeset/28167a7092ca/ Log: add merged branches diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -130,3 +130,6 @@ mmap(), called rarely during major GCs, if such a major GC occurs at exactly the wrong time), and some of the less rare kind (particularly on Windows tests). + +.. branch: osx-package.py +.. branch: package.py-helpful-error-message From noreply at buildbot.pypy.org Thu Jan 22 10:10:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Jan 2015 10:10:29 +0100 (CET) Subject: [pypy-commit] pypy default: Comment out again the likely() hint added in exceptiontransform.py as not measurably helping. 
Message-ID: <20150122091029.54ACC1C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75477:2eb2275f6e38 Date: 2015-01-22 10:10 +0100 http://bitbucket.org/pypy/pypy/changeset/2eb2275f6e38/ Log: Comment out again the likely() hint added in exceptiontransform.py as not measurably helping. diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -398,7 +398,10 @@ else: v_exc_type = self.gen_getfield('exc_type', llops) var_no_exc = self.gen_isnull(v_exc_type, llops) - var_no_exc = llops.genop('likely', [var_no_exc], lltype.Bool) + # + # We could add a "var_no_exc is likely true" hint, but it seems + # not to help, so it was commented out again. + #var_no_exc = llops.genop('likely', [var_no_exc], lltype.Bool) block.operations.extend(llops) From noreply at buildbot.pypy.org Thu Jan 22 11:08:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Jan 2015 11:08:13 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150122100813.91ECA1C05A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r562:9735b8aa9f41 Date: 2015-01-22 11:08 +0100 http://bitbucket.org/pypy/pypy.org/changeset/9735b8aa9f41/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $58605 of $105000 (55.8%) + $58609 of $105000 (55.8%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $21681 of $80000 (27.1%) + $21731 of $80000 (27.2%)
From noreply at buildbot.pypy.org Thu Jan 22 11:22:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Jan 2015 11:22:45 +0100 (CET) Subject: [pypy-commit] buildbot default: Compact the benchmarks running on tannit Message-ID: <20150122102245.EC1011C05A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r924:9b183c705221 Date: 2015-01-22 11:23 +0100 http://bitbucket.org/pypy/buildbot/changeset/9b183c705221/ Log: Compact the benchmarks running on tannit diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -212,7 +212,7 @@ LINUX64, # on allegro64, uses all cores JITLINUX32, # on tannit32, uses 1 core JITLINUX64, # on allegro64, uses 1 core - APPLVLLINUX32, # on tannit32, uses 1 core + #APPLVLLINUX32, # on tannit32, uses 1 core APPLVLLINUX64, # on allegro64, uses 1 core # other platforms #MACOSX32, # on minime @@ -226,14 +226,14 @@ PYPYBUILDBOT # on cobra ], branch='default', hour=0, minute=0), - Nightly("nightly-2-00", [ + Nightly("nightly-1-00", [ NUMPY_64, # on tannit64, uses 1 core, takes about 15min. # XXX maybe use a trigger instead? 
JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) JITBENCH64_NEW, # on speed64, uses 1 core (in part exclusively) - ], branch=None, hour=2, minute=0), + ], branch=None, hour=1, minute=0), Nightly("nightly-2-00-py3k", [ LINUX64, # on allegro64, uses all cores From noreply at buildbot.pypy.org Thu Jan 22 11:37:53 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 Jan 2015 11:37:53 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c8: import stmgc-c8 5cfce5d61c50 Message-ID: <20150122103753.1A05E1C05A0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c8 Changeset: r75478:1f67666be256 Date: 2015-01-22 10:58 +0100 http://bitbucket.org/pypy/pypy/changeset/1f67666be256/ Log: import stmgc-c8 5cfce5d61c50 diff too long, truncating to 2000 out of 8126 lines diff --git a/rpython/translator/stm/import_stmgc.py b/rpython/translator/stm/import_stmgc.py --- a/rpython/translator/stm/import_stmgc.py +++ b/rpython/translator/stm/import_stmgc.py @@ -15,7 +15,7 @@ yield line def main(stmgc_dir): - stmgc_dir = py.path.local(stmgc_dir).join('c7') + stmgc_dir = py.path.local(stmgc_dir).join('c8') popen = subprocess.Popen(['hg', 'id', '-i'], cwd=str(stmgc_dir), stdout=subprocess.PIPE) rev = popen.stdout.read().strip() diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -957947bc7ad9 +5cfce5d61c50 diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c deleted file mode 100644 --- a/rpython/translator/stm/src_stm/stm/contention.c +++ /dev/null @@ -1,323 +0,0 @@ -/* Imported by rpython/translator/stm/import_stmgc.py */ -#ifndef _STM_CORE_H_ -# error "must be compiled via stmgc.c" -#endif - - -/* Here are the possible kinds of contention: - - STM_CONTENTION_WRITE_WRITE - - A write-write contention 
occurs when we are running our - transaction and detect that we are about to write to an object - that another thread is also writing to. This kind of - contention must be resolved before continuing. This *must* - abort one of the two threads: the caller's thread is not at a - safe-point, so cannot wait! - - It is reported as a timing event with the following two markers: - the current thread (i.e. where the second-in-time write occurs); - and the other thread (from its 'modified_old_objects_markers', - where the first-in-time write occurred). - - STM_CONTENTION_WRITE_READ - - A write-read contention occurs when we are trying to commit: it - means that an object we wrote to was also read by another - transaction. Even though it would seem obvious that we should - just abort the other thread and proceed in our commit, a more - subtle answer would be in some cases to wait for the other thread - to commit first. It would commit having read the old value, and - then we can commit our change to it. - - It is reported as a timing event with only one marker: the - older location of the write that was done by the current thread. - - STM_CONTENTION_INEVITABLE - - An inevitable contention occurs when we're trying to become - inevitable but another thread already is. We can never abort the - other thread in this case, but we still have the choice to abort - ourselves or pause until the other thread commits. - - It is reported with two markers, one for the current thread and - one for the other thread. Each marker gives the location that - attempts to make the transaction inevitable. 
-*/ - - -struct contmgr_s { - enum stm_event_e kind; - struct stm_priv_segment_info_s *other_pseg; - bool abort_other; - bool try_sleep; // XXX add a way to timeout, but should handle repeated - // calls to contention_management() to avoid re-sleeping - // for the whole duration -}; - - -/************************************************************/ - - -__attribute__((unused)) -static void cm_always_abort_myself(struct contmgr_s *cm) -{ - cm->abort_other = false; -} - -__attribute__((unused)) -static void cm_always_abort_other(struct contmgr_s *cm) -{ - cm->abort_other = true; -} - -__attribute__((unused)) -static void cm_abort_the_younger(struct contmgr_s *cm) -{ - if (STM_PSEGMENT->start_time >= cm->other_pseg->start_time) { - /* We started after the other thread. Abort */ - cm->abort_other = false; - } - else { - cm->abort_other = true; - } -} - -__attribute__((unused)) -static void cm_always_wait_for_other_thread(struct contmgr_s *cm) -{ - /* we tried this contention management, but it seems to have - very bad cases: if thread 1 always reads an object in every - transaction, and thread 2 wants to write this object just - once, then thread 2 will pause when it tries to commit; - it will wait until thread 1 committed; but by the time - thread 2 resumes again, thread 1 has already started the - next transaction and read the object again. - */ - cm_abort_the_younger(cm); - cm->try_sleep = true; -} - -__attribute__((unused)) -static void cm_pause_if_younger(struct contmgr_s *cm) -{ - if (STM_PSEGMENT->start_time >= cm->other_pseg->start_time) { - /* We started after the other thread. 
Pause */ - cm->try_sleep = true; - cm->abort_other = false; - } - else { - cm->abort_other = true; - } -} - - -/************************************************************/ - - -static bool contention_management(uint8_t other_segment_num, - enum stm_event_e kind, - object_t *obj) -{ - assert(_has_mutex()); - assert(other_segment_num != STM_SEGMENT->segment_num); - - bool others_may_have_run = false; - if (must_abort()) - abort_with_mutex(); - - /* Report the contention */ - timing_contention(kind, other_segment_num, obj); - - /* Who should abort here: this thread, or the other thread? */ - struct contmgr_s contmgr; - contmgr.kind = kind; - contmgr.other_pseg = get_priv_segment(other_segment_num); - contmgr.abort_other = false; - contmgr.try_sleep = false; - - /* Pick one contention management... could be made dynamically choosable */ -#ifdef STM_TESTS - cm_abort_the_younger(&contmgr); -#else - cm_pause_if_younger(&contmgr); -#endif - - /* Fix the choices that are found incorrect due to TS_INEVITABLE - or is_abort() */ - if (is_abort(contmgr.other_pseg->pub.nursery_end)) { - contmgr.abort_other = true; - contmgr.try_sleep = false; - } - else if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { - assert(contmgr.other_pseg->transaction_state != TS_INEVITABLE); - contmgr.abort_other = true; - contmgr.try_sleep = false; - } - else if (contmgr.other_pseg->transaction_state == TS_INEVITABLE) { - contmgr.abort_other = false; - } - - /* Do one of three things here... - */ - if (contmgr.try_sleep && kind != STM_CONTENTION_WRITE_WRITE && - contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { - others_may_have_run = true; - /* Sleep. - - - Not for write-write contentions, because we're not at a - safe-point. - - - To prevent loops of threads waiting for each others, use - a crude heuristic of never pausing for a thread that is - itself already paused here. 
- */ - contmgr.other_pseg->signal_when_done = true; - - /* tell the other to commit ASAP */ - signal_other_to_commit_soon(contmgr.other_pseg); - - dprintf(("pausing...\n")); - - timing_event(STM_SEGMENT->running_thread, STM_WAIT_CONTENTION); - - cond_signal(C_AT_SAFE_POINT); - STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; - cond_wait(C_TRANSACTION_DONE); - STM_PSEGMENT->safe_point = SP_RUNNING; - dprintf(("pausing done\n")); - - timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE); - - if (must_abort()) - abort_with_mutex(); - } - - else if (!contmgr.abort_other) { - /* tell the other to commit ASAP, since it causes aborts */ - signal_other_to_commit_soon(contmgr.other_pseg); - - dprintf(("abort in contention: kind %d\n", kind)); - abort_with_mutex(); - } - - else { - /* We have to signal the other thread to abort, and wait until - it does. */ - contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; - - timing_event(STM_SEGMENT->running_thread, - STM_ABORTING_OTHER_CONTENTION); - - int sp = contmgr.other_pseg->safe_point; - switch (sp) { - - case SP_RUNNING: - /* The other thread is running now, so as NSE_SIGABORT was - set in its 'nursery_end', it will soon enter a - mutex_lock() and thus abort. - - In this case, we will wait until it broadcasts "I'm done - aborting". Important: this is not a safe point of any - kind! The shadowstack may not be correct here. It - should not end in a deadlock, because the target thread - is, in principle, guaranteed to call abort_with_mutex() - very soon. Just to be on the safe side, make it really - impossible for the target thread to later enter the same - cond_wait(C_ABORTED) (and thus wait, possibly for us, - ending in a deadlock): check again must_abort() first. 
- */ - if (must_abort()) - abort_with_mutex(); - - others_may_have_run = true; - dprintf(("contention: wait C_ABORTED...\n")); - cond_wait(C_ABORTED); - dprintf(("contention: done\n")); - - if (must_abort()) - abort_with_mutex(); - break; - - /* The other cases are where the other thread is at a - safe-point. We wake it up by sending the correct signal. - We don't have to wait here: the other thread will not do - anything more than abort when it really wakes up later. - */ - case SP_WAIT_FOR_C_REQUEST_REMOVED: - cond_broadcast(C_REQUEST_REMOVED); - break; - - case SP_WAIT_FOR_C_AT_SAFE_POINT: - cond_broadcast(C_AT_SAFE_POINT); - break; - - case SP_WAIT_FOR_C_TRANSACTION_DONE: - cond_broadcast(C_TRANSACTION_DONE); - break; - -#ifdef STM_TESTS - case SP_WAIT_FOR_OTHER_THREAD: - /* for tests: the other thread will abort as soon as - stm_stop_safe_point() is called */ - break; -#endif - - default: - stm_fatalerror("unexpected other_pseg->safe_point: %d", sp); - } - - if (is_aborting_now(other_segment_num)) { - /* The other thread is blocked in a safe-point with NSE_SIGABORT. - We don't have to wake it up right now, but we know it will - abort as soon as it wakes up. We can safely force it to - reset its state now. 
*/ - dprintf(("killing data structures\n")); - abort_data_structures_from_segment_num(other_segment_num); - } - dprintf(("killed other thread\n")); - - /* we should commit soon, we caused an abort */ - //signal_other_to_commit_soon(get_priv_segment(STM_SEGMENT->segment_num)); - if (!STM_PSEGMENT->signalled_to_commit_soon) { - STM_PSEGMENT->signalled_to_commit_soon = true; - stmcb_commit_soon(); - } - } - return others_may_have_run; -} - -static void write_write_contention_management(uintptr_t lock_idx, - object_t *obj) -{ - s_mutex_lock(); - - uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; - if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { - - uint8_t other_segment_num = prev_owner; - assert(get_priv_segment(other_segment_num)->write_lock_num == - prev_owner); - - contention_management(other_segment_num, - STM_CONTENTION_WRITE_WRITE, obj); - - /* now we return into _stm_write_slowpath() and will try again - to acquire the write lock on our object. */ - } - - s_mutex_unlock(); -} - -static bool write_read_contention_management(uint8_t other_segment_num, - object_t *obj) -{ - return contention_management(other_segment_num, - STM_CONTENTION_WRITE_READ, obj); -} - -static void inevitable_contention_management(uint8_t other_segment_num) -{ - contention_management(other_segment_num, - STM_CONTENTION_INEVITABLE, NULL); -} diff --git a/rpython/translator/stm/src_stm/stm/contention.h b/rpython/translator/stm/src_stm/stm/contention.h deleted file mode 100644 --- a/rpython/translator/stm/src_stm/stm/contention.h +++ /dev/null @@ -1,17 +0,0 @@ -/* Imported by rpython/translator/stm/import_stmgc.py */ - -static void write_write_contention_management(uintptr_t lock_idx, - object_t *obj); -static bool write_read_contention_management(uint8_t other_segment_num, - object_t *obj); -static void inevitable_contention_management(uint8_t other_segment_num); - -static inline bool is_abort(uintptr_t nursery_end) { - return (nursery_end <= 
_STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE - && nursery_end != NSE_SIGCOMMITSOON); -} - -static inline bool is_aborting_now(uint8_t other_segment_num) { - return (is_abort(get_segment(other_segment_num)->nursery_end) && - get_priv_segment(other_segment_num)->safe_point != SP_RUNNING); -} diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -4,305 +4,628 @@ #endif -static void teardown_core(void) +/* General helper: copies objects into our own segment, from some + source described by a range of 'struct stm_undo_s'. Maybe later + we could specialize this function to avoid the checks in the + inner loop. +*/ +static void import_objects( + int from_segnum, /* or -1: from undo->backup, + or -2: from undo->backup if not modified */ + uintptr_t pagenum, /* or -1: "all accessible" */ + struct stm_undo_s *undo, + struct stm_undo_s *end) { - memset(write_locks, 0, sizeof(write_locks)); + char *src_segment_base = (from_segnum >= 0 ? get_segment_base(from_segnum) + : NULL); + + assert(IMPLY(from_segnum >= 0, get_priv_segment(from_segnum)->modification_lock)); + assert(STM_PSEGMENT->modification_lock); + + DEBUG_EXPECT_SEGFAULT(false); + for (; undo < end; undo++) { + object_t *obj = undo->object; + stm_char *oslice = ((stm_char *)obj) + SLICE_OFFSET(undo->slice); + uintptr_t current_page_num = ((uintptr_t)oslice) / 4096; + + if (pagenum == -1) { + if (get_page_status_in(STM_SEGMENT->segment_num, + current_page_num) == PAGE_NO_ACCESS) + continue; + } + else { + if (current_page_num != pagenum) + continue; + } + + if (from_segnum == -2 && _stm_was_read(obj) && (obj->stm_flags & GCFLAG_WB_EXECUTED)) { + /* called from stm_validate(): + > if not was_read(), we certainly didn't modify + > if not WB_EXECUTED, we may have read from the obj in a different page but + did not modify it (should not occur right now, but future proof!) 
+ only the WB_EXECUTED alone is not enough, since we may have imported from a + segment's private page (which had the flag set) */ + assert(IMPLY(_stm_was_read(obj), (obj->stm_flags & GCFLAG_WB_EXECUTED))); /* for now */ + continue; /* only copy unmodified */ + } + + /* XXX: if the next assert is always true, we should never get a segfault + in this function at all. So the DEBUG_EXPECT_SEGFAULT is correct. */ + assert((get_page_status_in(STM_SEGMENT->segment_num, + current_page_num) != PAGE_NO_ACCESS)); + + dprintf(("import slice seg=%d obj=%p off=%lu sz=%d pg=%lu\n", + from_segnum, obj, SLICE_OFFSET(undo->slice), + SLICE_SIZE(undo->slice), current_page_num)); + char *src, *dst; + if (src_segment_base != NULL) + src = REAL_ADDRESS(src_segment_base, oslice); + else + src = undo->backup; + dst = REAL_ADDRESS(STM_SEGMENT->segment_base, oslice); + memcpy(dst, src, SLICE_SIZE(undo->slice)); + + if (src_segment_base == NULL && SLICE_OFFSET(undo->slice) == 0) { + /* check that restored obj doesn't have WB_EXECUTED */ + assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED)); + } + } + DEBUG_EXPECT_SEGFAULT(true); } -#ifdef NDEBUG -#define EVENTUALLY(condition) /* nothing */ -#else -#define EVENTUALLY(condition) \ - { \ - if (!(condition)) { \ - acquire_privatization_lock(); \ - if (!(condition)) \ - stm_fatalerror("fails: " #condition); \ - release_privatization_lock(); \ - } \ - } -#endif -static void check_flag_write_barrier(object_t *obj) +/* ############# signal handler ############# */ + +static void copy_bk_objs_in_page_from(int from_segnum, uintptr_t pagenum, + bool only_if_not_modified) { - /* check that all copies of the object, apart from mine, have the - GCFLAG_WRITE_BARRIER. 
(a bit messy because it's possible that we - read a page in the middle of privatization by another thread) - */ -#ifndef NDEBUG - long i; - struct object_s *o1; - for (i = 0; i <= NB_SEGMENTS; i++) { - if (i == STM_SEGMENT->segment_num) - continue; - o1 = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); - EVENTUALLY(o1->stm_flags & GCFLAG_WRITE_BARRIER); - } -#endif + /* looks at all bk copies of objects overlapping page 'pagenum' and + copies the part in 'pagenum' back to the current segment */ + dprintf(("copy_bk_objs_in_page_from(%d, %ld, %d)\n", + from_segnum, (long)pagenum, only_if_not_modified)); + + struct list_s *list = get_priv_segment(from_segnum)->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + + import_objects(only_if_not_modified ? -2 : -1, + pagenum, undo, end); } -__attribute__((always_inline)) -static void write_slowpath_overflow_obj(object_t *obj, bool mark_card) +static void go_to_the_past(uintptr_t pagenum, + struct stm_commit_log_entry_s *from, + struct stm_commit_log_entry_s *to) { - /* An overflow object is an object from the same transaction, but - outside the nursery. More precisely, it is no longer young, - i.e. it comes from before the most recent minor collection. - */ - assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); + assert(STM_PSEGMENT->modification_lock); + assert(from->rev_num >= to->rev_num); + /* walk BACKWARDS the commit log and update the page 'pagenum', + initially at revision 'from', until we reach the revision 'to'. */ - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - if (!mark_card) { - /* The basic case, with no card marking. We append the object - into 'objects_pointing_to_nursery', and remove the flag so - that the write_slowpath will not be called again until the - next minor collection. 
*/ - if (obj->stm_flags & GCFLAG_CARDS_SET) { - /* if we clear this flag, we also need to clear the cards */ - _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), - obj, CARD_CLEAR, false); - } - obj->stm_flags &= ~(GCFLAG_WRITE_BARRIER | GCFLAG_CARDS_SET); - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); - } - else { - /* Card marking. Don't remove GCFLAG_WRITE_BARRIER because we - need to come back to _stm_write_slowpath_card() for every - card to mark. Add GCFLAG_CARDS_SET. */ - assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); - obj->stm_flags |= GCFLAG_CARDS_SET; - assert(STM_PSEGMENT->old_objects_with_cards); - LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); + /* XXXXXXX Recursive algo for now, fix this! */ + if (from != to) { + struct stm_commit_log_entry_s *cl = to->next; + go_to_the_past(pagenum, from, cl); + + struct stm_undo_s *undo = cl->written; + struct stm_undo_s *end = cl->written + cl->written_count; + + import_objects(-1, pagenum, undo, end); } } -__attribute__((always_inline)) -static void write_slowpath_common(object_t *obj, bool mark_card) + + +static void handle_segfault_in_page(uintptr_t pagenum) { - assert(_seems_to_be_running_transaction()); - assert(!_is_young(obj)); - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + /* assumes page 'pagenum' is ACCESS_NONE, privatizes it, + and validates to newest revision */ - uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); + dprintf(("handle_segfault_in_page(%lu), seg %d\n", pagenum, STM_SEGMENT->segment_num)); - if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { - assert(write_locks[base_lock_idx] == 0); - write_slowpath_overflow_obj(obj, mark_card); + /* XXX: bad, but no deadlocks: */ + acquire_all_privatization_locks(); + + long i; + int my_segnum = STM_SEGMENT->segment_num; + + assert(get_page_status_in(my_segnum, pagenum) == PAGE_NO_ACCESS); + + /* find who has the most recent revision of our page */ + int copy_from_segnum = -1; + uint64_t most_recent_rev = 0; + 
for (i = 1; i < NB_SEGMENTS; i++) { + if (i == my_segnum) + continue; + + struct stm_commit_log_entry_s *log_entry; + log_entry = get_priv_segment(i)->last_commit_log_entry; + if (get_page_status_in(i, pagenum) != PAGE_NO_ACCESS + && (copy_from_segnum == -1 || log_entry->rev_num > most_recent_rev)) { + copy_from_segnum = i; + most_recent_rev = log_entry->rev_num; + } + } + OPT_ASSERT(copy_from_segnum != my_segnum); + + /* make our page write-ready */ + page_mark_accessible(my_segnum, pagenum); + + if (copy_from_segnum == -1) { + /* this page is only accessible in the sharing segment so far (new + allocation). We can thus simply mark it accessible here. */ + pagecopy(get_virtual_page(my_segnum, pagenum), + get_virtual_page(0, pagenum)); + release_all_privatization_locks(); return; } - /* Else, it's an old object and we need to privatise it. - Do a read-barrier now. Note that this must occur before the - safepoints that may be issued in write_write_contention_management(). - */ - stm_read(obj); - /* Take the segment's own lock number */ - uint8_t lock_num = STM_PSEGMENT->write_lock_num; + /* before copying anything, acquire modification locks from our and + the other segment */ + uint64_t to_lock = (1UL << copy_from_segnum)| (1UL << my_segnum); + acquire_modification_lock_set(to_lock); + pagecopy(get_virtual_page(my_segnum, pagenum), + get_virtual_page(copy_from_segnum, pagenum)); - /* If CARDS_SET, we entered here at least once already, so we - already own the write_lock */ - assert(IMPLY(obj->stm_flags & GCFLAG_CARDS_SET, - write_locks[base_lock_idx] == lock_num)); + /* if there were modifications in the page, revert them. */ + copy_bk_objs_in_page_from(copy_from_segnum, pagenum, false); - /* XXX XXX XXX make the logic of write-locking objects optional! */ + /* we need to go from 'src_version' to 'target_version'. This + might need a walk into the past. 
*/ + struct stm_commit_log_entry_s *src_version, *target_version; + src_version = get_priv_segment(copy_from_segnum)->last_commit_log_entry; + target_version = STM_PSEGMENT->last_commit_log_entry; - /* claim the write-lock for this object. In case we're running the - same transaction since a long while, the object can be already in - 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, - not in 'objects_pointing_to_nursery'). We'll detect this case - by finding that we already own the write-lock. */ - retry: - if (write_locks[base_lock_idx] == 0) { - /* A lock to prevent reading garbage from - lookup_other_thread_recorded_marker() */ - acquire_marker_lock(STM_SEGMENT->segment_base); + dprintf(("handle_segfault_in_page: rev %lu to rev %lu\n", + src_version->rev_num, target_version->rev_num)); + /* adapt revision of page to our revision: + if our rev is higher than the page we copy from, everything + is fine as we never read/modified the page anyway + */ + if (src_version->rev_num > target_version->rev_num) + go_to_the_past(pagenum, src_version, target_version); - if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[base_lock_idx], - 0, lock_num))) { - release_marker_lock(STM_SEGMENT->segment_base); - goto retry; + release_modification_lock_set(to_lock); + release_all_privatization_locks(); +} + +static void _signal_handler(int sig, siginfo_t *siginfo, void *context) +{ + assert(_stm_segfault_expected > 0); + + int saved_errno = errno; + char *addr = siginfo->si_addr; + dprintf(("si_addr: %p\n", addr)); + if (addr == NULL || addr < stm_object_pages || + addr >= stm_object_pages+TOTAL_MEMORY) { + /* actual segfault, unrelated to stmgc */ + fprintf(stderr, "Segmentation fault: accessing %p\n", addr); + abort(); + } + + int segnum = get_segment_of_linear_address(addr); + if (segnum != STM_SEGMENT->segment_num) { + fprintf(stderr, "Segmentation fault: accessing %p (seg %d) from" + " seg %d\n", addr, segnum, STM_SEGMENT->segment_num); + abort(); + } + 
dprintf(("-> segment: %d\n", segnum)); + + char *seg_base = STM_SEGMENT->segment_base; + uintptr_t pagenum = ((char*)addr - seg_base) / 4096UL; + if (pagenum < END_NURSERY_PAGE) { + fprintf(stderr, "Segmentation fault: accessing %p (seg %d " + "page %lu)\n", addr, segnum, pagenum); + abort(); + } + + DEBUG_EXPECT_SEGFAULT(false); + handle_segfault_in_page(pagenum); + DEBUG_EXPECT_SEGFAULT(true); + + errno = saved_errno; + /* now return and retry */ +} + +/* ############# commit log ############# */ + + +void _dbg_print_commit_log() +{ + struct stm_commit_log_entry_s *cl = &commit_log_root; + + fprintf(stderr, "commit log:\n"); + while ((cl = cl->next)) { + if (cl == INEV_RUNNING) { + fprintf(stderr, " INEVITABLE\n"); + return; + } + fprintf(stderr, " entry at %p: seg %d, rev %lu\n", cl, cl->segment_num, cl->rev_num); + struct stm_undo_s *undo = cl->written; + struct stm_undo_s *end = undo + cl->written_count; + for (; undo < end; undo++) { + fprintf(stderr, " obj %p, size %d, ofs %lu: ", undo->object, + SLICE_SIZE(undo->slice), SLICE_OFFSET(undo->slice)); + /* long i; */ + /* for (i=0; islice); i += 8) */ + /* fprintf(stderr, " 0x%016lx", *(long *)(undo->backup + i)); */ + fprintf(stderr, "\n"); + } + } +} + +static void reset_modified_from_backup_copies(int segment_num); /* forward */ + +static bool _stm_validate() +{ + /* returns true if we reached a valid state, or false if + we need to abort now */ + dprintf(("_stm_validate()\n")); + /* go from last known entry in commit log to the + most current one and apply all changes done + by other transactions. Abort if we have read one of + the committed objs. */ + struct stm_commit_log_entry_s *first_cl = STM_PSEGMENT->last_commit_log_entry; + struct stm_commit_log_entry_s *next_cl, *last_cl, *cl; + int my_segnum = STM_SEGMENT->segment_num; + /* Don't check this 'cl'. 
This entry is already checked */ + + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + //assert(first_cl->next == INEV_RUNNING); + /* the above assert may fail when running a major collection + while the commit of the inevitable transaction is in progress */ + return true; + } + + bool needs_abort = false; + + while(1) { + /* retry IF: */ + /* if at the time of "HERE" (s.b.) there happen to be + more commits (and bk copies) then it could be that + copy_bk_objs_in_page_from (s.b.) reads a bk copy that + is itself more recent than last_cl. This is fixed + by re-validating. */ + first_cl = STM_PSEGMENT->last_commit_log_entry; + if (first_cl->next == NULL) + break; + + if (first_cl->next == INEV_RUNNING) { + /* need to reach safe point if an INEV transaction + is waiting for us, otherwise deadlock */ + break; } - dprintf_test(("write_slowpath %p -> mod_old\n", obj)); + /* Find the set of segments we need to copy from and lock them: */ + uint64_t segments_to_lock = 1UL << my_segnum; + cl = first_cl; + while ((next_cl = cl->next) != NULL) { + if (next_cl == INEV_RUNNING) { + /* only validate entries up to INEV */ + break; + } + assert(next_cl->rev_num > cl->rev_num); + cl = next_cl; - /* Add the current marker, recording where we wrote to this object */ - timing_record_write(); - - /* Change to this old object from this transaction. - Add it to the list 'modified_old_objects'. */ - LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); - - release_marker_lock(STM_SEGMENT->segment_base); - - /* We need to privatize the pages containing the object, if they - are still SHARED_PAGE. The common case is that there is only - one page in total. */ - uintptr_t first_page = ((uintptr_t)obj) / 4096UL; - - /* If the object is in the uniform pages of small objects - (outside the nursery), then it fits into one page. This is - the common case. Otherwise, we need to compute it based on - its location and size. 
*/ - if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { - page_privatize(first_page); - } - else { - char *realobj; - size_t obj_size; - uintptr_t i, end_page; - - /* get the size of the object */ - realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - - /* get the last page containing data from the object */ - end_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; - - for (i = first_page; i <= end_page; i++) { - page_privatize(i); + if (cl->written_count) { + segments_to_lock |= (1UL << cl->segment_num); } } + last_cl = cl; + + /* HERE */ + + acquire_privatization_lock(STM_SEGMENT->segment_num); + acquire_modification_lock_set(segments_to_lock); + + + /* import objects from first_cl to last_cl: */ + if (first_cl != last_cl) { + uint64_t segment_really_copied_from = 0UL; + + cl = first_cl; + while ((cl = cl->next) != NULL) { + if (!needs_abort) { + struct stm_undo_s *undo = cl->written; + struct stm_undo_s *end = cl->written + cl->written_count; + for (; undo < end; undo++) { + if (_stm_was_read(undo->object)) { + /* first reset all modified objects from the backup + copies as soon as the first conflict is detected; + then we will proceed below to update our segment from + the old (but unmodified) version to the newer version. + */ + reset_modified_from_backup_copies(my_segnum); + needs_abort = true; + + dprintf(("_stm_validate() failed for obj %p\n", undo->object)); + break; + } + } + } + + if (cl->written_count) { + struct stm_undo_s *undo = cl->written; + struct stm_undo_s *end = cl->written + cl->written_count; + + segment_really_copied_from |= (1UL << cl->segment_num); + + import_objects(cl->segment_num, -1, undo, end); + + /* here we can actually have our own modified version, so + make sure to only copy things that are not modified in our + segment... 
(if we do not abort) */ + copy_bk_objs_in_page_from + (cl->segment_num, -1, /* any page */ + !needs_abort); /* if we abort, we still want to copy everything */ + } + + /* last fully validated entry */ + STM_PSEGMENT->last_commit_log_entry = cl; + if (cl == last_cl) + break; + } + assert(cl == last_cl); + + /* XXX: this optimization fails in test_basic.py, bug3 */ + /* OPT_ASSERT(segment_really_copied_from < (1 << NB_SEGMENTS)); */ + /* int segnum; */ + /* for (segnum = 1; segnum < NB_SEGMENTS; segnum++) { */ + /* if (segment_really_copied_from & (1UL << segnum)) { */ + /* /\* here we can actually have our own modified version, so */ + /* make sure to only copy things that are not modified in our */ + /* segment... (if we do not abort) *\/ */ + /* copy_bk_objs_in_page_from( */ + /* segnum, -1, /\* any page *\/ */ + /* !needs_abort); /\* if we abort, we still want to copy everything *\/ */ + /* } */ + /* } */ + } + + /* done with modifications */ + release_modification_lock_set(segments_to_lock); + release_privatization_lock(STM_SEGMENT->segment_num); } - else if (write_locks[base_lock_idx] == lock_num) { -#ifdef STM_TESTS - bool found = false; - LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t *, - ({ if (item == obj) { found = true; break; } })); - assert(found); + + return !needs_abort; +} + +static struct stm_commit_log_entry_s *_create_commit_log_entry(void) +{ + /* puts all modified_old_objects in a new commit log entry */ + + // we don't need the privatization lock, as we are only + // reading from modified_old_objs and nobody but us can change it + struct list_s *list = STM_PSEGMENT->modified_old_objects; + OPT_ASSERT((list_count(list) % 3) == 0); + size_t count = list_count(list) / 3; + size_t byte_len = sizeof(struct stm_commit_log_entry_s) + + count * sizeof(struct stm_undo_s); + struct stm_commit_log_entry_s *result = malloc(byte_len); + + result->next = NULL; + result->segment_num = STM_SEGMENT->segment_num; + result->rev_num = -1; /* invalid 
*/ + result->written_count = count; + memcpy(result->written, list->items, count * sizeof(struct stm_undo_s)); + return result; +} + + +static void reset_wb_executed_flags(void); +static void readd_wb_executed_flags(void); +static void check_all_write_barrier_flags(char *segbase, struct list_s *list); + +static void _validate_and_attach(struct stm_commit_log_entry_s *new) +{ + struct stm_commit_log_entry_s *old; + + OPT_ASSERT(new != NULL); + /* we are attaching a real CL entry: */ + bool is_commit = new != INEV_RUNNING; + + while (1) { + if (!_stm_validate()) { + if (new != INEV_RUNNING) + free(new); + stm_abort_transaction(); + } + +#if STM_TESTS + if (STM_PSEGMENT->transaction_state != TS_INEVITABLE + && STM_PSEGMENT->last_commit_log_entry->next == INEV_RUNNING) { + /* abort for tests... */ + stm_abort_transaction(); + } #endif + + if (is_commit) { + /* we must not remove the WB_EXECUTED flags before validation as + it is part of a condition in import_objects() called by + copy_bk_objs_in_page_from to not overwrite our modifications. + So we do it here: */ + reset_wb_executed_flags(); + check_all_write_barrier_flags(STM_SEGMENT->segment_base, + STM_PSEGMENT->modified_old_objects); + } + + /* try to attach to commit log: */ + old = STM_PSEGMENT->last_commit_log_entry; + if (old->next == NULL) { + if (new != INEV_RUNNING) /* INEVITABLE */ + new->rev_num = old->rev_num + 1; + + if (__sync_bool_compare_and_swap(&old->next, NULL, new)) + break; /* success! */ + } else if (old->next == INEV_RUNNING) { + /* we failed because there is an INEV transaction running */ + usleep(10); + } + + if (is_commit) { + /* XXX: unfortunately, if we failed to attach our CL entry, + we have to re-add the WB_EXECUTED flags before we try to + validate again because of said condition (s.a) */ + readd_wb_executed_flags(); + } + + dprintf(("_validate_and_attach(%p) failed, enter safepoint\n", new)); + + /* check for requested safe point. 
otherwise an INEV transaction + may try to commit but cannot because of the busy-loop here. */ + _stm_collectable_safe_point(); + } +} + +static void _validate_and_turn_inevitable(void) +{ + _validate_and_attach((struct stm_commit_log_entry_s *)INEV_RUNNING); +} + +static void _validate_and_add_to_commit_log(void) +{ + struct stm_commit_log_entry_s *old, *new; + + new = _create_commit_log_entry(); + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + old = STM_PSEGMENT->last_commit_log_entry; + new->rev_num = old->rev_num + 1; + OPT_ASSERT(old->next == INEV_RUNNING); + + /* WB_EXECUTED must be removed before we attach */ + reset_wb_executed_flags(); + check_all_write_barrier_flags(STM_SEGMENT->segment_base, + STM_PSEGMENT->modified_old_objects); + + bool yes = __sync_bool_compare_and_swap(&old->next, INEV_RUNNING, new); + OPT_ASSERT(yes); } else { - /* call the contention manager, and then retry (unless we were - aborted). */ - write_write_contention_management(base_lock_idx, obj); - goto retry; + _validate_and_attach(new); } + acquire_modification_lock(STM_SEGMENT->segment_num); + list_clear(STM_PSEGMENT->modified_old_objects); + STM_PSEGMENT->last_commit_log_entry = new; + release_modification_lock(STM_SEGMENT->segment_num); +} - /* check that we really have a private page */ - assert(is_private_page(STM_SEGMENT->segment_num, - ((uintptr_t)obj) / 4096)); +/* ############# STM ############# */ +void stm_validate() +{ + if (!_stm_validate()) + stm_abort_transaction(); +} - /* check that so far all copies of the object have the flag */ - check_flag_write_barrier(obj); - - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - if (!mark_card) { - /* A common case for write_locks[] that was either 0 or lock_num: - we need to add the object to the appropriate list if there is one. 
- */ - if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { - dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); - } - - if (obj->stm_flags & GCFLAG_CARDS_SET) { - /* if we clear this flag, we have to tell sync_old_objs that - everything needs to be synced */ - _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), - obj, CARD_MARKED_OLD, true); /* mark all */ - } - - /* remove GCFLAG_WRITE_BARRIER if we succeeded in getting the base - write-lock (not for card marking). */ - obj->stm_flags &= ~(GCFLAG_WRITE_BARRIER | GCFLAG_CARDS_SET); - } - else { - /* don't remove WRITE_BARRIER, but add CARDS_SET */ - obj->stm_flags |= GCFLAG_CARDS_SET; - assert(STM_PSEGMENT->old_objects_with_cards); - LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); - } - - /* for sanity, check again that all other segment copies of this - object still have the flag (so privatization worked) */ - check_flag_write_barrier(obj); -} void _stm_write_slowpath(object_t *obj) { - write_slowpath_common(obj, /*mark_card=*/false); -} + assert(_seems_to_be_running_transaction()); + assert(!_is_in_nursery(obj)); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); -static bool obj_should_use_cards(object_t *obj) -{ - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - long supports = stmcb_obj_supports_cards(realobj); - if (!supports) - return 0; + int my_segnum = STM_SEGMENT->segment_num; + uintptr_t end_page, first_page = ((uintptr_t)obj) / 4096UL; + char *realobj; + size_t obj_size; - /* check also if it makes sense: */ - size_t size = stmcb_size_rounded_up(realobj); - return (size >= _STM_MIN_CARD_OBJ_SIZE); -} - -char _stm_write_slowpath_card_extra(object_t *obj) -{ - /* the PyPy JIT calls this function directly if it finds that an - array doesn't have the GCFLAG_CARDS_SET */ - bool mark_card = obj_should_use_cards(obj); - write_slowpath_common(obj, mark_card); - 
return mark_card; -} - -long _stm_write_slowpath_card_extra_base(void) -{ - /* for the PyPy JIT: _stm_write_slowpath_card_extra_base[obj >> 4] - is the byte that must be set to CARD_MARKED. The logic below - does the same, but more explicitly. */ - return (((long)write_locks) - WRITELOCK_START + 1) - + 0x4000000000000000L; // <- workaround for a clang bug :-( -} - -void _stm_write_slowpath_card(object_t *obj, uintptr_t index) -{ - /* If CARDS_SET is not set so far, issue a normal write barrier. - If the object is large enough, ask it to set up the object for - card marking instead. - */ - if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { - char mark_card = _stm_write_slowpath_card_extra(obj); - if (!mark_card) - return; + realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + /* get the last page containing data from the object */ + if (LIKELY(is_small_uniform(obj))) { + end_page = first_page; + } else { + end_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; } - dprintf_test(("write_slowpath_card %p -> index:%lu\n", - obj, index)); + /* add to read set: */ + stm_read(obj); - /* We reach this point if we have to mark the card. - */ - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - assert(obj->stm_flags & GCFLAG_CARDS_SET); - assert(!(obj->stm_flags & GCFLAG_SMALL_UNIFORM)); /* not supported/tested */ + if (obj->stm_flags & GCFLAG_WB_EXECUTED) { + /* already executed WB once in this transaction. 
do GC + part again: */ + dprintf(("write_slowpath-fast(%p)\n", obj)); + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + return; + } -#ifndef NDEBUG - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - size_t size = stmcb_size_rounded_up(realobj); - /* we need at least one lock in addition to the STM-reserved object - write-lock */ - assert(size >= 32); - /* the 'index' must be in range(length-of-obj), but we don't have - a direct way to know the length. We know that it is smaller - than the size in bytes. */ - assert(index < size); -#endif + assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED)); + dprintf(("write_slowpath(%p): sz=%lu\n", obj, obj_size)); - /* Write into the card's lock. This is used by the next minor - collection to know what parts of the big object may have changed. - We already own the object here or it is an overflow obj. */ - uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); - uintptr_t card_lock_idx = base_lock_idx + get_index_to_card_index(index); - write_locks[card_lock_idx] = CARD_MARKED; + retry: + /* privatize pages: */ + /* XXX don't always acquire all locks... */ + acquire_all_privatization_locks(); - /* More debug checks */ - dprintf(("mark %p index %lu, card:%lu with %d\n", - obj, index, get_index_to_card_index(index), CARD_MARKED)); - assert(IMPLY(IS_OVERFLOW_OBJ(STM_PSEGMENT, obj), - write_locks[base_lock_idx] == 0)); - assert(IMPLY(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj), - write_locks[base_lock_idx] == STM_PSEGMENT->write_lock_num)); + uintptr_t page; + for (page = first_page; page <= end_page; page++) { + if (get_page_status_in(my_segnum, page) == PAGE_NO_ACCESS) { + /* XXX: slow? 
*/ + release_all_privatization_locks(); + + volatile char *dummy = REAL_ADDRESS(STM_SEGMENT->segment_base, page * 4096UL); + *dummy; /* force segfault */ + + goto retry; + } + } + /* all pages are private to us and we hold the privatization_locks so + we are allowed to modify them */ + + /* phew, now add the obj to the write-set and register the + backup copy. */ + /* XXX: we should not be here at all fiddling with page status + if 'obj' is merely an overflow object. FIX ME, likely by copying + the overflow number logic from c7. */ + + DEBUG_EXPECT_SEGFAULT(false); + + acquire_modification_lock(STM_SEGMENT->segment_num); + uintptr_t slice_sz; + uintptr_t in_page_offset = (uintptr_t)obj % 4096UL; + uintptr_t remaining_obj_sz = obj_size; + for (page = first_page; page <= end_page; page++) { + /* XXX Maybe also use mprotect() again to mark pages of the object as read-only, and + only stick it into modified_old_objects page-by-page? Maybe it's + possible to do card-marking that way, too. */ + OPT_ASSERT(remaining_obj_sz); + + slice_sz = remaining_obj_sz; + if (in_page_offset + slice_sz > 4096UL) { + /* not over page boundaries */ + slice_sz = 4096UL - in_page_offset; + } + + size_t slice_off = obj_size - remaining_obj_sz; + + /* make backup slice: */ + char *bk_slice = malloc(slice_sz); + memcpy(bk_slice, realobj + slice_off, slice_sz); + + STM_PSEGMENT->modified_old_objects = list_append3( + STM_PSEGMENT->modified_old_objects, + (uintptr_t)obj, /* obj */ + (uintptr_t)bk_slice, /* bk_addr */ + NEW_SLICE(slice_off, slice_sz)); + + remaining_obj_sz -= slice_sz; + in_page_offset = (in_page_offset + slice_sz) % 4096UL; /* mostly 0 */ + } + OPT_ASSERT(remaining_obj_sz == 0); + + /* remove the WRITE_BARRIER flag and add WB_EXECUTED */ + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + obj->stm_flags |= GCFLAG_WB_EXECUTED; + + DEBUG_EXPECT_SEGFAULT(true); + + release_modification_lock(STM_SEGMENT->segment_num); + /* done fiddling with protection and privatization */ + 
release_all_privatization_locks(); + + /* also add it to the GC list for minor collections */ + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); } static void reset_transaction_read_version(void) @@ -315,7 +638,7 @@ (long)(NB_READMARKER_PAGES * 4096UL))); if (mmap(readmarkers, NB_READMARKER_PAGES * 4096UL, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { + MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0) != readmarkers) { /* fall-back */ #if STM_TESTS stm_fatalerror("reset_transaction_read_version: %m"); @@ -325,24 +648,46 @@ STM_SEGMENT->transaction_read_version = 1; } -static uint64_t _global_start_time = 0; +static void reset_wb_executed_flags(void) +{ + dprintf(("reset_wb_executed_flags()\n")); + struct list_s *list = STM_PSEGMENT->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + + for (; undo < end; undo++) { + object_t *obj = undo->object; + obj->stm_flags &= ~GCFLAG_WB_EXECUTED; + } +} + +static void readd_wb_executed_flags(void) +{ + dprintf(("readd_wb_executed_flags()\n")); + struct list_s *list = STM_PSEGMENT->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + + for (; undo < end; undo++) { + object_t *obj = undo->object; + obj->stm_flags |= GCFLAG_WB_EXECUTED; + } +} + + + static void _stm_start_transaction(stm_thread_local_t *tl) { assert(!_stm_in_transaction(tl)); - while (!acquire_thread_segment(tl)) - ; + while (!acquire_thread_segment(tl)) {} /* GS invalid before this point! 
*/ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); - timing_event(tl, STM_TRANSACTION_START); - STM_PSEGMENT->start_time = _global_start_time++; - STM_PSEGMENT->signalled_to_commit_soon = false; + STM_PSEGMENT->transaction_state = TS_REGULAR; STM_PSEGMENT->safe_point = SP_RUNNING; - STM_PSEGMENT->marker_inev.object = NULL; - STM_PSEGMENT->transaction_state = TS_REGULAR; #ifndef NDEBUG STM_PSEGMENT->running_pthread = pthread_self(); #endif @@ -350,16 +695,10 @@ STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; enter_safe_point_if_requested(); - dprintf(("start_transaction\n")); + dprintf(("> start_transaction\n")); - s_mutex_unlock(); + s_mutex_unlock(); // XXX it's probably possible to not acquire this here - /* Now running the SP_RUNNING start. We can set our - 'transaction_read_version' after releasing the mutex, - because it is only read by a concurrent thread in - stm_commit_transaction(), which waits until SP_RUNNING - threads are paused. 
- */ uint8_t old_rv = STM_SEGMENT->transaction_read_version; STM_SEGMENT->transaction_read_version = old_rv + 1; if (UNLIKELY(old_rv == 0xff)) { @@ -367,22 +706,17 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); - assert(list_is_empty(STM_PSEGMENT->modified_old_objects_markers)); + assert(list_is_empty(STM_PSEGMENT->new_objects)); + assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0])); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1])); - assert(list_is_empty(STM_PSEGMENT->young_objects_with_light_finalizers)); - assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); - assert(STM_PSEGMENT->large_overflow_objects == NULL); - assert(STM_PSEGMENT->finalizers == NULL); -#ifndef NDEBUG - /* this should not be used when objects_pointing_to_nursery == NULL */ - STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L; -#endif check_nursery_at_transaction_start(); + + stm_validate(); } long stm_start_transaction(stm_thread_local_t *tl) @@ -399,553 +733,142 @@ void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - /* used to be more efficient, starting directly an inevitable transaction, - but there is no real point any more, I believe */ - rewind_jmp_buf rjbuf; - stm_rewind_jmp_enterframe(tl, &rjbuf); - - stm_start_transaction(tl); - stm_become_inevitable(tl, "start_inevitable_transaction"); - - stm_rewind_jmp_leaveframe(tl, &rjbuf); + s_mutex_lock(); + _stm_start_transaction(tl); + _stm_become_inevitable("stm_start_inevitable_transaction"); } +#ifdef STM_NO_AUTOMATIC_SETJMP +void _test_run_abort(stm_thread_local_t *tl) __attribute__((noreturn)); +int stm_is_inevitable(void) +{ + switch (STM_PSEGMENT->transaction_state) { + case TS_REGULAR: return 
0; + case TS_INEVITABLE: return 1; + default: abort(); + } +} +#endif /************************************************************/ +static void _finish_transaction() +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; -static bool detect_write_read_conflicts(void) -{ - /* Detect conflicts of the form: we want to commit a write to an object, - but the same object was also read in a different thread. - */ - long i; - for (i = 1; i <= NB_SEGMENTS; i++) { - - if (i == STM_SEGMENT->segment_num) - continue; - - if (get_priv_segment(i)->transaction_state == TS_NONE) - continue; /* no need to check */ - - if (is_aborting_now(i)) - continue; /* no need to check: is pending immediate abort */ - - char *remote_base = get_segment_base(i); - uint8_t remote_version = get_segment(i)->transaction_read_version; - - LIST_FOREACH_R( - STM_PSEGMENT->modified_old_objects, - object_t * /*item*/, - ({ - if (was_read_remote(remote_base, item, remote_version)) { - /* A write-read conflict! */ - dprintf(("write-read conflict on %p, our seg: %d, other: %ld\n", - item, STM_SEGMENT->segment_num, i)); - if (write_read_contention_management(i, item)) { - /* If we reach this point, we didn't abort, but we - had to wait for the other thread to commit. If we - did, then we have to restart committing from our call - to synchronize_all_threads(). */ - return true; - } - /* we aborted the other transaction without waiting, so - we can just break out of this loop on - modified_old_objects and continue with the next - segment */ - break; - } - })); - } - - return false; -} - -static void copy_object_to_shared(object_t *obj, int source_segment_num) -{ - /* Only used by major GC. XXX There is a lot of code duplication - with synchronize_object_now() but I don't completely see how to - improve... 
- */ - assert(!_is_young(obj)); - - char *segment_base = get_segment_base(source_segment_num); - uintptr_t start = (uintptr_t)obj; - uintptr_t first_page = start / 4096UL; - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(segment_base, obj); - - if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { - abort();//XXX WRITE THE FAST CASE - } - else { - ssize_t obj_size = stmcb_size_rounded_up(realobj); - assert(obj_size >= 16); - uintptr_t end = start + obj_size; - uintptr_t last_page = (end - 1) / 4096UL; - - for (; first_page <= last_page; first_page++) { - - /* Copy the object into the shared page, if needed */ - if (is_private_page(source_segment_num, first_page)) { - - uintptr_t copy_size; - if (first_page == last_page) { - /* this is the final fragment */ - copy_size = end - start; - } - else { - /* this is a non-final fragment, going up to the - page's end */ - copy_size = 4096 - (start & 4095); - } - /* double-check that the result fits in one page */ - assert(copy_size > 0); - assert(copy_size + (start & 4095) <= 4096); - - char *src = REAL_ADDRESS(segment_base, start); - char *dst = REAL_ADDRESS(stm_object_pages, start); - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - - start = (start + 4096) & ~4095; - } - } -} - -static void _page_wise_synchronize_object_now(object_t *obj) -{ - uintptr_t start = (uintptr_t)obj; - uintptr_t first_page = start / 4096UL; - - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - assert(obj_size >= 16); - uintptr_t end = start + obj_size; - uintptr_t last_page = (end - 1) / 4096UL; - long i, myself = STM_SEGMENT->segment_num; - - for (; first_page <= last_page; first_page++) { - - uintptr_t copy_size; - if (first_page == last_page) { - /* this is the final fragment */ - copy_size = end - start; - } - else { - /* this is a non-final fragment, going up to the - page's end */ - copy_size = 4096 - 
(start & 4095); - } - /* double-check that the result fits in one page */ - assert(copy_size > 0); - assert(copy_size + (start & 4095) <= 4096); - - /* First copy the object into the shared page, if needed */ - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - char *dst = REAL_ADDRESS(stm_object_pages, start); - if (is_private_page(myself, first_page)) { - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ - } - - for (i = 1; i <= NB_SEGMENTS; i++) { - if (i == myself) - continue; - - /* src = REAL_ADDRESS(stm_object_pages, start); */ - dst = REAL_ADDRESS(get_segment_base(i), start); - if (is_private_page(i, first_page)) { - /* The page is a private page. We need to diffuse this - fragment of object from the shared page to this private - page. */ - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - else { - assert(!memcmp(dst, src, copy_size)); /* same page */ - } - } - - start = (start + 4096) & ~4095; - } -} - -static inline bool _has_private_page_in_range( - long seg_num, uintptr_t start, uintptr_t size) -{ - uintptr_t first_page = start / 4096UL; - uintptr_t last_page = (start + size) / 4096UL; - for (; first_page <= last_page; first_page++) - if (is_private_page(seg_num, first_page)) - return true; - return false; -} - -static void _card_wise_synchronize_object_now(object_t *obj) -{ - assert(obj_should_use_cards(obj)); - assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); - assert(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)); - - uintptr_t offset_itemsize[2]; - struct object_s *realobj = (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - size_t obj_size = stmcb_size_rounded_up(realobj); - assert(obj_size >= 32); - stmcb_get_card_base_itemsize(realobj, offset_itemsize); - size_t real_idx_count = (obj_size - offset_itemsize[0]) / offset_itemsize[1]; - - uintptr_t first_card_index = 
get_write_lock_idx((uintptr_t)obj); - uintptr_t card_index = 1; - uintptr_t last_card_index = get_index_to_card_index(real_idx_count - 1); /* max valid index */ - long i, myself = STM_SEGMENT->segment_num; - - /* simple heuristic to check if probably the whole object is - marked anyway so we should do page-wise synchronize */ - if (write_locks[first_card_index + 1] == CARD_MARKED_OLD - && write_locks[first_card_index + last_card_index] == CARD_MARKED_OLD - && write_locks[first_card_index + (last_card_index >> 1) + 1] == CARD_MARKED_OLD) { - - dprintf(("card_wise_sync assumes %p,size:%lu is fully marked\n", obj, obj_size)); - _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), - obj, CARD_CLEAR, false); - _page_wise_synchronize_object_now(obj); - return; - } - - dprintf(("card_wise_sync syncs %p,size:%lu card-wise\n", obj, obj_size)); - - /* Combine multiple marked cards and do a memcpy for them. We don't - try yet to use page_copy() or otherwise take into account privatization - of pages (except _has_private_page_in_range) */ - bool all_cards_were_cleared = true; - - uintptr_t start_card_index = -1; - while (card_index <= last_card_index) { - uintptr_t card_lock_idx = first_card_index + card_index; - uint8_t card_value = write_locks[card_lock_idx]; - - if (card_value == CARD_MARKED_OLD) { - write_locks[card_lock_idx] = CARD_CLEAR; - - if (start_card_index == -1) { /* first marked card */ - start_card_index = card_index; - /* start = (uintptr_t)obj + stmcb_index_to_byte_offset( */ - /* realobj, get_card_index_to_index(card_index)); */ - if (all_cards_were_cleared) { - all_cards_were_cleared = false; - } - } - } - else { - OPT_ASSERT(card_value == CARD_CLEAR); - } - - if (start_card_index != -1 /* something to copy */ - && (card_value != CARD_MARKED_OLD /* found non-marked card */ - || card_index == last_card_index)) { /* this is the last card */ - /* do the copying: */ - uintptr_t start, copy_size; - uintptr_t next_card_offset; - uintptr_t 
start_card_offset; - uintptr_t next_card_index = card_index; - - if (card_value == CARD_MARKED_OLD) { - /* card_index is the last card of the object, but we need - to go one further to get the right offset */ - next_card_index++; - } - - start_card_offset = offset_itemsize[0] + - get_card_index_to_index(start_card_index) * offset_itemsize[1]; - - next_card_offset = offset_itemsize[0] + - get_card_index_to_index(next_card_index) * offset_itemsize[1]; - - if (next_card_offset > obj_size) - next_card_offset = obj_size; - - start = (uintptr_t)obj + start_card_offset; - copy_size = next_card_offset - start_card_offset; - OPT_ASSERT(copy_size > 0); - - /* dprintf(("copy %lu bytes\n", copy_size)); */ - - /* since we have marked cards, at least one page here must be private */ - assert(_has_private_page_in_range(myself, start, copy_size)); - - /* copy to shared segment: */ - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - char *dst = REAL_ADDRESS(stm_object_pages, start); - memcpy(dst, src, copy_size); - - /* copy to other segments */ - for (i = 1; i <= NB_SEGMENTS; i++) { - if (i == myself) - continue; - if (!_has_private_page_in_range(i, start, copy_size)) - continue; - /* src = REAL_ADDRESS(stm_object_pages, start); */ - dst = REAL_ADDRESS(get_segment_base(i), start); - memcpy(dst, src, copy_size); - } - - start_card_index = -1; - } - - card_index++; - } - - if (all_cards_were_cleared) { - /* well, seems like we never called stm_write_card() on it, so actually - we need to fall back to synchronize the whole object */ - _page_wise_synchronize_object_now(obj); - return; - } - -#ifndef NDEBUG - char *src = REAL_ADDRESS(stm_object_pages, (uintptr_t)obj); - char *dst; - for (i = 1; i <= NB_SEGMENTS; i++) { - dst = REAL_ADDRESS(get_segment_base(i), (uintptr_t)obj); - assert(memcmp(dst, src, obj_size) == 0); - } -#endif -} - - -static void synchronize_object_now(object_t *obj, bool ignore_cards) -{ - /* Copy around the version of 'obj' that lives in our own 
segment. - It is first copied into the shared pages, and then into other - segments' own private pages. - - Must be called with the privatization lock acquired. - */ - assert(!_is_young(obj)); - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - assert(STM_PSEGMENT->privatization_lock == 1); - - if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { - assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); - abort();//XXX WRITE THE FAST CASE - } else if (ignore_cards || !obj_should_use_cards(obj)) { - _page_wise_synchronize_object_now(obj); - } else { - _card_wise_synchronize_object_now(obj); - } - - _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), obj); -} - -static void push_overflow_objects_from_privatized_pages(void) -{ - if (STM_PSEGMENT->large_overflow_objects == NULL) - return; - - acquire_privatization_lock(); - LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, - synchronize_object_now(item, true /*ignore_cards*/)); - release_privatization_lock(); -} - -static void push_modified_to_other_segments(void) -{ - acquire_privatization_lock(); - LIST_FOREACH_R( - STM_PSEGMENT->modified_old_objects, - object_t * /*item*/, - ({ - /* clear the write-lock (note that this runs with all other - threads paused, so no need to be careful about ordering) */ - uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; - assert(lock_idx < sizeof(write_locks)); - assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); - write_locks[lock_idx] = 0; - - /* the WRITE_BARRIER flag should have been set again by - minor_collection() */ - assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); - - /* copy the object to the shared page, and to the other - private pages as needed */ - synchronize_object_now(item, false); /* don't ignore_cards */ - })); - release_privatization_lock(); - - list_clear(STM_PSEGMENT->modified_old_objects); - list_clear(STM_PSEGMENT->modified_old_objects_markers); -} - -static void _finish_transaction(enum stm_event_e event) -{ 
STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; - - /* marker_inev is not needed anymore */ - STM_PSEGMENT->marker_inev.object = NULL; - - /* reset these lists to NULL for the next transaction */ - _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); - LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); - list_clear(STM_PSEGMENT->old_objects_with_cards); - LIST_FREE(STM_PSEGMENT->large_overflow_objects); - - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - timing_event(tl, event); + list_clear(STM_PSEGMENT->objects_pointing_to_nursery); + list_clear(STM_PSEGMENT->new_objects); release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ } +static void check_all_write_barrier_flags(char *segbase, struct list_s *list) +{ +#ifndef NDEBUG + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); + for (; undo < end; undo++) { + object_t *obj = undo->object; + struct object_s *dst = (struct object_s*)REAL_ADDRESS(segbase, obj); + assert(dst->stm_flags & GCFLAG_WRITE_BARRIER); + assert(!(dst->stm_flags & GCFLAG_WB_EXECUTED)); + } +#endif +} + +static void push_new_objects_to_other_segments(void) +{ + acquire_privatization_lock(STM_SEGMENT->segment_num); + LIST_FOREACH_R(STM_PSEGMENT->new_objects, object_t *, + ({ + assert(item->stm_flags & GCFLAG_WB_EXECUTED); + item->stm_flags &= ~GCFLAG_WB_EXECUTED; + synchronize_object_enqueue(item); + })); + synchronize_objects_flush(); + release_privatization_lock(STM_SEGMENT->segment_num); + + /* we can as well clear the list here, since the + objects are only useful if the commit succeeds. And + we never do a major collection in-between. + They should also survive any page privatization happening + before the actual commit, since we always do a pagecopy + in handle_segfault_in_page() that also copies + unknown-to-the-segment/uncommitted things. 
+ */ + list_clear(STM_PSEGMENT->new_objects); +} + + void stm_commit_transaction(void) { - restart_all: - exec_local_finalizers(); - assert(!_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); assert(STM_PSEGMENT->running_pthread == pthread_self()); - minor_collection(/*commit=*/ true); + dprintf(("> stm_commit_transaction()\n")); + minor_collection(1); - /* synchronize overflow objects living in privatized pages */ - push_overflow_objects_from_privatized_pages(); - - s_mutex_lock(); - - restart: - /* force all other threads to be paused. They will unpause - automatically when we are done here, i.e. at mutex_unlock(). - Important: we should not call cond_wait() in the meantime. */ - synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); - - if (any_local_finalizers()) { - s_mutex_unlock(); - goto restart_all; - } - - /* detect conflicts */ - if (detect_write_read_conflicts()) - goto restart; - - /* cannot abort any more from here */ - dprintf(("commit_transaction\n")); - - assert(STM_SEGMENT->nursery_end == NURSERY_END); - stm_rewind_jmp_forget(STM_SEGMENT->running_thread); - - /* if a major collection is required, do it here */ - if (is_major_collection_requested()) { - timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_START); - major_collection_now_at_safe_point(); - timing_event(STM_SEGMENT->running_thread, STM_GC_MAJOR_DONE); - } - - /* synchronize modified old objects to other threads */ - push_modified_to_other_segments(); - _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); - - commit_finalizers(); - - /* update 'overflow_number' if needed */ - if (STM_PSEGMENT->overflow_number_has_been_used) { - highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; - assert(highest_overflow_number != /* XXX else, overflow! 
*/ - (uint32_t)-GCFLAG_OVERFLOW_NUMBER_bit0); - STM_PSEGMENT->overflow_number = highest_overflow_number; - STM_PSEGMENT->overflow_number_has_been_used = false; - } + push_new_objects_to_other_segments(); + /* push before validate. otherwise they are reachable too early */ + _validate_and_add_to_commit_log(); invoke_and_clear_user_callbacks(0); /* for commit */ - /* send what is hopefully the correct signals */ - if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { - /* wake up one thread in wait_for_end_of_inevitable_transaction() */ - cond_signal(C_INEVITABLE); - if (globally_unique_transaction) - committed_globally_unique_transaction(); + /* XXX do we still need a s_mutex_lock() section here? */ + s_mutex_lock(); + enter_safe_point_if_requested(); + assert(STM_SEGMENT->nursery_end == NURSERY_END); + + stm_rewind_jmp_forget(STM_SEGMENT->running_thread); + + if (globally_unique_transaction && STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + committed_globally_unique_transaction(); } /* done */ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - _finish_transaction(STM_TRANSACTION_COMMIT); + _finish_transaction(); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); - - invoke_general_finalizers(tl); } -void stm_abort_transaction(void) +static void reset_modified_from_backup_copies(int segment_num) { - s_mutex_lock(); - abort_with_mutex(); -} +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + assert(get_priv_segment(segment_num)->modification_lock); -static void -reset_modified_from_other_segments(int segment_num) -{ - /* pull the right versions from segment 0 in order - to reset our pages as part of an abort. 
+ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); + struct list_s *list = pseg->modified_old_objects; + struct stm_undo_s *undo = (struct stm_undo_s *)list->items; + struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count); - Note that this function is also sometimes called from - contention.c to clean up the state of a different thread, - when we would really like it to be aborted now and it is - suspended at a safe-point. - */ - struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); - char *local_base = get_segment_base(segment_num); - char *remote_base = get_segment_base(0); + for (; undo < end; undo++) { + object_t *obj = undo->object; + char *dst = REAL_ADDRESS(pseg->pub.segment_base, obj); - LIST_FOREACH_R( - pseg->modified_old_objects, - object_t * /*item*/, - ({ - /* memcpy in the opposite direction than - push_modified_to_other_segments() */ - char *src = REAL_ADDRESS(remote_base, item); - char *dst = REAL_ADDRESS(local_base, item); - ssize_t size = stmcb_size_rounded_up((struct object_s *)src); - memcpy(dst, src, size); + memcpy(dst + SLICE_OFFSET(undo->slice), + undo->backup, + SLICE_SIZE(undo->slice)); - if (obj_should_use_cards(item)) - _reset_object_cards(pseg, item, CARD_CLEAR, false); + dprintf(("reset_modified_from_backup_copies(%d): obj=%p off=%lu bk=%p\n", + segment_num, obj, SLICE_OFFSET(undo->slice), undo->backup)); - /* objects in 'modified_old_objects' usually have the - WRITE_BARRIER flag, unless they have been modified - recently. Ignore the old flag; after copying from the - other segment, we should have the flag. */ - assert(((struct object_s *)dst)->stm_flags & GCFLAG_WRITE_BARRIER); + free(undo->backup); + } - /* write all changes to the object before we release the - write lock below. This is needed because we need to - ensure that if the write lock is not set, another thread - can get it and then change 'src' in parallel. 
The - write_fence() ensures in particular that 'src' has been - fully read before we release the lock: reading it - is necessary to write 'dst'. */ - write_fence(); + /* check that all objects have the GCFLAG_WRITE_BARRIER afterwards */ + check_all_write_barrier_flags(pseg->pub.segment_base, list); - /* clear the write-lock */ - uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; From noreply at buildbot.pypy.org Thu Jan 22 11:37:54 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 Jan 2015 11:37:54 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c8: add missing files from last commit Message-ID: <20150122103754.587201C05A0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c8 Changeset: r75479:acf538f01489 Date: 2015-01-22 10:59 +0100 http://bitbucket.org/pypy/pypy/changeset/acf538f01489/ Log: add missing files from last commit diff --git a/rpython/translator/stm/src_stm/stm/smallmalloc.c b/rpython/translator/stm/src_stm/stm/smallmalloc.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/smallmalloc.c @@ -0,0 +1,366 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +#define PAGE_SMSIZE_START 0 +#define PAGE_SMSIZE_END NB_SHARED_PAGES + +typedef struct { + uint8_t sz; +} fpsz_t; + +static fpsz_t full_pages_object_size[PAGE_SMSIZE_END - PAGE_SMSIZE_START]; +/* ^^^ This array contains the size (in number of words) of the objects + in the given page, provided it's a "full page of small objects". It + is 0 if it's not such a page, if it's fully free, or if it's in + small_page_lists. It is not 0 as soon as the page enters the + segment's 'small_malloc_data.loc_free' (even if the page is not + technically full yet, it will be very soon in this case). 
+*/ + +static fpsz_t *get_fpsz(char *smallpage) +{ + uintptr_t pagenum = (((char *)smallpage) - END_NURSERY_PAGE * 4096UL - stm_object_pages) / 4096; + /* <= PAGE_SMSIZE_END because we may ask for it when there is no + page with smallobjs yet and uninit_page_stop == NB_PAGES... */ + assert(PAGE_SMSIZE_START <= pagenum && pagenum <= PAGE_SMSIZE_END); + return &full_pages_object_size[pagenum - PAGE_SMSIZE_START]; +} + + +#ifdef STM_TESTS +bool (*_stm_smallmalloc_keep)(char *data); /* a hook for tests */ +#endif + +static void teardown_smallmalloc(void) +{ + memset(small_page_lists, 0, sizeof(small_page_lists)); + assert(free_uniform_pages == NULL); /* done by the previous line */ + first_small_uniform_loc = (uintptr_t) -1; +#ifdef STM_TESTS + _stm_smallmalloc_keep = NULL; +#endif + memset(full_pages_object_size, 0, sizeof(full_pages_object_size)); +} + +static int gmfp_lock = 0; + +static void grab_more_free_pages_for_small_allocations(void) +{ + dprintf(("grab_more_free_pages_for_small_allocation()\n")); + /* Grab GCPAGE_NUM_PAGES pages out of the top addresses. Use the + lock of pages.c to prevent any remapping from occurring under our + feet. 
+ */ + spinlock_acquire(gmfp_lock); + + if (free_uniform_pages == NULL) { + + uintptr_t decrease_by = GCPAGE_NUM_PAGES * 4096; + if (uninitialized_page_stop - uninitialized_page_start < decrease_by) + goto out_of_memory; + + uninitialized_page_stop -= decrease_by; + first_small_uniform_loc = uninitialized_page_stop - stm_object_pages; + + if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - uninitialized_page_start)) + goto out_of_memory; + + /* make writable in sharing seg */ + setup_N_pages(uninitialized_page_stop, GCPAGE_NUM_PAGES); + + char *p = uninitialized_page_stop; + long i; + for (i = 0; i < GCPAGE_NUM_PAGES; i++) { + /* add to free_uniform_pages list */ + struct small_free_loc_s *to_add = (struct small_free_loc_s *)p; + + retry: + to_add->nextpage = free_uniform_pages; + if (UNLIKELY(!__sync_bool_compare_and_swap( + &free_uniform_pages, + to_add->nextpage, + to_add))) { + goto retry; + } + + p += 4096; + } + } + + spinlock_release(gmfp_lock); + return; + + out_of_memory: + stm_fatalerror("out of memory!\n"); /* XXX */ +} + +static char *_allocate_small_slowpath(uint64_t size) +{ + dprintf(("_allocate_small_slowpath(%lu)\n", size)); + long n = size / 8; + struct small_free_loc_s *smallpage; + struct small_free_loc_s *TLPREFIX *fl = + &STM_PSEGMENT->small_malloc_data.loc_free[n]; + assert(*fl == NULL); + + retry: + /* First try to grab the next page from the global 'small_page_list' + */ + smallpage = small_page_lists[n]; + if (smallpage != NULL) { + if (UNLIKELY(!__sync_bool_compare_and_swap(&small_page_lists[n], + smallpage, + smallpage->nextpage))) + goto retry; + + /* Succeeded: we have a page in 'smallpage' */ + *fl = smallpage->next; + get_fpsz((char *)smallpage)->sz = n; + return (char *)smallpage; + } + + /* There is no more page waiting for the correct size of objects. + Maybe we can pick one from free_uniform_pages. 
+ */ + smallpage = free_uniform_pages; + if (smallpage != NULL) { + if (UNLIKELY(!__sync_bool_compare_and_swap(&free_uniform_pages, + smallpage, + smallpage->nextpage))) + goto retry; + + /* Succeeded: we have a page in 'smallpage', which is not + initialized so far, apart from the 'nextpage' field read + above. Initialize it. + */ + struct small_free_loc_s *p, **previous; + assert(!(((uintptr_t)smallpage) & 4095)); + previous = (struct small_free_loc_s **) + REAL_ADDRESS(STM_SEGMENT->segment_base, fl); + + /* Initialize all slots from the second one to the last one to + contain a chained list */ + uintptr_t i = size; + while (i <= 4096 - size) { + p = (struct small_free_loc_s *)(((char *)smallpage) + i); + *previous = p; + previous = &p->next; + i += size; + } + *previous = NULL; + + /* The first slot is immediately returned */ + get_fpsz((char *)smallpage)->sz = n; + return (char *)smallpage; + } + + /* Not a single free page left. Grab some more free pages and retry. + */ + grab_more_free_pages_for_small_allocations(); + goto retry; +} + +__attribute__((always_inline)) +static inline stm_char *allocate_outside_nursery_small(uint64_t size) +{ + OPT_ASSERT((size & 7) == 0); + OPT_ASSERT(16 <= size && size <= GC_LAST_SMALL_SIZE); + + struct small_free_loc_s *TLPREFIX *fl = + &STM_PSEGMENT->small_malloc_data.loc_free[size / 8]; + + struct small_free_loc_s *result = *fl; + + increment_total_allocated(size); + + if (UNLIKELY(result == NULL)) + return (stm_char*) + (_allocate_small_slowpath(size) - stm_object_pages); + + *fl = result->next; + dprintf(("allocate_outside_nursery_small(%lu): %p\n", + size, (char*)((char *)result - stm_object_pages))); + return (stm_char*) + ((char *)result - stm_object_pages); +} + +object_t *_stm_allocate_old_small(ssize_t size_rounded_up) +{ + stm_char *p = allocate_outside_nursery_small(size_rounded_up); + object_t *o = (object_t *)p; + + // sharing seg0 needs to be current: + assert(STM_SEGMENT->segment_num == 0); + 
memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); + o->stm_flags = GCFLAG_WRITE_BARRIER; + + dprintf(("_stm_allocate_old_small(%lu): %p, seg=%d, page=%lu\n", + size_rounded_up, p, + get_segment_of_linear_address(stm_object_pages + (uintptr_t)p), + (uintptr_t)p / 4096UL)); + + return o; +} + +/************************************************************/ + +static inline bool _smallmalloc_sweep_keep(char *p) +{ +#ifdef STM_TESTS + if (_stm_smallmalloc_keep != NULL) { + // test wants a TLPREFIXd address + return _stm_smallmalloc_keep((char*)(p - stm_object_pages)); + } +#endif + return smallmalloc_keep_object_at(p); +} + +void check_order_inside_small_page(struct small_free_loc_s *page) +{ +#ifndef NDEBUG + /* the free locations are supposed to be in increasing order */ + while (page->next != NULL) { + assert(page->next > page); + page = page->next; + } +#endif +} + +static char *getbaseptr(struct small_free_loc_s *fl) +{ + return (char *)(((uintptr_t)fl) & ~4095); +} + +void sweep_small_page(char *baseptr, struct small_free_loc_s *page_free, + long szword) +{ + if (page_free != NULL) + check_order_inside_small_page(page_free); + + /* for every non-free location, ask if we must free it */ + uintptr_t i, size = szword * 8; + bool any_object_remaining = false, any_object_dying = false; + struct small_free_loc_s *fl = page_free; + struct small_free_loc_s *flprev = NULL; + + /* XXX could optimize for the case where all objects die: we don't + need to painfully rebuild the free list in the whole page, just + to have it ignored in the end because we put the page into + 'free_uniform_pages' */ + + for (i = 0; i <= 4096 - size; i += size) { + char *p = baseptr + i; + if (p == (char *)fl) { + /* location is already free */ + flprev = fl; + fl = fl->next; + any_object_dying = true; + } + else if (!_smallmalloc_sweep_keep(p)) { + /* the location should be freed now */ + increment_total_allocated(-szword*8); +#ifdef STM_TESTS + /* fill location with 0xdd in 
all segs except seg0 */ + int j; + object_t *obj = (object_t*)(p - stm_object_pages); + uintptr_t page = (baseptr - stm_object_pages) / 4096UL; + for (j = 1; j < NB_SEGMENTS; j++) + if (get_page_status_in(j, page) == PAGE_ACCESSIBLE) + memset(get_virtual_address(j, obj), 0xdd, szword*8); +#endif + //dprintf(("free small %p : %lu\n", (char*)(p - stm_object_pages), szword*8)); + + if (flprev == NULL) { + flprev = (struct small_free_loc_s *)p; + flprev->next = fl; + page_free = flprev; + } + else { + assert(flprev->next == fl); + flprev->next = (struct small_free_loc_s *)p; + flprev = (struct small_free_loc_s *)p; + flprev->next = fl; + } + any_object_dying = true; + } + else { + //dprintf(("keep small %p : %lu\n", (char*)(p - stm_object_pages), szword*8)); + any_object_remaining = true; + } + } + if (!any_object_remaining) { + /* give page back to free_uniform_pages and thus make it + inaccessible from all other segments again (except seg0) */ + uintptr_t page = (baseptr - stm_object_pages) / 4096UL; + for (i = 1; i < NB_SEGMENTS; i++) { + if (get_page_status_in(i, page) == PAGE_ACCESSIBLE) + page_mark_inaccessible(i, page); + } + + ((struct small_free_loc_s *)baseptr)->nextpage = free_uniform_pages; + free_uniform_pages = (struct small_free_loc_s *)baseptr; + } + else if (!any_object_dying) { + get_fpsz(baseptr)->sz = szword; + } + else { + check_order_inside_small_page(page_free); + page_free->nextpage = small_page_lists[szword]; + small_page_lists[szword] = page_free; + } +} + +void _stm_smallmalloc_sweep(void) +{ + long i, szword; + for (szword = 2; szword < GC_N_SMALL_REQUESTS; szword++) { + struct small_free_loc_s *page = small_page_lists[szword]; + struct small_free_loc_s *nextpage; + small_page_lists[szword] = NULL; + + /* process the pages that the various segments are busy filling */ + /* including sharing seg0 for old-malloced things */ + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct 
small_free_loc_s **fl = + &pseg->small_malloc_data.loc_free[szword]; + if (*fl != NULL) { + /* the entry in full_pages_object_size[] should already be + szword. We reset it to 0. */ + fpsz_t *fpsz = get_fpsz((char *)*fl); + assert(fpsz->sz == szword); + fpsz->sz = 0; + sweep_small_page(getbaseptr(*fl), *fl, szword); + *fl = NULL; + } + } + + /* process all the other partially-filled pages */ + while (page != NULL) { + /* for every page in small_page_lists: assert that the + corresponding full_pages_object_size[] entry is 0 */ + assert(get_fpsz((char *)page)->sz == 0); + nextpage = page->nextpage; + sweep_small_page(getbaseptr(page), page, szword); + page = nextpage; + } + } + + /* process the really full pages, which are the ones which still + have a non-zero full_pages_object_size[] entry */ + char *pageptr = uninitialized_page_stop; + fpsz_t *fpsz_start = get_fpsz(pageptr); + fpsz_t *fpsz_end = &full_pages_object_size[PAGE_SMSIZE_END - + PAGE_SMSIZE_START]; + fpsz_t *fpsz; + for (fpsz = fpsz_start; fpsz < fpsz_end; fpsz++, pageptr += 4096) { + uint8_t sz = fpsz->sz; + if (sz != 0) { + fpsz->sz = 0; + sweep_small_page(pageptr, NULL, sz); + } + } +} diff --git a/rpython/translator/stm/src_stm/stm/smallmalloc.h b/rpython/translator/stm/src_stm/stm/smallmalloc.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/smallmalloc.h @@ -0,0 +1,67 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ + +/* Outside the nursery, we are taking from the highest addresses + complete pages, one at a time, which uniformly contain objects of + size "8 * N" for some N in range(2, GC_N_SMALL_REQUESTS). We are + taking from the lowest addresses "large" objects, which are at least + 288 bytes long, allocated by largemalloc.c. The limit is the same + as used in PyPy's default GC. 
+*/ + +#define GC_N_SMALL_REQUESTS 36 +#define GC_LAST_SMALL_SIZE (8 * (GC_N_SMALL_REQUESTS - 1)) + + +struct small_free_loc_s { + /* A chained list of locations within the same page which are + free. */ + struct small_free_loc_s *next; + + /* A chained list of all small pages containing objects of a given + small size, and that have at least one free object. It points + *inside* the next page, to another struct small_free_loc_s. This + field is only meaningful on the first small_free_loc_s of a given + page! */ + struct small_free_loc_s *nextpage; + + /* This structure is only two words, so it always fits inside one + free slot inside the page. */ +}; + + +/* For every size from 16 bytes to 8*(GC_N_SMALL_REQUESTS-1), this is + a list of pages that contain objects of that size and have at least + one free location. Additionally, the item 0 in the following list + is a chained list of fully-free pages (which can be reused for a + different size than the one they originally contained). +*/ +static struct small_free_loc_s *small_page_lists[GC_N_SMALL_REQUESTS]; + +#define free_uniform_pages (small_page_lists[0]) + + +/* For is_small_uniform(). */ +static uintptr_t first_small_uniform_loc = (uintptr_t) -1; + + +/* This is a definition for 'STM_PSEGMENT->small_malloc_data'. Each + segment grabs one page at a time from the global list, and then + requests for data are answered locally. 
+*/ +struct small_malloc_data_s { + struct small_free_loc_s *loc_free[GC_N_SMALL_REQUESTS]; +}; + + +/* Functions + */ +static inline stm_char *allocate_outside_nursery_small(uint64_t size) + __attribute__((always_inline)); + +void _stm_smallmalloc_sweep(void); + +static void teardown_smallmalloc(void); + +static inline bool is_small_uniform(object_t *obj) { + return ((uintptr_t)obj) >= first_small_uniform_loc; +} From noreply at buildbot.pypy.org Thu Jan 22 11:37:55 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 Jan 2015 11:37:55 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c8: allow duhton to translate by adding some dummy implementations for missing features in c8 Message-ID: <20150122103755.9476A1C05A0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c8 Changeset: r75480:8453464f9ab4 Date: 2015-01-22 11:00 +0100 http://bitbucket.org/pypy/pypy/changeset/8453464f9ab4/ Log: allow duhton to translate by adding some dummy implementations for missing features in c8 diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -9,6 +9,14 @@ __thread uintptr_t pypy_stm_nursery_low_fill_mark; __thread uintptr_t pypy_stm_nursery_low_fill_mark_saved; + +/* C8: not implemented properly yet: */ +void (*stmcb_light_finalizer)(object_t *o); +void (*stmcb_finalizer)(object_t *o); +/* C8: not implemented properly yet ^^^^^^^^^^^^^^^^^^ */ + + + extern Signed pypy_stmcb_size_rounded_up(void*); extern void pypy_stmcb_get_card_base_itemsize(void*, uintptr_t[]); extern void pypy_stmcb_trace(void*, void(*)(void*)); diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -39,6 +39,23 @@ long _pypy_stm_count(void); +/* C8: not implemented properly yet: */ +typedef struct { + 
stm_thread_local_t *tl; + char *segment_base; /* base to interpret the 'object' below */ + uintptr_t odd_number; /* marker odd number, or 0 if marker is missing */ + object_t *object; /* marker object, or NULL if marker is missing */ +} stm_loc_marker_t; +extern void (*stmcb_light_finalizer)(object_t *o); +extern void (*stmcb_finalizer)(object_t *o); +static inline object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up) { + return stm_allocate(size_rounded_up); +} +static inline void stm_enable_light_finalizer(object_t *o) {}; +static inline int stm_set_timing_log(const char *profiling_file_name, + int expand_marker(stm_loc_marker_t *, char *, int)) {return 0;} +/* C8: not implemented properly yet ^^^^^^^^^^^^^^^^^^ */ + static inline void pypy_stm_become_inevitable(const char *msg) { From noreply at buildbot.pypy.org Thu Jan 22 11:37:56 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 Jan 2015 11:37:56 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c8: fix for c8's prebuilt-setup Message-ID: <20150122103756.D6E651C05A0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c8 Changeset: r75481:52a66fc6dc00 Date: 2015-01-22 11:24 +0100 http://bitbucket.org/pypy/pypy/changeset/52a66fc6dc00/ Log: fix for c8's prebuilt-setup diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -788,7 +788,7 @@ database, database.translator.rtyper) for line in preimplementationlines: print >> f, line - f.write('#endif /* _PY_PREIMPL_H */\n') + f.write('#endif /* _PY_PREIMPL_H */\n') def gen_startupcode(f, database): # generate the start-up code and put it into a function @@ -806,7 +806,6 @@ if database.with_stm: print >> f, '\tpypy_stm_setup();' - print >> f, '\tpypy_stm_setup_prebuilt();' for line in database.gcpolicy.gc_startup_code(): print >> f,"\t" + line diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- 
a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -88,6 +88,8 @@ void pypy_stm_setup(void) { stm_setup(); + pypy_stm_setup_prebuilt(); + pypy_stm_register_thread_local(); pypy_stm_ready_atomic = 1; /* set transaction length to unlimited until the first thread From noreply at buildbot.pypy.org Thu Jan 22 12:24:09 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 22 Jan 2015 12:24:09 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: (fijal, arigo, cfbolz) post about ordered dicts Message-ID: <20150122112409.999661C00B5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5488:ec0df9155cd5 Date: 2015-01-22 13:24 +0200 http://bitbucket.org/pypy/extradoc/changeset/ec0df9155cd5/ Log: (fijal, arigo, cfbolz) post about ordered dicts diff --git a/blog/draft/ordered-dicts.rst b/blog/draft/ordered-dicts.rst new file mode 100644 --- /dev/null +++ b/blog/draft/ordered-dicts.rst @@ -0,0 +1,81 @@ +Faster, more memory efficient and more ordered dictionaries on PyPy +------------------------------------------------------------------- + +Hello everyone! + +As of today, we merged the latest branch that brings better dictionaries to PyPy by default. The work is based on an idea by Raymond Hettinger on python-dev [https://mail.python.org/pipermail/python-dev/2012-December/123028.html], with prior work done notably in Java.  It was done by Maciej Fijałkowski and Armin Rigo, with Laurence Tratt recently prodding us to finish it.  (Earlier work going in a similar direction include Alex Gaynor's work on ordered dicts in Topaz, which was also used in the Hippy VM.  Each of these pieces of work is itself based on the original dict implementation in RPython, whose origins fade in the Subversion prehistory of PyPy.)  Coincidentally, a very similar idea has been implemented in Zend PHP very recently [https://nikic.github.io/2014/12/22/PHPs-new-hashtable-implementation.html]. 
+ +This post covers the basics of design and implementation as well as some basic benchmarks. + + +Dictionaries are now ordered! +----------------------------- + +One surprising part is that the new design, besides being more +memory efficient, is ordered by design: it preserves the +insertion order.  This is not forbidden by the Python language, which allows any order.  It makes the ``collections.OrderedDict`` subclass much faster than before: it is now a thin subclass of ``dict``.  Obviously, we recommend that any portable Python program continues to use ``OrderedDict`` when ordering is important.  Note that a non-portable program might rely on more: for example, a ``**keywords`` argument now receives the keywords in the same order as the one in which they were given in the call.  (Whether such a thing might be called a language design change or not is a bit borderline.)  The point is that Python programs that work on CPython or previous versions of PyPy should continue to work on PyPy. + +There is one exception, though.  The iterators of the ``OrderedDict`` subclass are now working just like the ones of the ``dict`` builtin: they will raise ``RuntimeError`` when iterating if the dictionary was modified.  In the CPython design, the class ``OrderedDict`` explicitly doesn't worry about that, and instead you get some result that might range from correct to incorrect to crashes (i.e. random Python exceptions). + + +Original PyPy dictionary design +------------------------------- + +Originally, PyPy dictionaries, as well as CPython dictionaries +are implemented as follows (simplified view):: + +  struct dict { +     long num_items; +     dict_entry* items;   /* pointer to array */ +  } +   +  struct dict_entry { +      long hash; +      PyObject* key; +      PyObject* value; +  } +   +Where items is a sparse array, with 1/3 to 1/2 of the items being NULL. 
+The average space occupied by a dictionary is ``3 * WORD * 12/7`` plus some small constant (the smallest dict has 8 entries, which is +``8 * 3 * WORD + 2 * WORD = 26 WORDs``). + + +New PyPy dictionary design +-------------------------- + +The new PyPy dictionary is split in two arrays:: + +  struct dict { +      long num_items; +      variable_int *sparse_array; +      dict_entry* compact_array; +  } +   +  struct dict_entry { +      long hash; +      PyObject *key; +      PyObject *value; +  } +   +Here, ``compact_array`` stores all the items in order of insertion, while ``sparse_array`` is a 1/2 to 2/3 full array of integers. The integers themselves are of the smallest size necessary for indexing the ``compact_array``. So if ``compact_array`` has less than 256 items, then ``sparse_array`` will be made of bytes; if less than 2^16, it'll be two-byte integers; and so on. + +This design saves quite a bit of memory.  For example, on 64bit systems we can, but almost never, use indexing of more than 4 billion elements; and for small dicts, the extra ``sparse_array`` takes very little space.  For example a 100 element dict, would be on average for the original design on 64bit: 100 * 12/7 * WORD * 3 =~ 4100 bytes, while on new design it's 100 * 12/7 + 3 * WORD * 100 =~ 2600 bytes, quite a significant saving. + +GC friendliness +--------------- + +The obvious benefit of having more compact dictionaries is an increased cache friendliness. In modern CPUs cache misses are much more costly than doing additional simple work, like having an additional level of (in-cache) indirection. Additionally, there is a GC benefit coming from it. When doing a minor collection, the GC has to visit all the GC fields in old objects that can point to young objects. In the case of large arrays, this can prove problematic since the array grows and with each minor collection we need to visit more and more GC pointers. 
In order to avoid it, large arrays in PyPy employ a technique called "card marking" where the GC only visits "cards" or subsets of arrays that were modified between collections. The problem with dictionaries was that by design modifications in a dictionary occur randomly, hence a lot of cards used to get invalidated. In the new design, however, new items are typically appended to the ``compact_array``, hence invalidate much fewer cards --- which improves GC performance.  (The new ``sparse_array`` is an array of integers, so it does not suffer from the same problems.) + + +Deletion +-------- + +Deleting entries from dictionaries is not very common, but important in a few use cases.  To preserve order, when we delete an entry, we mark the entry as removed but don't otherwise shuffle the remaining entries.  If we repeat this operation often enough, there will be a lot of removed entries in the (originally compact) array.  At this point, we need to do a "packing" operation, which moves all live entries to the start of the array (and then reindexes the sparse array, as the positions changed).  This works well, but there are use cases where previously no reindexing was ever needed, so it makes these cases a bit slower (for example when repeatedly adding and removing keys in equal number). + +Benchmarks +---------- + +The PyPy speed benchmarks show mostly small effect [http://speed.pypy.org/changes/?tre=10&rev=75419%3Ac52fc1774518&exe=1&env=1]. The microbenchmarks that we did show large improvements on large and very large dictionaries (particularly, building dictionaries of at least a couple 100s of items is now twice faster) and break-even on small ones (between 20% slower and 20% faster depending very much on the usage patterns and sizes of dictionaries). The new dictionaries enable various optimization possibilities which we're going to explore in the near future. 
+ +Cheers, +fijal, arigo and the PyPy team From noreply at buildbot.pypy.org Thu Jan 22 12:26:45 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 22 Jan 2015 12:26:45 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: fix rst Message-ID: <20150122112645.33C911C00B5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5489:219dc0c66db0 Date: 2015-01-22 13:27 +0200 http://bitbucket.org/pypy/extradoc/changeset/219dc0c66db0/ Log: fix rst diff --git a/blog/draft/ordered-dicts.rst b/blog/draft/ordered-dicts.rst --- a/blog/draft/ordered-dicts.rst +++ b/blog/draft/ordered-dicts.rst @@ -24,17 +24,17 @@ Originally, PyPy dictionaries, as well as CPython dictionaries are implemented as follows (simplified view):: -  struct dict { -     long num_items; -     dict_entry* items;   /* pointer to array */ -  } -   -  struct dict_entry { -      long hash; -      PyObject* key; -      PyObject* value; -  } -   + struct dict { + long num_items; + dict_entry* items;   /* pointer to array */ + } + + struct dict_entry { + long hash; + PyObject* key; + PyObject* value; + } + Where items is a sparse array, with 1/3 to 1/2 of the items being NULL. The average space occupied by a dictionary is ``3 * WORD * 12/7`` plus some small constant (the smallest dict has 8 entries, which is ``8 * 3 * WORD + 2 * WORD = 26 WORDs``). @@ -45,21 +45,21 @@ The new PyPy dictionary is split in two arrays:: -  struct dict { -      long num_items; -      variable_int *sparse_array; -      dict_entry* compact_array; -  } -   -  struct dict_entry { -      long hash; -      PyObject *key; -      PyObject *value; -  } -   + struct dict { + long num_items; + variable_int *sparse_array; + dict_entry* compact_array; + } + + struct dict_entry { + long hash; + PyObject *key; + PyObject *value; + } + Here, ``compact_array`` stores all the items in order of insertion, while ``sparse_array`` is a 1/2 to 2/3 full array of integers. 
The integers themselves are of the smallest size necessary for indexing the ``compact_array``. So if ``compact_array`` has less than 256 items, then ``sparse_array`` will be made of bytes; if less than 2^16, it'll be two-byte integers; and so on. -This design saves quite a bit of memory.  For example, on 64bit systems we can, but almost never, use indexing of more than 4 billion elements; and for small dicts, the extra ``sparse_array`` takes very little space.  For example a 100 element dict, would be on average for the original design on 64bit: 100 * 12/7 * WORD * 3 =~ 4100 bytes, while on new design it's 100 * 12/7 + 3 * WORD * 100 =~ 2600 bytes, quite a significant saving. +This design saves quite a bit of memory. For example, on 64bit systems we can, but almost never, use indexing of more than 4 billion elements; and for small dicts, the extra ``sparse_array`` takes very little space.  For example a 100 element dict, would be on average for the original design on 64bit: 100 * 12/7 * WORD * 3 =~ 4100 bytes, while on new design it's 100 * 12/7 + 3 * WORD * 100 =~ 2600 bytes, quite a significant saving. 
GC friendliness --------------- From noreply at buildbot.pypy.org Thu Jan 22 13:37:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Jan 2015 13:37:57 +0100 (CET) Subject: [pypy-commit] pypy default: add comment Message-ID: <20150122123758.011AD1C05D7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75482:cfeb96967bad Date: 2015-01-22 13:37 +0100 http://bitbucket.org/pypy/pypy/changeset/cfeb96967bad/ Log: add comment diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -46,6 +46,10 @@ @jit.oopspec('ordereddict.lookup(d, key, hash, flag)') def ll_call_lookup_function(d, key, hash, flag): fun = d.lookup_function_no & FUNC_MASK + # This likely() here forces gcc to compile the check for fun == FUNC_BYTE + # first. Otherwise, this is a regular switch and gcc (at least 4.7) + # compiles this as a series of checks, with the FUNC_BYTE case last. + # It sounds minor, but it is worth 6-7% on a PyPy microbenchmark. if likely(fun == FUNC_BYTE): return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: From noreply at buildbot.pypy.org Thu Jan 22 16:26:43 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 Jan 2015 16:26:43 +0100 (CET) Subject: [pypy-commit] stmgc default: construct a list of unique objs again during major gc. it's still necessary because large objects get traced many many times otherwise, since they are split into multiple slices Message-ID: <20150122152643.0389C1C059A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1572:509da83c0d5d Date: 2015-01-22 16:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/509da83c0d5d/ Log: construct a list of unique objs again during major gc. 
it's still necessary because large objects get traced many many times otherwise, since they are split into multiple slices diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -307,29 +307,53 @@ some of the pages) */ long i; + struct list_s *uniques = list_create(); + for (i = 1; i < NB_SEGMENTS; i++) { char *base = get_segment_base(i); + OPT_ASSERT(list_is_empty(uniques)); + /* the mod_old_objects list may contain maanny slices for + the same *huge* object. it seems worth to first construct + a list of unique objects. we use the VISITED flag for this + purpose as it is never set outside of seg0: */ struct list_s *lst = get_priv_segment(i)->modified_old_objects; + struct stm_undo_s *modified = (struct stm_undo_s *)lst->items; struct stm_undo_s *end = (struct stm_undo_s *)(lst->items + lst->count); - for (; modified < end; modified++) { object_t *obj = modified->object; - /* All modified objs have all pages accessible for now. - This is because we create a backup of the whole obj - and thus make all pages accessible. */ - assert_obj_accessible_in(i, obj); + struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, obj); - assert(!is_new_object(obj)); /* should never be in that list */ + if (!(dst->stm_flags & GCFLAG_VISITED)) { + LIST_APPEND(uniques, obj); + dst->stm_flags |= GCFLAG_VISITED; + } + } - if (!mark_visited_test_and_set(obj)) { - /* trace shared, committed version */ - mark_and_trace(obj, stm_object_pages); - } - mark_and_trace(obj, base); /* private, modified version */ - } + LIST_FOREACH_R(uniques, object_t*, + ({ + /* clear the VISITED flags again and actually visit them */ + struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, item); + dst->stm_flags &= ~GCFLAG_VISITED; + + /* All modified objs have all pages accessible for now. + This is because we create a backup of the whole obj + and thus make all pages accessible. 
*/ + assert_obj_accessible_in(i, item); + + assert(!is_new_object(item)); /* should never be in that list */ + + if (!mark_visited_test_and_set(item)) { + /* trace shared, committed version */ + mark_and_trace(item, stm_object_pages); + } + mark_and_trace(item, base); /* private, modified version */ + })); + + list_clear(uniques); } + LIST_FREE(uniques); } static void mark_visit_from_roots(void) From noreply at buildbot.pypy.org Thu Jan 22 16:28:31 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 Jan 2015 16:28:31 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c8: import stmgc-c8 509da83c0d5d Message-ID: <20150122152831.20CE11C059A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c8 Changeset: r75483:df00e75f0207 Date: 2015-01-22 16:28 +0100 http://bitbucket.org/pypy/pypy/changeset/df00e75f0207/ Log: import stmgc-c8 509da83c0d5d diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -5cfce5d61c50 +509da83c0d5d diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -3,6 +3,32 @@ # error "must be compiled via stmgc.c" #endif +/* *** MISC *** */ +static void free_bk(struct stm_undo_s *undo) +{ + free(undo->backup); + assert(undo->backup = (char*)-88); + increment_total_allocated(-SLICE_SIZE(undo->slice)); +} + +static struct stm_commit_log_entry_s *malloc_cle(long entries) +{ + size_t byte_len = sizeof(struct stm_commit_log_entry_s) + + entries * sizeof(struct stm_undo_s); + struct stm_commit_log_entry_s *result = malloc(byte_len); + increment_total_allocated(byte_len); + return result; +} + +static void free_cle(struct stm_commit_log_entry_s *e) +{ + size_t byte_len = sizeof(struct stm_commit_log_entry_s) + + e->written_count * sizeof(struct stm_undo_s); + 
increment_total_allocated(-byte_len); + free(e); +} +/* *** MISC *** */ + /* General helper: copies objects into our own segment, from some source described by a range of 'struct stm_undo_s'. Maybe later @@ -150,6 +176,9 @@ /* make our page write-ready */ page_mark_accessible(my_segnum, pagenum); + /* account for this page now: XXX */ + /* increment_total_allocated(4096); */ + if (copy_from_segnum == -1) { /* this page is only accessible in the sharing segment so far (new allocation). We can thus simply mark it accessible here. */ @@ -390,6 +419,7 @@ return !needs_abort; } + static struct stm_commit_log_entry_s *_create_commit_log_entry(void) { /* puts all modified_old_objects in a new commit log entry */ @@ -399,9 +429,7 @@ struct list_s *list = STM_PSEGMENT->modified_old_objects; OPT_ASSERT((list_count(list) % 3) == 0); size_t count = list_count(list) / 3; - size_t byte_len = sizeof(struct stm_commit_log_entry_s) + - count * sizeof(struct stm_undo_s); - struct stm_commit_log_entry_s *result = malloc(byte_len); + struct stm_commit_log_entry_s *result = malloc_cle(count); result->next = NULL; result->segment_num = STM_SEGMENT->segment_num; @@ -427,7 +455,7 @@ while (1) { if (!_stm_validate()) { if (new != INEV_RUNNING) - free(new); + free_cle((struct stm_commit_log_entry_s*)new); stm_abort_transaction(); } @@ -601,8 +629,10 @@ /* make backup slice: */ char *bk_slice = malloc(slice_sz); + increment_total_allocated(slice_sz); memcpy(bk_slice, realobj + slice_off, slice_sz); + /* !! follows layout of "struct stm_undo_s" !! 
*/ STM_PSEGMENT->modified_old_objects = list_append3( STM_PSEGMENT->modified_old_objects, (uintptr_t)obj, /* obj */ @@ -860,7 +890,7 @@ dprintf(("reset_modified_from_backup_copies(%d): obj=%p off=%lu bk=%p\n", segment_num, obj, SLICE_OFFSET(undo->slice), undo->backup)); - free(undo->backup); + free_bk(undo); } /* check that all objects have the GCFLAG_WRITE_BARRIER afterwards */ @@ -1089,7 +1119,7 @@ if (get_page_status_in(i, page) != PAGE_NO_ACCESS) { /* shared or private, but never segfault */ char *dst = REAL_ADDRESS(get_segment_base(i), frag); - dprintf(("-> flush %p to seg %lu, sz=%lu\n", frag, i, frag_size)); + //dprintf(("-> flush %p to seg %lu, sz=%lu\n", frag, i, frag_size)); memcpy(dst, src, frag_size); } } diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -149,6 +149,8 @@ #define SLICE_SIZE(slice) ((int)((slice) & 0xFFFF)) #define NEW_SLICE(offset, size) (((uint64_t)(offset)) << 16 | (size)) + + /* The model is: we have a global chained list, from 'commit_log_root', of 'struct stm_commit_log_entry_s' entries. Every one is fully read-only apart from the 'next' field. 
Every one stands for one @@ -168,6 +170,11 @@ static struct stm_commit_log_entry_s commit_log_root; +static void free_bk(struct stm_undo_s *undo); +static struct stm_commit_log_entry_s *malloc_cle(long entries); +static void free_cle(struct stm_commit_log_entry_s *e); + + #ifndef STM_TESTS static char *stm_object_pages; #else diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -308,29 +308,53 @@ some of the pages) */ long i; + struct list_s *uniques = list_create(); + for (i = 1; i < NB_SEGMENTS; i++) { char *base = get_segment_base(i); + OPT_ASSERT(list_is_empty(uniques)); + /* the mod_old_objects list may contain maanny slices for + the same *huge* object. it seems worth to first construct + a list of unique objects. we use the VISITED flag for this + purpose as it is never set outside of seg0: */ struct list_s *lst = get_priv_segment(i)->modified_old_objects; + struct stm_undo_s *modified = (struct stm_undo_s *)lst->items; struct stm_undo_s *end = (struct stm_undo_s *)(lst->items + lst->count); - for (; modified < end; modified++) { object_t *obj = modified->object; - /* All modified objs have all pages accessible for now. - This is because we create a backup of the whole obj - and thus make all pages accessible. 
*/ - assert_obj_accessible_in(i, obj); + struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, obj); - assert(!is_new_object(obj)); /* should never be in that list */ + if (!(dst->stm_flags & GCFLAG_VISITED)) { + LIST_APPEND(uniques, obj); + dst->stm_flags |= GCFLAG_VISITED; + } + } - if (!mark_visited_test_and_set(obj)) { - /* trace shared, committed version */ - mark_and_trace(obj, stm_object_pages); - } - mark_and_trace(obj, base); /* private, modified version */ - } + LIST_FOREACH_R(uniques, object_t*, + ({ + /* clear the VISITED flags again and actually visit them */ + struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, item); + dst->stm_flags &= ~GCFLAG_VISITED; + + /* All modified objs have all pages accessible for now. + This is because we create a backup of the whole obj + and thus make all pages accessible. */ + assert_obj_accessible_in(i, item); + + assert(!is_new_object(item)); /* should never be in that list */ + + if (!mark_visited_test_and_set(item)) { + /* trace shared, committed version */ + mark_and_trace(item, stm_object_pages); + } + mark_and_trace(item, base); /* private, modified version */ + })); + + list_clear(uniques); } + LIST_FREE(uniques); } static void mark_visit_from_roots(void) @@ -485,7 +509,7 @@ { /* this is called by _stm_largemalloc_sweep() */ object_t *obj = (object_t *)(data - stm_object_pages); - dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj))); + //dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj))); if (!mark_visited_test_and_clear(obj)) { /* This is actually needed in order to avoid random write-read conflicts with objects read and freed long in the past. @@ -511,7 +535,7 @@ /* XXX: identical to largemalloc_keep_object_at()? */ /* this is called by _stm_smallmalloc_sweep() */ object_t *obj = (object_t *)(data - stm_object_pages); - dprintf(("keep small obj %p ? -> %d\n", obj, mark_visited_test(obj))); + //dprintf(("keep small obj %p ? 
-> %d\n", obj, mark_visited_test(obj))); if (!mark_visited_test_and_clear(obj)) { /* This is actually needed in order to avoid random write-read conflicts with objects read and freed long in the past. @@ -558,8 +582,14 @@ cl = next; rev_num = cl->rev_num; + /* free bk copies of entries: */ + long count = cl->written_count; + while (count-->0) { + free_bk(&cl->written[count]); + } + next = cl->next; - free(cl); + free_cle(cl); if (next == INEV_RUNNING) { was_inev = true; break; diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -104,7 +104,7 @@ nobj = (object_t *)allocate_outside_nursery_small(size); } - dprintf(("move %p -> %p\n", obj, nobj)); + //dprintf(("move %p -> %p\n", obj, nobj)); /* copy the object */ copy_large_object:; @@ -175,7 +175,7 @@ { assert(!_is_young(obj)); - dprintf(("_collect_now: %p\n", obj)); + //dprintf(("_collect_now: %p\n", obj)); assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER)); diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -75,9 +75,6 @@ /* set this flag *after* we un-protected it, because XXX later */ set_page_status_in(segnum, pagenum, PAGE_ACCESSIBLE); - - // XXX: maybe? - //increment_total_allocated(4096); } __attribute__((unused)) @@ -95,7 +92,4 @@ perror("mprotect"); stm_fatalerror("mprotect failed! Consider running 'sysctl vm.max_map_count=16777216'"); } - - // XXX: maybe? 
- //increment_total_allocated(-4096); } diff --git a/rpython/translator/stm/src_stm/stm/smallmalloc.c b/rpython/translator/stm/src_stm/stm/smallmalloc.c --- a/rpython/translator/stm/src_stm/stm/smallmalloc.c +++ b/rpython/translator/stm/src_stm/stm/smallmalloc.c @@ -181,8 +181,8 @@ (_allocate_small_slowpath(size) - stm_object_pages); *fl = result->next; - dprintf(("allocate_outside_nursery_small(%lu): %p\n", - size, (char*)((char *)result - stm_object_pages))); + /* dprintf(("allocate_outside_nursery_small(%lu): %p\n", */ + /* size, (char*)((char *)result - stm_object_pages))); */ return (stm_char*) ((char *)result - stm_object_pages); } From noreply at buildbot.pypy.org Thu Jan 22 16:50:47 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 22 Jan 2015 16:50:47 +0100 (CET) Subject: [pypy-commit] pypy default: merge typed-cells Message-ID: <20150122155047.6D03A1C05AD@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75484:fd3d597fb6ea Date: 2015-01-22 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/fd3d597fb6ea/ Log: merge typed-cells diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -83,7 +83,7 @@ loops = log.loops_by_filename(self.filepath) assert len(loops) == 1 - def test_mutate_class(self): + def test_mutate_class_int(self): def fn(n): class A(object): count = 1 @@ -106,7 +106,7 @@ entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', - 'getfield_gc', 'guard_nonnull_class'] + 'getfield_gc'] # the STORE_ATTR is folded away assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] # @@ -114,19 +114,77 @@ # ---------------------- loop, = log.loops_by_filename(self.filepath) assert 
loop.match(""" - i8 = getfield_gc_pure(p5, descr=...) - i9 = int_lt(i8, i7) - guard_true(i9, descr=.*) - guard_not_invalidated(descr=.*) - i82 = getfield_gc_pure(p8, descr=...) - i11 = int_add_ovf(i82, 1) + i58 = int_lt(i38, i31) + guard_true(i58, descr=...) + guard_not_invalidated(descr=...) + i59 = int_add_ovf(i57, 1) guard_no_overflow(descr=...) - i12 = force_token() - --TICK-- - p20 = new_with_vtable(ConstClass(W_IntObject)) - setfield_gc(p20, i11, descr=) - setfield_gc(ConstPtr(ptr21), p20, descr=) - jump(..., descr=...) + p60 = force_token() + i61 = getfield_raw(..., descr=...) + setfield_gc(ConstPtr(ptr39), i59, descr=...) + i62 = int_lt(i61, 0) + guard_false(i62, descr=...) + jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, descr=...) + """) + + def test_mutate_class(self): + def fn(n): + class LL(object): + def __init__(self, n): + self.n = n + class A(object): + count = None + def __init__(self, a): + self.a = a + def f(self): + return self.count + i = 0 + a = A(1) + while i < n: + A.count = LL(A.count) # ID: mutate + a.f() # ID: meth1 + i += 1 + return i + # + log = self.run(fn, [1000], threshold=10) + assert log.result == 1000 + # + # first, we test the entry bridge + # ------------------------------- + entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) + ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') + assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', + 'getfield_gc', 'guard_nonnull_class', + 'getfield_gc', 'guard_value', # type check on the attribute + ] + # the STORE_ATTR is folded away + assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] + # + # then, the actual loop + # ---------------------- + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i70 = int_lt(i58, i33) + guard_true(i70, descr=...) + guard_not_invalidated(descr=...) + p71 = getfield_gc(p64, descr=...) + guard_value(p71, ConstPtr(ptr42), descr=...) 
+ p72 = force_token() + p73 = force_token() + i74 = int_add(i58, 1) + i75 = getfield_raw(..., descr=...) + i76 = int_lt(i75, 0) + guard_false(i76, descr=...) + p77 = new_with_vtable(...) + setfield_gc(p77, p64, descr=...) + setfield_gc(p77, ConstPtr(null), descr=...) + setfield_gc(p77, ConstPtr(null), descr=...) + setfield_gc(p77, ConstPtr(null), descr=...) + setfield_gc(p77, ConstPtr(null), descr=...) + setfield_gc(p77, ConstPtr(ptr42), descr=...) + setfield_gc(ConstPtr(ptr69), p77, descr=...) + jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, descr=...) + """) def test_oldstyle_newstyle_mix(self): diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -9,23 +9,16 @@ from pypy.objspace.std.dictmultiobject import ( DictStrategy, ObjectDictStrategy, _never_equal_to_string, create_iterator_classes) +from pypy.objspace.std.typeobject import ( + MutableCell, IntMutableCell, ObjectMutableCell, write_cell) class VersionTag(object): pass - -class ModuleCell(W_Root): - def __init__(self, w_value=None): - self.w_value = w_value - - def __repr__(self): - return "" % (self.w_value, ) - - -def unwrap_cell(w_value): - if isinstance(w_value, ModuleCell): - return w_value.w_value +def unwrap_cell(space, w_value): + if isinstance(w_value, MutableCell): + return w_value.unwrap_cell(space) return w_value @@ -71,15 +64,9 @@ def setitem_str(self, w_dict, key, w_value): cell = self.getdictvalue_no_unwrapping(w_dict, key) - if isinstance(cell, ModuleCell): - cell.w_value = w_value + w_value = write_cell(self.space, cell, w_value) + if w_value is None: return - if cell is not None: - # If the new value and the current value are the same, don't - # create a level of indirection, or mutate the version. 
- if self.space.is_w(w_value, cell): - return - w_value = ModuleCell(w_value) self.mutated() self.unerase(w_dict.dstorage)[key] = w_value @@ -131,7 +118,7 @@ def getitem_str(self, w_dict, key): cell = self.getdictvalue_no_unwrapping(w_dict, key) - return unwrap_cell(cell) + return unwrap_cell(self.space, cell) def w_keys(self, w_dict): space = self.space @@ -140,12 +127,12 @@ def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues - return [unwrap_cell(cell) for cell in iterator()] + return [unwrap_cell(self.space, cell) for cell in iterator()] def items(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.newtuple([_wrapkey(space, key), unwrap_cell(cell)]) + return [space.newtuple([_wrapkey(space, key), unwrap_cell(self.space, cell)]) for key, cell in iterator()] def clear(self, w_dict): @@ -157,7 +144,7 @@ d = self.unerase(w_dict.dstorage) key, cell = d.popitem() self.mutated() - return _wrapkey(space, key), unwrap_cell(cell) + return _wrapkey(space, key), unwrap_cell(self.space, cell) def switch_to_object_strategy(self, w_dict): space = self.space @@ -165,7 +152,7 @@ strategy = space.fromcache(ObjectDictStrategy) d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): - d_new[_wrapkey(space, key)] = unwrap_cell(cell) + d_new[_wrapkey(space, key)] = unwrap_cell(self.space, cell) w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) @@ -181,7 +168,7 @@ wrapkey = _wrapkey def wrapvalue(space, value): - return unwrap_cell(value) + return unwrap_cell(space, value) create_iterator_classes(ModuleDictStrategy) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -8,7 +8,7 @@ W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, BaseValueIterator, BaseItemIterator, _never_equal_to_string ) -from pypy.objspace.std.typeobject import TypeCell +from 
pypy.objspace.std.typeobject import MutableCell # ____________________________________________________________ @@ -872,15 +872,15 @@ if version_tag is not None: name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is - # a TypeCell, which may change without changing the version_tag + # a MutableCell, which may change without changing the version_tag _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # selector = ("", INVALID) if w_descr is None: selector = (name, DICT) # common case: no such attr in the class - elif isinstance(w_descr, TypeCell): - pass # we have a TypeCell in the class: give up + elif isinstance(w_descr, MutableCell): + pass # we have a MutableCell in the class: give up elif space.is_data_descr(w_descr): # we have a data descriptor, which means the dictionary value # (if any) has no relevance. @@ -929,11 +929,11 @@ # We know here that w_obj.getdictvalue(space, name) just returned None, # so the 'name' is not in the instance. We repeat the lookup to find it # in the class, this time taking care of the result: it can be either a - # quasi-constant class attribute, or actually a TypeCell --- which we + # quasi-constant class attribute, or actually a MutableCell --- which we # must not cache. (It should not be None here, but you never know...) 
_, w_method = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) - if w_method is None or isinstance(w_method, TypeCell): + if w_method is None or isinstance(w_method, MutableCell): return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -210,6 +210,56 @@ assert w_A.version_tag() is atag assert space.int_w(space.getattr(w_A, w_x)) == 4 + def test_no_cell_when_writing_same_value(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + w_val = space.newint(1) + space.setattr(w_A, w_x, w_val) + space.setattr(w_A, w_x, w_val) + w_val1 = w_A._getdictvalue_no_unwrapping(space, "x") + assert w_val1 is w_val + + def test_int_cells(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(1)) + assert w_A.version_tag() is not atag + assert space.int_w(space.getattr(w_A, w_x)) == 1 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(2)) + assert w_A.version_tag() is not atag + assert space.int_w(space.getattr(w_A, w_x)) == 2 + cell = w_A._getdictvalue_no_unwrapping(space, "x") + assert cell.intvalue == 2 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(3)) + assert w_A.version_tag() is atag + assert space.int_w(space.getattr(w_A, w_x)) == 3 + assert cell.intvalue == 3 + + space.setattr(w_A, w_x, space.newint(4)) + assert w_A.version_tag() is atag + assert space.int_w(space.getattr(w_A, w_x)) == 4 + assert cell.intvalue == 4 + + def test_int_cell_turns_into_cell(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newint(1)) + 
space.setattr(w_A, w_x, space.newint(2)) + space.setattr(w_A, w_x, space.newfloat(2.2)) + cell = w_A._getdictvalue_no_unwrapping(space, "x") + assert space.float_w(cell.w_value) == 2.2 + + class AppTestVersionedType(test_typeobject.AppTestTypeObject): spaceconfig = {"objspace.std.withtypeversion": True} diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -11,18 +11,57 @@ from rpython.rlib.objectmodel import current_object_addr_as_int, compute_hash from rpython.rlib.rarithmetic import intmask, r_uint +class MutableCell(W_Root): + def unwrap_cell(self, space): + raise NotImplementedError("abstract base") -class TypeCell(W_Root): +class ObjectMutableCell(MutableCell): def __init__(self, w_value=None): self.w_value = w_value + def unwrap_cell(self, space): + return self.w_value + + def __repr__(self): + return "" % (self.w_value, ) + + +class IntMutableCell(MutableCell): + def __init__(self, intvalue): + self.intvalue = intvalue + + def unwrap_cell(self, space): + return space.wrap(self.intvalue) + + def __repr__(self): + return "" % (self.intvalue, ) + def unwrap_cell(space, w_value): - if (space.config.objspace.std.withtypeversion and - isinstance(w_value, TypeCell)): - return w_value.w_value + if space.config.objspace.std.withtypeversion: + if isinstance(w_value, MutableCell): + return w_value.unwrap_cell(space) return w_value +def write_cell(space, w_cell, w_value): + from pypy.objspace.std.intobject import W_IntObject + if w_cell is None: + # attribute does not exist at all, write it without a cell first + return w_value + if isinstance(w_cell, ObjectMutableCell): + w_cell.w_value = w_value + return None + elif isinstance(w_cell, IntMutableCell) and type(w_value) is W_IntObject: + w_cell.intvalue = w_value.intval + return None + elif space.is_w(w_cell, w_value): + # If the new value and the current value are the same, don't + # create a level of indirection, 
or mutate the version. + return None + if type(w_value) is W_IntObject: + return IntMutableCell(w_value.intval) + else: + return ObjectMutableCell(w_value) class VersionTag(object): pass @@ -274,11 +313,9 @@ if version_tag is not None: w_curr = w_self._pure_getdictvalue_no_unwrapping( space, version_tag, name) - if w_curr is not None: - if isinstance(w_curr, TypeCell): - w_curr.w_value = w_value - return True - w_value = TypeCell(w_value) + w_value = write_cell(space, w_curr, w_value) + if w_value is None: + return True w_self.mutated(name) w_self.dict_w[name] = w_value return True @@ -369,8 +406,8 @@ tup_w = w_self._pure_lookup_where_with_method_cache(name, version_tag) w_class, w_value = tup_w if (space.config.objspace.std.withtypeversion and - isinstance(w_value, TypeCell)): - return w_class, w_value.w_value + isinstance(w_value, MutableCell)): + return w_class, w_value.unwrap_cell(space) return tup_w # don't make a new tuple, reuse the old one def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): From noreply at buildbot.pypy.org Thu Jan 22 16:50:49 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 22 Jan 2015 16:50:49 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20150122155049.33D471C05AD@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75485:023b261dd2d5 Date: 2015-01-22 16:50 +0100 http://bitbucket.org/pypy/pypy/changeset/023b261dd2d5/ Log: merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -130,3 +130,6 @@ mmap(), called rarely during major GCs, if such a major GC occurs at exactly the wrong time), and some of the less rare kind (particularly on Windows tests). + +.. branch: osx-package.py +.. 
branch: package.py-helpful-error-message diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -78,7 +78,7 @@ {{{ setfield_gc(p13, 0, descr=) setfield_gc(p13, 0, descr=) - setfield_gc(p13, 16, descr=) + setfield_gc(p13, 32, descr=) }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -253,6 +253,12 @@ return [None, # hack, do the right renaming from op.args[0] to op.result SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_likely(self, op): + return None # "no real effect" + + def rewrite_op_unlikely(self, op): + return None # "no real effect" + def rewrite_op_raw_malloc_usage(self, op): if self.cpu.translate_support_code or isinstance(op.args[0], Variable): return # the operation disappears diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1376,3 +1376,12 @@ tr.rewrite_operation(op) except Exception, e: assert 'foobar' in str(e) + +def test_likely_unlikely(): + v1 = varoftype(lltype.Bool) + v2 = varoftype(lltype.Bool) + op = SpaceOperation('likely', [v1], v2) + tr = Transformer() + assert tr.rewrite_operation(op) is None + op = SpaceOperation('unlikely', [v1], v2) + assert tr.rewrite_operation(op) is None diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -636,6 +636,30 @@ # ____________________________________________________________ +def likely(condition): + assert isinstance(condition, 
bool) + return condition + +def unlikely(condition): + assert isinstance(condition, bool) + return condition + +class Entry(ExtRegistryEntry): + _about_ = (likely, unlikely) + + def compute_result_annotation(self, s_x): + from rpython.annotator import model as annmodel + return annmodel.SomeBool() + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + vlist = hop.inputargs(lltype.Bool) + hop.exception_cannot_occur() + return hop.genop(self.instance.__name__, vlist, + resulttype=lltype.Bool) + +# ____________________________________________________________ + class r_dict(object): """An RPython dict-like object. diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -365,6 +365,9 @@ 'convert_float_bytes_to_longlong': LLOp(canfold=True), 'convert_longlong_bytes_to_float': LLOp(canfold=True), + 'likely': LLOp(canfold=True), + 'unlikely': LLOp(canfold=True), + # __________ pointer operations __________ 'malloc': LLOp(canmallocgc=True), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -699,6 +699,14 @@ return p[0] op_raw_load.need_result_type = True +def op_likely(x): + assert isinstance(x, bool) + return x + +def op_unlikely(x): + assert isinstance(x, bool) + return x + # ____________________________________________________________ def get_op_impl(opname): diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -4,7 +4,7 @@ from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib import objectmodel, jit, rgc -from 
rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, likely from rpython.rlib.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rtyper import rmodel @@ -46,7 +46,11 @@ @jit.oopspec('ordereddict.lookup(d, key, hash, flag)') def ll_call_lookup_function(d, key, hash, flag): fun = d.lookup_function_no & FUNC_MASK - if fun == FUNC_BYTE: + # This likely() here forces gcc to compile the check for fun == FUNC_BYTE + # first. Otherwise, this is a regular switch and gcc (at least 4.7) + # compiles this as a series of checks, with the FUNC_BYTE case last. + # It sounds minor, but it is worth 6-7% on a PyPy microbenchmark. + if likely(fun == FUNC_BYTE): return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: return ll_dict_lookup(d, key, hash, flag, TYPE_SHORT) diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h --- a/rpython/translator/c/src/int.h +++ b/rpython/translator/c/src/int.h @@ -238,6 +238,14 @@ #define OP_BOOL_NOT(x, r) r = !(x) +#ifdef __GNUC__ +# define OP_LIKELY(x, r) r = __builtin_expect((x), 1) +# define OP_UNLIKELY(x, r) r = __builtin_expect((x), 0) +#else +# define OP_LIKELY(x, r) r = (x) +# define OP_UNLIKELY(x, r) r = (x) +#endif + RPY_EXTERN long long op_llong_mul_ovf(long long a, long long b); /* The definitions above can be used with various types */ diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -956,3 +956,18 @@ fn = self.getcompiled(f, [int]) assert fn(0) == 9 + + def test_likely_unlikely(self): + from rpython.rlib.objectmodel import likely, unlikely + + def f(n): + if unlikely(n > 50): + return -10 + if likely(n > 5): + return 42 + return 3 + + fn = self.getcompiled(f, [int]) + assert fn(0) == 3 + assert fn(10) == 42 + assert fn(100) == -10 diff --git 
a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -398,6 +398,10 @@ else: v_exc_type = self.gen_getfield('exc_type', llops) var_no_exc = self.gen_isnull(v_exc_type, llops) + # + # We could add a "var_no_exc is likely true" hint, but it seems + # not to help, so it was commented out again. + #var_no_exc = llops.genop('likely', [var_no_exc], lltype.Bool) block.operations.extend(llops) From noreply at buildbot.pypy.org Thu Jan 22 16:52:59 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 22 Jan 2015 16:52:59 +0100 (CET) Subject: [pypy-commit] pypy default: document branch Message-ID: <20150122155259.E1B3A1C05AD@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75486:93eb9a8d524e Date: 2015-01-22 16:52 +0100 http://bitbucket.org/pypy/pypy/changeset/93eb9a8d524e/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -133,3 +133,7 @@ .. branch: osx-package.py .. branch: package.py-helpful-error-message + +.. branch: typed-cells + +Improve performance of integer globals and class attributes. From noreply at buildbot.pypy.org Thu Jan 22 17:11:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Jan 2015 17:11:36 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Don't load the whole file in memory (doesn't fit for a 600MB file produced by a 10-minutes run) and print progress. Message-ID: <20150122161136.89E161C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75487:dc21409bb756 Date: 2015-01-22 17:11 +0100 http://bitbucket.org/pypy/pypy/changeset/dc21409bb756/ Log: Don't load the whole file in memory (doesn't fit for a 600MB file produced by a 10-minutes run) and print progress. 
diff --git a/pypy/stm/print_stm_log.py b/pypy/stm/print_stm_log.py --- a/pypy/stm/print_stm_log.py +++ b/pypy/stm/print_stm_log.py @@ -45,13 +45,14 @@ class LogEntry(object): def __init__(self, timestamp, threadnum, otherthreadnum, - event, marker1, marker2): + event, marker1, marker2, frac): self.timestamp = timestamp self.threadnum = threadnum self.otherthreadnum = otherthreadnum self.event = event self.marker1 = marker1 self.marker2 = marker2 + self.frac = frac def __str__(self): s = '[%.3f][%s->%s]\t%s' % ( @@ -66,11 +67,13 @@ def parse_log(filename): f = open(filename, 'rb') - result = [] try: header = f.read(16) if header != "STMGC-C7-PROF01\n": raise ValueError("wrong format in file %r" % (filename,)) + f.seek(0, 2) + frac = 1.0 / f.tell() + f.seek(16, 0) result = [] while True: packet = f.read(19) @@ -81,12 +84,11 @@ raise ValueError("the file %r appears corrupted" % (filename,)) m1 = f.read(len1) m2 = f.read(len2) - result.append(LogEntry(sec + 0.000000001 * nsec, - threadnum, otherthreadnum, event, m1, m2)) + yield LogEntry(sec + 0.000000001 * nsec, + threadnum, otherthreadnum, event, m1, m2, + f.tell() * frac) finally: f.close() - return result - class ThreadState(object): @@ -174,13 +176,17 @@ return r + '%' def dump(logentries): - start_time, stop_time = logentries[0].timestamp, logentries[-1].timestamp - total_time = stop_time - start_time - print 'Total real time: %.3fs' % (total_time,) - # threads = {} conflicts = {} + cnt = 0 for entry in logentries: + if (cnt & 0x7ffff) == 0: + if cnt == 0: + start_time = entry.timestamp + else: + print >> sys.stderr, '%.0f%%' % (entry.frac * 100.0,), + cnt += 1 + # if entry.event == STM_TRANSACTION_START: t = threads.get(entry.threadnum) if t is None: @@ -221,6 +227,14 @@ if t is not None and t.in_transaction(): t.transaction_unpause(entry) # + if cnt == 0: + raise Exception("empty file") + print >> sys.stderr + print + stop_time = entry.timestamp + total_time = stop_time - start_time + print 'Total real time: 
%.3fs' % (total_time,) + # total_cpu_time = sum([v.cpu_time for v in threads.values()]) print 'Total CPU time in STM mode: %.3fs (%s)' % ( total_cpu_time, percent(total_cpu_time, total_time)) From noreply at buildbot.pypy.org Thu Jan 22 17:33:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Jan 2015 17:33:12 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Trying to parallelize the rtyper Message-ID: <20150122163312.1F4AB1C050C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75488:ebe1ffb23101 Date: 2015-01-22 17:32 +0100 http://bitbucket.org/pypy/pypy/changeset/ebe1ffb23101/ Log: Trying to parallelize the rtyper diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -37,10 +37,10 @@ signals_enabled = _SignalsEnabled() try: - from __pypy__.thread import last_abort_info + from __pypy__.thread import hint_commit_soon except ImportError: # Not a STM-enabled PyPy. - def last_abort_info(): + def hint_commit_soon(): return None @@ -221,21 +221,17 @@ self.lock_if_released_then_finished.release() raise _Done - @staticmethod + @staticmethod def _do_it((f, args, kwds), got_exception): # this is a staticmethod in order to make sure that we don't # accidentally use 'self' in the atomic block. 
- try: - while True: - with signals_enabled: - with atomic: - info = last_abort_info() - if info is None: - if not got_exception: - f(*args, **kwds) - # else return early if already an exc to reraise - return - report_abort_info(info) + try: + hint_commit_soon() + with signals_enabled: + with atomic: + if not got_exception: + f(*args, **kwds) + hint_commit_soon() except: got_exception[:] = sys.exc_info() @@ -253,7 +249,7 @@ _thread_local = _ThreadLocal() -def report_abort_info(info): +def XXXreport_abort_info(info): header = info[0] f = cStringIO.StringIO() if len(info) > 1: @@ -279,3 +275,27 @@ header[1], 'atom '*header[3], 'inev '*(header[4]>1), header[5], header[6]) sys.stderr.write(f.getvalue()) + + +class threadlocalproperty(object): + def __init__(self, *default): + self.tl_default = default + self.tl_name = intern(str(id(self))) + + def tl_get(self, obj): + try: + return obj._threadlocalproperties + except AttributeError: + return obj.__dict__.setdefault('_threadlocalproperties', + thread._local()) + + def __get__(self, obj, cls=None): + if obj is None: + return self + return getattr(self.tl_get(obj), self.tl_name, *self.tl_default) + + def __set__(self, obj, value): + setattr(self.tl_get(obj), self.tl_name, value) + + def __delete__(self, obj): + delattr(self.tl_get(obj), self.tl_name) diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -76,7 +76,7 @@ args_s = [self.typeannotation(t) for t in input_arg_types] # XXX hack - annmodel.TLS.check_str_without_nul = ( + annmodel.STATE.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) flowgraph, inputcells = self.get_call_parameters(function, args_s, policy) @@ -112,7 +112,7 @@ from rpython.annotator.policy import AnnotatorPolicy policy = AnnotatorPolicy() # XXX hack - annmodel.TLS.check_str_without_nul = ( + annmodel.STATE.check_str_without_nul = ( 
self.translator.config.translation.check_str_without_nul) graph, inputcells = self.get_call_parameters(function, args_s, policy) self.build_graph_types(graph, inputcells, complete_now=False) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -40,6 +40,13 @@ Currently used for factories and user-defined classes.""" + try: + from transaction import threadlocalproperty + except ImportError: + pass + else: + position_key = threadlocalproperty() + def __setstate__(self, dic): self.__dict__.update(dic) # normal action delayed_imports() diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -43,7 +43,16 @@ # A global attribute :-( Patch it with 'True' to enable checking of # the no_nul attribute... check_str_without_nul = False -TLS = State() +STATE = State() + +try: + import thread + TLS = thread._local() +except ImportError: + class Tls(object): + pass + TLS = Tls() + class SomeObject(object): """The set of all objects. 
Each instance stands @@ -243,7 +252,7 @@ return False d1 = self.__dict__ d2 = other.__dict__ - if not TLS.check_str_without_nul: + if not STATE.check_str_without_nul: d1 = d1.copy() d1['no_nul'] = 0 d2 = d2.copy() From noreply at buildbot.pypy.org Fri Jan 23 10:25:44 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 10:25:44 +0100 (CET) Subject: [pypy-commit] pypy default: update the docstrings Message-ID: <20150123092544.066841C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75489:96024f66d2d8 Date: 2015-01-23 10:25 +0100 http://bitbucket.org/pypy/pypy/changeset/96024f66d2d8/ Log: update the docstrings diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -97,14 +97,22 @@ _nowrapper=True, c_type='int') def get_saved_errno(): - """Return the saved value of the errno. This value is saved after a call - to an llexternal function with 'save_err & RFFI_ERRNO_AFTER != 0'.""" + """Return the value of the "saved errno". + This value is saved after a call to a C function, if it was declared + with the flag llexternal(..., save_err=rffi.RFFI_SAVE_ERRNO). + Functions without that flag don't change the saved errno. + """ from rpython.rlib import rthread return intmask(rthread.tlfield_rpy_errno.getraw()) def set_saved_errno(errno): - """Set the saved value of the errno. This value will be used by a - following llexternal function with 'save_err & RFFI_ERRNO_BEFORE != 0'.""" + """Set the value of the saved errno. This value will be used to + initialize the real errno just before calling the following C function, + provided it was declared llexternal(..., save_err=RFFI_READSAVED_ERRNO). + Note also that it is more common to want the real errno to be initially + zero; for that case, use llexternal(..., save_err=RFFI_ZERO_ERRNO_BEFORE) + and then you don't need set_saved_errno(0). 
+ """ from rpython.rlib import rthread rthread.tlfield_rpy_errno.setraw(rffi.cast(INT, errno)) diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -124,10 +124,24 @@ _nowrapper=True, sandboxsafe=True) def GetLastError_saved(): + """Return the value of the "saved LastError". + The C-level GetLastError() is saved there after a call to a C + function, if that C function was declared with the flag + llexternal(..., save_err=rffi.RFFI_SAVE_LASTERROR). + Functions without that flag don't change the saved LastError. + Alternatively, if the function was declared RFFI_SAVE_WSALASTERROR, + then the value of the C-level WSAGetLastError() is saved instead + (into the same "saved LastError" variable). + """ from rpython.rlib import rthread return rffi.cast(lltype.Signed, rthread.tlfield_rpy_lasterror.getraw()) def SetLastError_saved(err): + """Set the value of the saved LastError. This value will be used in + a call to the C-level SetLastError() just before calling the + following C function, provided it was declared + llexternal(..., save_err=RFFI_READSAVED_LASTERROR). 
+ """ from rpython.rlib import rthread rthread.tlfield_rpy_lasterror.setraw(rffi.cast(DWORD, err)) From noreply at buildbot.pypy.org Fri Jan 23 10:40:48 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:40:48 +0100 (CET) Subject: [pypy-commit] pypy default: move ever_mutated attribute where it belongs Message-ID: <20150123094048.BF78F1C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75490:d57e4d7adf30 Date: 2015-01-23 00:28 +0100 http://bitbucket.org/pypy/pypy/changeset/d57e4d7adf30/ Log: move ever_mutated attribute where it belongs diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -20,7 +20,7 @@ # we want to propagate knowledge that the result cannot be negative class AbstractAttribute(object): - _immutable_fields_ = ['terminator', 'ever_mutated?'] + _immutable_fields_ = ['terminator'] cache_attrs = None _size_estimate = 0 @@ -28,7 +28,6 @@ self.space = space assert isinstance(terminator, Terminator) self.terminator = terminator - self.ever_mutated = False def read(self, obj, selector): attr = self.find_map_attr(selector) @@ -276,13 +275,15 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'storageindex', 'back'] + _immutable_fields_ = ['selector', 'storageindex', 'back', 'ever_mutated?'] + def __init__(self, selector, back): AbstractAttribute.__init__(self, back.space, back.terminator) self.selector = selector self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 + self.ever_mutated = False def _copy_attr(self, obj, new_obj): w_value = self.read(obj, self.selector) From noreply at buildbot.pypy.org Fri Jan 23 10:40:50 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:40:50 +0100 (CET) Subject: [pypy-commit] pypy default: merge default Message-ID: 
<20150123094050.511731C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75491:c5c8fff7c426 Date: 2015-01-23 10:40 +0100 http://bitbucket.org/pypy/pypy/changeset/c5c8fff7c426/ Log: merge default diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -97,14 +97,22 @@ _nowrapper=True, c_type='int') def get_saved_errno(): - """Return the saved value of the errno. This value is saved after a call - to an llexternal function with 'save_err & RFFI_ERRNO_AFTER != 0'.""" + """Return the value of the "saved errno". + This value is saved after a call to a C function, if it was declared + with the flag llexternal(..., save_err=rffi.RFFI_SAVE_ERRNO). + Functions without that flag don't change the saved errno. + """ from rpython.rlib import rthread return intmask(rthread.tlfield_rpy_errno.getraw()) def set_saved_errno(errno): - """Set the saved value of the errno. This value will be used by a - following llexternal function with 'save_err & RFFI_ERRNO_BEFORE != 0'.""" + """Set the value of the saved errno. This value will be used to + initialize the real errno just before calling the following C function, + provided it was declared llexternal(..., save_err=RFFI_READSAVED_ERRNO). + Note also that it is more common to want the real errno to be initially + zero; for that case, use llexternal(..., save_err=RFFI_ZERO_ERRNO_BEFORE) + and then you don't need set_saved_errno(0). + """ from rpython.rlib import rthread rthread.tlfield_rpy_errno.setraw(rffi.cast(INT, errno)) diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -124,10 +124,24 @@ _nowrapper=True, sandboxsafe=True) def GetLastError_saved(): + """Return the value of the "saved LastError". 
+ The C-level GetLastError() is saved there after a call to a C + function, if that C function was declared with the flag + llexternal(..., save_err=rffi.RFFI_SAVE_LASTERROR). + Functions without that flag don't change the saved LastError. + Alternatively, if the function was declared RFFI_SAVE_WSALASTERROR, + then the value of the C-level WSAGetLastError() is saved instead + (into the same "saved LastError" variable). + """ from rpython.rlib import rthread return rffi.cast(lltype.Signed, rthread.tlfield_rpy_lasterror.getraw()) def SetLastError_saved(err): + """Set the value of the saved LastError. This value will be used in + a call to the C-level SetLastError() just before calling the + following C function, provided it was declared + llexternal(..., save_err=RFFI_READSAVED_LASTERROR). + """ from rpython.rlib import rthread rthread.tlfield_rpy_lasterror.setraw(rffi.cast(DWORD, err)) From noreply at buildbot.pypy.org Fri Jan 23 10:42:11 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:42:11 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: small cleanup Message-ID: <20150123094211.CA7111C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75492:b44911a280e7 Date: 2015-01-23 00:00 +0100 http://bitbucket.org/pypy/pypy/changeset/b44911a280e7/ Log: small cleanup diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -10,17 +10,13 @@ DictStrategy, ObjectDictStrategy, _never_equal_to_string, create_iterator_classes) from pypy.objspace.std.typeobject import ( - MutableCell, IntMutableCell, ObjectMutableCell, write_cell) + MutableCell, IntMutableCell, ObjectMutableCell, write_cell, + unwrap_cell) class VersionTag(object): pass -def unwrap_cell(space, w_value): - if isinstance(w_value, MutableCell): - return w_value.unwrap_cell(space) - return w_value - def _wrapkey(space, key): return space.wrap(key) diff 
--git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.objspace.std.dictmultiobject import ( DictStrategy, create_iterator_classes) -from pypy.objspace.std.typeobject import unwrap_cell +from pypy.objspace.std.typeobject import unwrap_cell_iftypeversion class DictProxyStrategy(DictStrategy): @@ -83,11 +83,12 @@ return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): - return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] + return [unwrap_cell_iftypeversion(self.space, w_value) + for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] def items(self, w_dict): space = self.space - return [space.newtuple([space.wrap(key), unwrap_cell(self.space, w_value)]) + return [space.newtuple([space.wrap(key), unwrap_cell_iftypeversion(self.space, w_value)]) for (key, w_value) in self.unerase(w_dict.dstorage).dict_w.iteritems()] def clear(self, w_dict): @@ -108,6 +109,6 @@ def wrapkey(space, key): return space.wrap(key) def wrapvalue(space, value): - return unwrap_cell(space, value) + return unwrap_cell_iftypeversion(space, value) create_iterator_classes(DictProxyStrategy) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -36,11 +36,15 @@ def __repr__(self): return "" % (self.intvalue, ) +def unwrap_cell(space, w_value): + if isinstance(w_value, MutableCell): + return w_value.unwrap_cell(space) + return w_value -def unwrap_cell(space, w_value): + +def unwrap_cell_iftypeversion(space, w_value): if space.config.objspace.std.withtypeversion: - if isinstance(w_value, MutableCell): - return w_value.unwrap_cell(space) + return unwrap_cell(space, w_value) return w_value def 
write_cell(space, w_cell, w_value): @@ -274,12 +278,12 @@ if space.config.objspace.std.withtypeversion: version_tag = w_self.version_tag() if version_tag is not None: - return unwrap_cell( + return unwrap_cell_iftypeversion( space, w_self._pure_getdictvalue_no_unwrapping( space, version_tag, attr)) w_value = w_self._getdictvalue_no_unwrapping(space, attr) - return unwrap_cell(space, w_value) + return unwrap_cell_iftypeversion(space, w_value) def _getdictvalue_no_unwrapping(w_self, space, attr): w_value = w_self.dict_w.get(attr, None) From noreply at buildbot.pypy.org Fri Jan 23 10:42:13 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:42:13 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: start using mutable int boxes on instances Message-ID: <20150123094213.63ECC1C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75493:67cd227f7eae Date: 2015-01-23 00:14 +0100 http://bitbucket.org/pypy/pypy/changeset/67cd227f7eae/ Log: start using mutable int boxes on instances diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -8,8 +8,8 @@ W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, BaseValueIterator, BaseItemIterator, _never_equal_to_string ) -from pypy.objspace.std.typeobject import MutableCell - +from pypy.objspace.std.typeobject import ( + MutableCell, IntMutableCell, ObjectMutableCell, unwrap_cell) # ____________________________________________________________ # attribute shapes @@ -39,9 +39,12 @@ jit.isconstant(obj) and not attr.ever_mutated ): - return self._pure_mapdict_read_storage(obj, attr.storageindex) + result = self._pure_mapdict_read_storage(obj, attr.storageindex) + assert not isinstance(result, MutableCell) else: - return obj._mapdict_read_storage(attr.storageindex) + result = unwrap_cell( + attr.space, obj._mapdict_read_storage(attr.storageindex)) + return result 
@jit.elidable def _pure_mapdict_read_storage(self, obj, storageindex): @@ -53,9 +56,25 @@ return self.terminator._write_terminator(obj, selector, w_value) if not attr.ever_mutated: attr.ever_mutated = True - obj._mapdict_write_storage(attr.storageindex, w_value) + # introduce cells only on the second write, to make immutability for + # int fields still work + cell = obj._mapdict_read_storage(attr.storageindex) + w_value = self._write_cell(attr.space, cell, w_value) + if w_value is not None: + obj._mapdict_write_storage(attr.storageindex, w_value) return True + def _write_cell(self, space, w_cell, w_value): + from pypy.objspace.std.intobject import W_IntObject + assert not isinstance(w_cell, ObjectMutableCell) + if isinstance(w_cell, IntMutableCell) and type(w_value) is W_IntObject: + w_cell.intvalue = w_value.intval + return None + if type(w_value) is W_IntObject: + return IntMutableCell(w_value.intval) + return w_value + + def delete(self, obj, selector): pass @@ -320,7 +339,8 @@ new_obj = self.back.materialize_r_dict(space, obj, dict_w) if self.selector[1] == DICT: w_attr = space.wrap(self.selector[0]) - dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) + dict_w[w_attr] = unwrap_cell( + space, obj._mapdict_read_storage(self.storageindex)) else: self._copy_attr(obj, new_obj) return new_obj @@ -856,7 +876,8 @@ map = w_obj._get_mapdict_map() if entry.is_valid_for_map(map) and entry.w_method is None: # everything matches, it's incredibly fast - return w_obj._mapdict_read_storage(entry.storageindex) + return unwrap_cell( + map.space, w_obj._mapdict_read_storage(entry.storageindex)) return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) LOAD_ATTR_caching._always_inline_ = True @@ -901,7 +922,8 @@ # Note that if map.terminator is a DevolvedDictTerminator, # map.find_map_attr will always return None if selector[1]==DICT. 
_fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) - return w_obj._mapdict_read_storage(attr.storageindex) + return unwrap_cell( + space, w_obj._mapdict_read_storage(attr.storageindex)) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 return space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -154,6 +154,26 @@ assert obj.map.ever_mutated == True assert obj.map is map1 +def test_mutbox(): + from pypy.objspace.std.intobject import W_IntObject + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", W_IntObject(5)) + assert obj.getdictvalue(space, "a").intval == 5 + w_val = obj._mapdict_read_storage(0) + assert w_val.intval == 5 # still a W_IntObject + + obj.setdictvalue(space, "a", W_IntObject(6)) + assert obj.getdictvalue(space, "a") == 6 # because of the FakeSpace :-( + mutbox1 = obj._mapdict_read_storage(0) + assert mutbox1.intvalue == 6 + + obj.setdictvalue(space, "a", W_IntObject(7)) + assert obj.getdictvalue(space, "a") == 7 # because of the FakeSpace :-( + mutbox2 = obj._mapdict_read_storage(0) + assert mutbox2.intvalue == 7 + assert mutbox2 is mutbox1 + def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): c = Class() From noreply at buildbot.pypy.org Fri Jan 23 10:42:15 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:42:15 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: merge default Message-ID: <20150123094215.23BB21C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75494:d96914fdd66f Date: 2015-01-23 00:29 +0100 http://bitbucket.org/pypy/pypy/changeset/d96914fdd66f/ Log: merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -130,3 +130,10 
@@ mmap(), called rarely during major GCs, if such a major GC occurs at exactly the wrong time), and some of the less rare kind (particularly on Windows tests). + +.. branch: osx-package.py +.. branch: package.py-helpful-error-message + +.. branch: typed-cells + +Improve performance of integer globals and class attributes. diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -78,7 +78,7 @@ {{{ setfield_gc(p13, 0, descr=) setfield_gc(p13, 0, descr=) - setfield_gc(p13, 16, descr=) + setfield_gc(p13, 32, descr=) }}} guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -20,7 +20,7 @@ # we want to propagate knowledge that the result cannot be negative class AbstractAttribute(object): - _immutable_fields_ = ['terminator', 'ever_mutated?'] + _immutable_fields_ = ['terminator'] cache_attrs = None _size_estimate = 0 @@ -28,7 +28,6 @@ self.space = space assert isinstance(terminator, Terminator) self.terminator = terminator - self.ever_mutated = False def read(self, obj, selector): attr = self.find_map_attr(selector) @@ -295,13 +294,15 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'storageindex', 'back'] + _immutable_fields_ = ['selector', 'storageindex', 'back', 'ever_mutated?'] + def __init__(self, selector, back): AbstractAttribute.__init__(self, back.space, back.terminator) self.selector = selector self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 + self.ever_mutated = False def _copy_attr(self, obj, new_obj): w_value = self.read(obj, self.selector) diff --git 
a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -253,6 +253,12 @@ return [None, # hack, do the right renaming from op.args[0] to op.result SpaceOperation("record_known_class", [op.args[0], const_vtable], None)] + def rewrite_op_likely(self, op): + return None # "no real effect" + + def rewrite_op_unlikely(self, op): + return None # "no real effect" + def rewrite_op_raw_malloc_usage(self, op): if self.cpu.translate_support_code or isinstance(op.args[0], Variable): return # the operation disappears diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1376,3 +1376,12 @@ tr.rewrite_operation(op) except Exception, e: assert 'foobar' in str(e) + +def test_likely_unlikely(): + v1 = varoftype(lltype.Bool) + v2 = varoftype(lltype.Bool) + op = SpaceOperation('likely', [v1], v2) + tr = Transformer() + assert tr.rewrite_operation(op) is None + op = SpaceOperation('unlikely', [v1], v2) + assert tr.rewrite_operation(op) is None diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -636,6 +636,30 @@ # ____________________________________________________________ +def likely(condition): + assert isinstance(condition, bool) + return condition + +def unlikely(condition): + assert isinstance(condition, bool) + return condition + +class Entry(ExtRegistryEntry): + _about_ = (likely, unlikely) + + def compute_result_annotation(self, s_x): + from rpython.annotator import model as annmodel + return annmodel.SomeBool() + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + vlist = hop.inputargs(lltype.Bool) + hop.exception_cannot_occur() + return hop.genop(self.instance.__name__, vlist, + 
resulttype=lltype.Bool) + +# ____________________________________________________________ + class r_dict(object): """An RPython dict-like object. diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -365,6 +365,9 @@ 'convert_float_bytes_to_longlong': LLOp(canfold=True), 'convert_longlong_bytes_to_float': LLOp(canfold=True), + 'likely': LLOp(canfold=True), + 'unlikely': LLOp(canfold=True), + # __________ pointer operations __________ 'malloc': LLOp(canmallocgc=True), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -699,6 +699,14 @@ return p[0] op_raw_load.need_result_type = True +def op_likely(x): + assert isinstance(x, bool) + return x + +def op_unlikely(x): + assert isinstance(x, bool) + return x + # ____________________________________________________________ def get_op_impl(opname): diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -4,7 +4,7 @@ from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib import objectmodel, jit, rgc -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, likely from rpython.rlib.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rtyper import rmodel @@ -46,7 +46,11 @@ @jit.oopspec('ordereddict.lookup(d, key, hash, flag)') def ll_call_lookup_function(d, key, hash, flag): fun = d.lookup_function_no & FUNC_MASK - if fun == FUNC_BYTE: + # This likely() here forces gcc to compile the check for fun == FUNC_BYTE + # first. 
Otherwise, this is a regular switch and gcc (at least 4.7) + # compiles this as a series of checks, with the FUNC_BYTE case last. + # It sounds minor, but it is worth 6-7% on a PyPy microbenchmark. + if likely(fun == FUNC_BYTE): return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE) elif fun == FUNC_SHORT: return ll_dict_lookup(d, key, hash, flag, TYPE_SHORT) diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h --- a/rpython/translator/c/src/int.h +++ b/rpython/translator/c/src/int.h @@ -238,6 +238,14 @@ #define OP_BOOL_NOT(x, r) r = !(x) +#ifdef __GNUC__ +# define OP_LIKELY(x, r) r = __builtin_expect((x), 1) +# define OP_UNLIKELY(x, r) r = __builtin_expect((x), 0) +#else +# define OP_LIKELY(x, r) r = (x) +# define OP_UNLIKELY(x, r) r = (x) +#endif + RPY_EXTERN long long op_llong_mul_ovf(long long a, long long b); /* The definitions above can be used with various types */ diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -956,3 +956,18 @@ fn = self.getcompiled(f, [int]) assert fn(0) == 9 + + def test_likely_unlikely(self): + from rpython.rlib.objectmodel import likely, unlikely + + def f(n): + if unlikely(n > 50): + return -10 + if likely(n > 5): + return 42 + return 3 + + fn = self.getcompiled(f, [int]) + assert fn(0) == 3 + assert fn(10) == 42 + assert fn(100) == -10 diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -398,6 +398,10 @@ else: v_exc_type = self.gen_getfield('exc_type', llops) var_no_exc = self.gen_isnull(v_exc_type, llops) + # + # We could add a "var_no_exc is likely true" hint, but it seems + # not to help, so it was commented out again. 
+ #var_no_exc = llops.genop('likely', [var_no_exc], lltype.Bool) block.operations.extend(llops) From noreply at buildbot.pypy.org Fri Jan 23 10:42:16 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:42:16 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: move _write_cell to PlainAttribute Message-ID: <20150123094216.6B5F01C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75495:77a1f797146f Date: 2015-01-23 00:38 +0100 http://bitbucket.org/pypy/pypy/changeset/77a1f797146f/ Log: move _write_cell to PlainAttribute diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -58,22 +58,11 @@ # introduce cells only on the second write, to make immutability for # int fields still work cell = obj._mapdict_read_storage(attr.storageindex) - w_value = self._write_cell(attr.space, cell, w_value) + w_value = attr._write_cell(attr.space, cell, w_value) if w_value is not None: obj._mapdict_write_storage(attr.storageindex, w_value) return True - def _write_cell(self, space, w_cell, w_value): - from pypy.objspace.std.intobject import W_IntObject - assert not isinstance(w_cell, ObjectMutableCell) - if isinstance(w_cell, IntMutableCell) and type(w_value) is W_IntObject: - w_cell.intvalue = w_value.intval - return None - if type(w_value) is W_IntObject: - return IntMutableCell(w_value.intval) - return w_value - - def delete(self, obj, selector): pass @@ -304,6 +293,16 @@ self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False + def _write_cell(self, w_cell, w_value): + from pypy.objspace.std.intobject import W_IntObject + assert not isinstance(w_cell, ObjectMutableCell) + if isinstance(w_cell, IntMutableCell) and type(w_value) is W_IntObject: + w_cell.intvalue = w_value.intval + return None + if type(w_value) is W_IntObject: + return IntMutableCell(w_value.intval) + return w_value + def _copy_attr(self, 
obj, new_obj): w_value = self.read(obj, self.selector) new_obj._get_mapdict_map().add_attr(new_obj, self.selector, w_value) From noreply at buildbot.pypy.org Fri Jan 23 10:42:17 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:42:17 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: rename test Message-ID: <20150123094217.ACEB31C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75496:c6b5082e2dff Date: 2015-01-23 00:39 +0100 http://bitbucket.org/pypy/pypy/changeset/c6b5082e2dff/ Log: rename test diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -154,7 +154,7 @@ assert obj.map.ever_mutated == True assert obj.map is map1 -def test_mutbox(): +def test_mutcell(): from pypy.objspace.std.intobject import W_IntObject cls = Class() obj = cls.instantiate() @@ -165,14 +165,14 @@ obj.setdictvalue(space, "a", W_IntObject(6)) assert obj.getdictvalue(space, "a") == 6 # because of the FakeSpace :-( - mutbox1 = obj._mapdict_read_storage(0) - assert mutbox1.intvalue == 6 + mutcell1 = obj._mapdict_read_storage(0) + assert mutcell1.intvalue == 6 obj.setdictvalue(space, "a", W_IntObject(7)) assert obj.getdictvalue(space, "a") == 7 # because of the FakeSpace :-( - mutbox2 = obj._mapdict_read_storage(0) - assert mutbox2.intvalue == 7 - assert mutbox2 is mutbox1 + mutcell2 = obj._mapdict_read_storage(0) + assert mutcell2.intvalue == 7 + assert mutcell2 is mutcell1 def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): From noreply at buildbot.pypy.org Fri Jan 23 10:42:18 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:42:18 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: keep a flag on PlainAttributes that indicates whether we need to check for a Message-ID: <20150123094218.D28281C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz 
Branch: typed-cells Changeset: r75497:bb52cf0f8734 Date: 2015-01-23 00:47 +0100 http://bitbucket.org/pypy/pypy/changeset/bb52cf0f8734/ Log: keep a flag on PlainAttributes that indicates whether we need to check for a MutableCell diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -41,8 +41,8 @@ result = self._pure_mapdict_read_storage(obj, attr.storageindex) assert not isinstance(result, MutableCell) else: - result = unwrap_cell( - attr.space, obj._mapdict_read_storage(attr.storageindex)) + result = attr._read_cell( + obj._mapdict_read_storage(attr.storageindex)) return result @jit.elidable @@ -58,7 +58,7 @@ # introduce cells only on the second write, to make immutability for # int fields still work cell = obj._mapdict_read_storage(attr.storageindex) - w_value = attr._write_cell(attr.space, cell, w_value) + w_value = attr._write_cell(cell, w_value) if w_value is not None: obj._mapdict_write_storage(attr.storageindex, w_value) return True @@ -283,7 +283,8 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'storageindex', 'back', 'ever_mutated?'] + _immutable_fields_ = ['selector', 'storageindex', 'back', + 'ever_mutated?', 'can_contain_mutable_cell?'] def __init__(self, selector, back): AbstractAttribute.__init__(self, back.space, back.terminator) @@ -292,6 +293,16 @@ self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False + # this flag means: at some point there was an instance that used a + # derivative of this map that had a MutableCell stored into the + # corresponding field. + # if the flag is False, we don't need to unbox the attribute. 
+ self.can_contain_mutable_cell = False + + def _read_cell(self, w_cell): + if not self.can_contain_mutable_cell: + return w_cell + return unwrap_cell(self.space, w_cell) def _write_cell(self, w_cell, w_value): from pypy.objspace.std.intobject import W_IntObject @@ -300,6 +311,8 @@ w_cell.intvalue = w_value.intval return None if type(w_value) is W_IntObject: + if not self.can_contain_mutable_cell: + self.can_contain_mutable_cell = True return IntMutableCell(w_value.intval) return w_value diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -169,11 +169,28 @@ assert mutcell1.intvalue == 6 obj.setdictvalue(space, "a", W_IntObject(7)) - assert obj.getdictvalue(space, "a") == 7 # because of the FakeSpace :-( + assert obj.getdictvalue(space, "a") == 7 # FakeSpace again mutcell2 = obj._mapdict_read_storage(0) assert mutcell2.intvalue == 7 assert mutcell2 is mutcell1 + +def test_mutcell_unwrap_only_if_needed(): + from pypy.objspace.std.intobject import W_IntObject + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", "foo") + assert not obj._get_mapdict_map().can_contain_mutable_cell + obj.setdictvalue(space, "a", W_IntObject(6)) + obj.setdictvalue(space, "a", W_IntObject(6)) + assert obj._get_mapdict_map().can_contain_mutable_cell + + obj._get_mapdict_map().can_contain_mutable_cell = False + mutcell = IntMutableCell(1) + obj._mapdict_write_storage(0, mutcell) + assert obj.getdictvalue(space, "a") is mutcell # not unwrapped + + def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): c = Class() From noreply at buildbot.pypy.org Fri Jan 23 10:42:20 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 10:42:20 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: make mutboxes on the first write Message-ID: <20150123094220.022811C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: 
typed-cells Changeset: r75498:e458fdc1f2e4 Date: 2015-01-23 10:36 +0100 http://bitbucket.org/pypy/pypy/changeset/e458fdc1f2e4/ Log: make mutboxes on the first write diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -39,11 +39,9 @@ not attr.ever_mutated ): result = self._pure_mapdict_read_storage(obj, attr.storageindex) - assert not isinstance(result, MutableCell) else: - result = attr._read_cell( - obj._mapdict_read_storage(attr.storageindex)) - return result + result = obj._mapdict_read_storage(attr.storageindex) + return attr._read_cell(result) @jit.elidable def _pure_mapdict_read_storage(self, obj, storageindex): @@ -55,8 +53,6 @@ return self.terminator._write_terminator(obj, selector, w_value) if not attr.ever_mutated: attr.ever_mutated = True - # introduce cells only on the second write, to make immutability for - # int fields still work cell = obj._mapdict_read_storage(attr.storageindex) w_value = attr._write_cell(cell, w_value) if w_value is not None: @@ -161,6 +157,7 @@ def add_attr(self, obj, selector, w_value): # grumble, jit needs this attr = self._get_new_attr(selector[0], selector[1]) + w_value = attr._write_cell(None, w_value) oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): size_est = (oldattr._size_estimate + attr.size_estimate() diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -159,14 +159,16 @@ cls = Class() obj = cls.instantiate() obj.setdictvalue(space, "a", W_IntObject(5)) - assert obj.getdictvalue(space, "a").intval == 5 - w_val = obj._mapdict_read_storage(0) - assert w_val.intval == 5 # still a W_IntObject + # not wrapped because of the FakeSpace :-( + assert obj.getdictvalue(space, "a") == 5 + mutcell = obj._mapdict_read_storage(0) + assert mutcell.intvalue == 5 obj.setdictvalue(space, "a", 
W_IntObject(6)) - assert obj.getdictvalue(space, "a") == 6 # because of the FakeSpace :-( + assert obj.getdictvalue(space, "a") == 6 # FakeSpace again mutcell1 = obj._mapdict_read_storage(0) assert mutcell1.intvalue == 6 + assert mutcell is mutcell1 obj.setdictvalue(space, "a", W_IntObject(7)) assert obj.getdictvalue(space, "a") == 7 # FakeSpace again From noreply at buildbot.pypy.org Fri Jan 23 11:04:13 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Fri, 23 Jan 2015 11:04:13 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Refactor the LLVMGCRoot stack walker for clarity. Message-ID: <20150123100413.01C451C0483@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r75499:65d0d0e7e277 Date: 2015-01-23 11:03 +0100 http://bitbucket.org/pypy/pypy/changeset/65d0d0e7e277/ Log: Refactor the LLVMGCRoot stack walker for clarity. diff --git a/rpython/memory/gctransform/llvmgcroot.py b/rpython/memory/gctransform/llvmgcroot.py --- a/rpython/memory/gctransform/llvmgcroot.py +++ b/rpython/memory/gctransform/llvmgcroot.py @@ -129,27 +129,31 @@ table contains the stack frame's shape. The shape is a description of the offsets from the frame data base. 
""" + callee_bp = llop.stack_current(llmemory.Address) segment = llop.getarrayitem(LIST_NODE_PTR, self.top_segment, 0) - current_frame = llop.stack_current(llmemory.Address) while True: - retaddr = current_frame.address[1] - frame_data_base = current_frame + 2 * sizeofaddr - shape = hashtable_get(self.gcdata, retaddr) + self._walk_stack_segment(collect_stack_root, callee_bp) + if not segment: + break + callee_bp = segment.frame + segment = segment.next_ + + def _walk_stack_segment(self, collect_stack_root, callee_bp): + while True: + retaddr = callee_bp.address[1] + caller_sp = callee_bp + 2 * sizeofaddr + caller_shape = hashtable_get(self.gcdata, retaddr) i = 0 - while i < len(shape.liveoffsets): - if (frame_data_base + shape.liveoffsets[i]).address[0]: - collect_stack_root(self.gc, frame_data_base + - shape.liveoffsets[i]) + while i < len(caller_shape.liveoffsets): + root_loc = caller_sp + caller_shape.liveoffsets[i] + if root_loc.address[0]: + collect_stack_root(self.gc, root_loc) i += 1 - if shape.framesize & 1: - if segment: - current_frame = segment.frame - segment = segment.next_ - continue - break - current_frame += sizeofaddr + shape.framesize + if caller_shape.framesize & 1: + return + callee_bp += sizeofaddr + caller_shape.framesize def hash_ptr(adr): From noreply at buildbot.pypy.org Fri Jan 23 12:18:59 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 12:18:59 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix test Message-ID: <20150123111859.3969A1C059A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75500:1528d1320101 Date: 2015-01-23 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/1528d1320101/ Log: Fix test diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py --- a/rpython/translator/stm/test/test_inevitable.py +++ b/rpython/translator/stm/test/test_inevitable.py @@ -13,11 +13,17 @@ if self.llinterpreter.inevitable_cause is None: 
self.llinterpreter.inevitable_cause = info + def op_gc_dump_rpy_heap(self): + pass # for test_unsupported_op + + def op_do_malloc_fixedsize(self): + pass def op_do_malloc_fixedsize_clear(self): - pass # just to check that it doesn't turn inevitable - + pass + def op_do_malloc_varsize(self): + pass def op_do_malloc_varsize_clear(self): - pass # just to check that it doesn't turn inevitable + pass class TestTransform: @@ -52,10 +58,10 @@ addr = llmemory.raw_malloc(llmemory.sizeof(X)) def f1(): - llmemory.raw_free(addr) + llop.gc_dump_rpy_heap(lltype.Void) res = self.interpret_inevitable(f1, []) - assert res == 'raw_free' + assert res == 'gc_dump_rpy_heap' def test_raw_getfield(self): X = lltype.Struct('X', ('foo', lltype.Signed)) @@ -120,7 +126,7 @@ lltype.free(p, flavor='raw') res = self.interpret_inevitable(f1, []) - assert res == 'free' + assert res is None def test_raw_malloc_2(self): X = lltype.Struct('X', ('foo', lltype.Signed)) @@ -130,7 +136,7 @@ llmemory.raw_free(addr) res = self.interpret_inevitable(f1, []) - assert res == 'raw_free' + assert res is None def test_unknown_raw_free(self): X = lltype.Struct('X', ('foo', lltype.Signed)) @@ -138,7 +144,7 @@ lltype.free(p, flavor='raw') res = self.interpret_inevitable(f2, [lltype.malloc(X, flavor='raw')]) - assert res == 'free' + assert res is None def test_ext_direct_call_safe(self): @@ -244,7 +250,7 @@ return i res = self.interpret_inevitable(f, [2]) - assert res == 'free' # not setfield or getfield + assert res is None # not setfield or getfield or free def test_do_malloc_llops(self): def f(i): From noreply at buildbot.pypy.org Fri Jan 23 12:31:38 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 12:31:38 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix tests Message-ID: <20150123113138.D76A61C059A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75501:12acb89160b9 Date: 2015-01-23 12:31 +0100 http://bitbucket.org/pypy/pypy/changeset/12acb89160b9/ Log: Fix 
tests diff --git a/pypy/module/__pypy__/test/test_atomic.py b/pypy/module/__pypy__/test/test_atomic.py --- a/pypy/module/__pypy__/test/test_atomic.py +++ b/pypy/module/__pypy__/test/test_atomic.py @@ -3,23 +3,6 @@ from rpython.rtyper.lltypesystem import rffi -def test_bdecode(space): - from pypy.module.__pypy__.interp_atomic import bdecode - def bdec(s, expected): - p = rffi.str2charp(s) - w_obj, q = bdecode(space, p) - assert q == rffi.ptradd(p, len(s)) - rffi.free_charp(p) - w_expected = space.wrap(expected) - assert space.eq_w(w_obj, w_expected) - - bdec("i123e", 123) - bdec("i-123e", -123) - bdec('12:+"*-%&/()=?\x00', '+"*-%&/()=?\x00') - bdec("li123eli456eee", [123, [456]]) - bdec("l5:abcdei2ee", ["abcde", 2]) - - class AppTestAtomic(GenericTestThread): def test_simple(self): diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -4,7 +4,7 @@ class AppTestMinimal: - spaceconfig = dict(usemodules=['__pypy__']) + spaceconfig = dict(usemodules=['__pypy__', 'thread']) def test_signal(self): from __pypy__ import thread From noreply at buildbot.pypy.org Fri Jan 23 13:31:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 13:31:24 +0100 (CET) Subject: [pypy-commit] pypy default: fix fix fix: we must not repeatedly set the quasi-immutable flag to True, Message-ID: <20150123123124.06B951C06CA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75502:98d7d3cb4144 Date: 2015-01-23 13:31 +0100 http://bitbucket.org/pypy/pypy/changeset/98d7d3cb4144/ Log: fix fix fix: we must not repeatedly set the quasi-immutable flag to True, because every time it triggers the logic diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -292,7 +292,8 @@ def delete(self, obj, selector): if selector == self.selector: # ok, 
attribute is deleted - self.ever_mutated = True + if not self.ever_mutated: + self.ever_mutated = True return self.back.copy(obj) new_obj = self.back.delete(obj, selector) if new_obj is not None: From noreply at buildbot.pypy.org Fri Jan 23 13:33:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 13:33:26 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: manual transplant of 98d7d3cb4144 Message-ID: <20150123123326.60CC51C06CA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75503:e4078ea31734 Date: 2015-01-23 13:32 +0100 http://bitbucket.org/pypy/pypy/changeset/e4078ea31734/ Log: manual transplant of 98d7d3cb4144 diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -300,7 +300,8 @@ def delete(self, obj, selector): if selector == self.selector: # ok, attribute is deleted - self.ever_mutated = True + if not self.ever_mutated: + self.ever_mutated = True return self.back.copy(obj) new_obj = self.back.delete(obj, selector) if new_obj is not None: From noreply at buildbot.pypy.org Fri Jan 23 15:54:42 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Jan 2015 15:54:42 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: merge default Message-ID: <20150123145442.48CD21C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75504:fddc86065cd5 Date: 2015-01-23 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/fddc86065cd5/ Log: merge default diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -320,7 +320,8 @@ def delete(self, obj, selector): if selector == self.selector: # ok, attribute is deleted - self.ever_mutated = True + if not self.ever_mutated: + self.ever_mutated = True return self.back.copy(obj) new_obj = self.back.delete(obj, selector) if new_obj is not None: diff --git 
a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -97,14 +97,22 @@ _nowrapper=True, c_type='int') def get_saved_errno(): - """Return the saved value of the errno. This value is saved after a call - to an llexternal function with 'save_err & RFFI_ERRNO_AFTER != 0'.""" + """Return the value of the "saved errno". + This value is saved after a call to a C function, if it was declared + with the flag llexternal(..., save_err=rffi.RFFI_SAVE_ERRNO). + Functions without that flag don't change the saved errno. + """ from rpython.rlib import rthread return intmask(rthread.tlfield_rpy_errno.getraw()) def set_saved_errno(errno): - """Set the saved value of the errno. This value will be used by a - following llexternal function with 'save_err & RFFI_ERRNO_BEFORE != 0'.""" + """Set the value of the saved errno. This value will be used to + initialize the real errno just before calling the following C function, + provided it was declared llexternal(..., save_err=RFFI_READSAVED_ERRNO). + Note also that it is more common to want the real errno to be initially + zero; for that case, use llexternal(..., save_err=RFFI_ZERO_ERRNO_BEFORE) + and then you don't need set_saved_errno(0). + """ from rpython.rlib import rthread rthread.tlfield_rpy_errno.setraw(rffi.cast(INT, errno)) diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -124,10 +124,24 @@ _nowrapper=True, sandboxsafe=True) def GetLastError_saved(): + """Return the value of the "saved LastError". + The C-level GetLastError() is saved there after a call to a C + function, if that C function was declared with the flag + llexternal(..., save_err=rffi.RFFI_SAVE_LASTERROR). + Functions without that flag don't change the saved LastError. 
+ Alternatively, if the function was declared RFFI_SAVE_WSALASTERROR, + then the value of the C-level WSAGetLastError() is saved instead + (into the same "saved LastError" variable). + """ from rpython.rlib import rthread return rffi.cast(lltype.Signed, rthread.tlfield_rpy_lasterror.getraw()) def SetLastError_saved(err): + """Set the value of the saved LastError. This value will be used in + a call to the C-level SetLastError() just before calling the + following C function, provided it was declared + llexternal(..., save_err=RFFI_READSAVED_LASTERROR). + """ from rpython.rlib import rthread rthread.tlfield_rpy_lasterror.setraw(rffi.cast(DWORD, err)) From noreply at buildbot.pypy.org Fri Jan 23 20:38:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 20:38:23 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: hashtable.get(), hashtable.setdefault() Message-ID: <20150123193823.680381C06B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75505:edeba38dd1ee Date: 2015-01-23 20:38 +0100 http://bitbucket.org/pypy/pypy/changeset/edeba38dd1ee/ Log: hashtable.get(), hashtable.setdefault() diff --git a/pypy/module/_stm/hashtable.py b/pypy/module/_stm/hashtable.py --- a/pypy/module/_stm/hashtable.py +++ b/pypy/module/_stm/hashtable.py @@ -4,7 +4,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from rpython.rlib import rstm from rpython.rtyper.annlowlevel import cast_gcref_to_instance @@ -40,6 +40,22 @@ gcref = self.h.get(key) return space.newbool(not not gcref) + @unwrap_spec(key=int, w_default=WrappedDefault(None)) + def get_w(self, space, key, w_default): + gcref = self.h.get(key) + if not gcref: + return w_default + return cast_gcref_to_instance(W_Root, gcref) + + @unwrap_spec(key=int, w_default=WrappedDefault(None)) + def 
setdefault_w(self, space, key, w_default): + gcref = self.h.get(key) + if not gcref: + gcref = cast_instance_to_gcref(w_default) + self.h.set(key, gcref) + return w_default + return cast_gcref_to_instance(W_Root, gcref) + def W_Hashtable___new__(space, w_subtype): r = space.allocate_instance(W_Hashtable, w_subtype) @@ -53,4 +69,6 @@ __setitem__ = interp2app(W_Hashtable.setitem_w), __delitem__ = interp2app(W_Hashtable.delitem_w), __contains__ = interp2app(W_Hashtable.contains_w), + get = interp2app(W_Hashtable.get_w), + setdefault = interp2app(W_Hashtable.setdefault_w), ) diff --git a/pypy/module/_stm/test/test_hashtable.py b/pypy/module/_stm/test/test_hashtable.py --- a/pypy/module/_stm/test/test_hashtable.py +++ b/pypy/module/_stm/test/test_hashtable.py @@ -16,3 +16,20 @@ raises(KeyError, "h[42]") assert h[42+65536] == "bar" raises(KeyError, "del h[42]") + + def test_get_setdefault(self): + import _stm + h = _stm.hashtable() + assert h.get(42) is None + assert h.get(-43, None) is None + assert h.get(44, 81) == 81 + raises(KeyError, "h[42]") + raises(KeyError, "h[-43]") + raises(KeyError, "h[44]") + assert h.setdefault(42) is None + assert h[42] is None + assert h.setdefault(42, "81") is None + assert h[42] is None + assert h.setdefault(44, "-81") == "-81" + assert h[44] == "-81" + assert h[42] is None From noreply at buildbot.pypy.org Fri Jan 23 22:05:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 22:05:06 +0100 (CET) Subject: [pypy-commit] stmgc bag: A branch to add "bags", i.e. unordered lists Message-ID: <20150123210506.9C1CE1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1573:5df1bb87357d Date: 2015-01-23 22:03 +0100 http://bitbucket.org/pypy/stmgc/changeset/5df1bb87357d/ Log: A branch to add "bags", i.e. 
unordered lists From noreply at buildbot.pypy.org Fri Jan 23 22:05:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 22:05:07 +0100 (CET) Subject: [pypy-commit] stmgc bag: getting started Message-ID: <20150123210507.A8F951C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1574:cfd89ce23688 Date: 2015-01-23 22:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/cfd89ce23688/ Log: getting started diff --git a/c7/stm/bag.c b/c7/stm/bag.c new file mode 100644 --- /dev/null +++ b/c7/stm/bag.c @@ -0,0 +1,85 @@ +/* +Design of stmgc's "bag" objects +=============================== + +A "bag" is an unordered list of objects. You can only add objects and +pop a random object. + +Conflicts never occur, but popping may return "the bag looks empty", +which can be wrong in the serialized order. The caller should be +ready to handle this case. The guarantee is that if you get the +result "the bag looks empty" in all threads that may add objects to +it, and afterwards none of the threads adds any object, then at this +point the bag is really empty. + + +Implementation +-------------- + +In raw memory, for each segment, we have a list and a deque: + + abort list deque + +--------------+ +-----------------------+---------------+ + | already | | next items | added in this | + | popped items | | to pop | transaction | + +--------------+ +-----------------------+---------------+ + +Adding objects puts them at the right end of the deque. Popping them +takes them off the left end and stores a copy of the pointer into a +separate list. This list, the "abort list", is only used to re-add +the objects in case the transaction aborts. + +If, when we try to pop, we find that the deque is completely empty, +then we try to "steal" some items from another segment's deque. This +movement is done completely outside the normal STM rules: the objects +remain moved even after an abort. 
More precisely, we take some +objects from the left end of the other segment's deque (but not from +the "added in this transaction" part) and add them to our own deque. +Our own "added in this transaction" part remains empty, and the +objects are not copied in the other transaction's abort list. This +is done with careful compare-and-swaps. +*/ + + +struct stm_bag_seg_s { + struct deque_block_s *deque_left, *deque_middle, *deque_right; + deque_idx_t deque_left_pos, deque_middle_pos, deque_right_pos; + struct list_s *abort_list; +}; + +struct stm_bag_s { + struct stm_bag_seg_s by_segment[STM_NB_SEGMENTS]; +}; + +stm_bag_t *stm_bag_create(void) +{ + int i; + stm_bag_t *bag = malloc(sizeof(stm_bag_t)); + assert(bag); + for (i = 0; i < STM_NB_SEGMENTS; i++) { + struct stm_bag_seg_s *bs = &bag->by_segment[i]; + struct deque_block_s *block = deque_new_block(); + bs->deque_left = block; + bs->deque_middle = block; + bs->deque_right = block; + bs->deque_left_pos = 0; + bs->deque_middle_pos = 0; + bs->deque_right_pos = 0; + LIST_CREATE(bs->abort_list); + } + return bag; +} + +void stm_bag_free(stm_bag_t *bag) +{ + int i; + for (i = 0; i < STM_NB_SEGMENTS; i++) { + struct stm_bag_seg_s *bs = &bag->by_segment[i]; + while (bs->deque_left) { + struct deque_block_s *block = bs->deque_left; + bs->deque_left = block->next; + deque_free_block(block); + } + LIST_FREE(bs->abort_list); + } +} diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -218,3 +218,27 @@ TREE_FIND(*tree, addr, result, return false); return true; } + +/************************************************************/ + +#define DEQUE_BLOCK_SIZE 31 +typedef unsigned char deque_idx_t; + +struct deque_block_s { + struct deque_block_s *next; + uintptr_t items[DEQUE_BLOCK_SIZE]; +}; + +static inline struct deque_block_s *deque_new_block(void) +{ + struct deque_block_s *db = malloc(sizeof(struct deque_block_s)); + if (db == NULL) + stm_fatalerror("out of memory in deque_new_block"); /* 
XXX */ + db->next = NULL; + return db; +} + +static inline void deque_free_block(struct deque_block_s *db) +{ + free(db); +} diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -553,6 +553,13 @@ object_t *object; }; +/* Bags, i.e. unordered lists. */ +typedef struct stm_bag_s stm_bag_t; +stm_bag_t *stm_bag_create(void); +void stm_bag_free(stm_bag_t *); +void stm_bag_add(stm_bag_t *, object_t *); +object_t *stm_bag_try_pop(stm_bag_t *); + /* ==================== END ==================== */ #endif From noreply at buildbot.pypy.org Fri Jan 23 23:12:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 23:12:15 +0100 (CET) Subject: [pypy-commit] stmgc default: Bug Message-ID: <20150123221215.A64C61C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1575:983d271a2f52 Date: 2015-01-23 22:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/983d271a2f52/ Log: Bug diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c --- a/c7/stm/hashtable.c +++ b/c7/stm/hashtable.c @@ -165,8 +165,10 @@ continue; if (remove_unread) { if (entry->object == NULL && - !_stm_was_read_by_anybody((object_t *)entry)) + !_stm_was_read_by_anybody((object_t *)entry)) { + dprintf((" removing dead %p\n", entry)); continue; + } } _insert_clean(biggertable, entry); rc -= 6; @@ -349,6 +351,7 @@ can never grow larger than the current table size. 
*/ assert(count <= table->mask + 1); + dprintf(("compact with %ld items:\n", num_entries_times_6 / 6)); _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/true); } diff --git a/c7/test/test_hashtable.py b/c7/test/test_hashtable.py --- a/c7/test/test_hashtable.py +++ b/c7/test/test_hashtable.py @@ -207,6 +207,35 @@ assert htget(h, 1) == lp1 stm_major_collect() # to get rid of the hashtable object + def test_major_collect_bug2(self): + self.start_transaction() + lp1 = stm_allocate(24) + self.push_root(lp1) + self.commit_transaction() + lp1 = self.pop_root() + # + self.switch(1) + self.start_transaction() + stm_write(lp1) + # + self.switch(0) + self.start_transaction() + h = self.allocate_hashtable() + tl0 = self.tls[self.current_thread] + htset(h, 10, stm_allocate(32), tl0) + htset(h, 11, stm_allocate(32), tl0) + htset(h, 12, stm_allocate(32), tl0) + self.push_root(h) + # + self.switch(1) # in a different thread + stm_major_collect() + # + self.switch(0) # back to the original thread + h = self.pop_root() + assert htget(h, 10) != ffi.NULL + assert htget(h, 11) != ffi.NULL + assert htget(h, 12) != ffi.NULL + class TestRandomHashtable(BaseTestHashtable): @@ -329,7 +358,11 @@ stm_major_collect() # to get rid of the hashtable objects def test_random_multiple_threads(self): - import random + from random import randrange, Random + seed = randrange(0, 10000) + print "----------------------------------------- seed:", seed + random = Random(seed) + # self.start_transaction() self.exchange_threads() self.start_transaction() @@ -351,7 +384,7 @@ if r < 0.05: h = self.allocate_hashtable() - print "allocate_hashtable ->", h + print "allocate_hashtable -> %r/%r" % (h, get_hashtable(h)) self.mirror[h] = {} elif r < 0.10: print "stm_minor_collect" @@ -369,7 +402,7 @@ if not self.mirror[h]: continue key = random.choice(self.mirror[h].keys()) value = self.mirror[h][key] - print "htget(%r, %r) == %r" % (h, key, value) + print "htget(%r/%r, %r) == %r" % (h, get_hashtable(h), key, 
value) self.push_roots() self.push_root(value) result = htget(h, key) @@ -381,7 +414,7 @@ h = random.choice(self.mirror.keys()) key = random.randrange(0, 40) if key in self.mirror[h]: continue - print "htget(%r, %r) == NULL" % (h, key) + print "htget(%r/%r, %r) == NULL" % (h, get_hashtable(h), key) self.push_roots() assert htget(h, key) == ffi.NULL self.pop_roots() @@ -398,7 +431,7 @@ h = random.choice(self.mirror.keys()) key = random.randrange(0, 32) value = random.choice(self.values) - print "htset(%r, %r, %r)" % (h, key, value) + print "htset(%r/%r, %r, %r)" % (h, get_hashtable(h), key, value) self.push_roots() tl = self.tls[self.current_thread] htset(h, key, value, tl) From noreply at buildbot.pypy.org Fri Jan 23 23:12:16 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 23:12:16 +0100 (CET) Subject: [pypy-commit] stmgc default: fix Message-ID: <20150123221216.A98CB1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1576:216c013a4b37 Date: 2015-01-23 23:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/216c013a4b37/ Log: fix diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c --- a/c7/stm/hashtable.c +++ b/c7/stm/hashtable.c @@ -139,10 +139,10 @@ static void _stm_rehash_hashtable(stm_hashtable_t *hashtable, uintptr_t biggercount, - bool remove_unread) + int remove_unread_from_seg) { - dprintf(("rehash %p to %ld, remove_unread=%d\n", - hashtable, biggercount, (int)remove_unread)); + dprintf(("rehash %p to %ld, remove_unread_from_seg=%d\n", + hashtable, biggercount, remove_unread_from_seg)); size_t size = (offsetof(stm_hashtable_table_t, items) + biggercount * sizeof(stm_hashtable_entry_t *)); @@ -159,12 +159,14 @@ uintptr_t j, mask = table->mask; uintptr_t rc = biggertable->resize_counter; + char *segment_base = get_segment_base(remove_unread_from_seg); for (j = 0; j <= mask; j++) { stm_hashtable_entry_t *entry = table->items[j]; if (entry == NULL) continue; - if (remove_unread) { - if (entry->object == NULL && + if 
(remove_unread_from_seg != 0) { + if (((struct stm_hashtable_entry_s *) + REAL_ADDRESS(segment_base, entry))->object == NULL && !_stm_was_read_by_anybody((object_t *)entry)) { dprintf((" removing dead %p\n", entry)); continue; @@ -257,6 +259,7 @@ entry->userdata = stm_hashtable_entry_userdata; entry->index = index; entry->object = NULL; + hashtable->additions = STM_SEGMENT->segment_num; } else { /* for a non-nursery 'hashtableobj', we pretend that the @@ -294,11 +297,11 @@ e->index = index; e->object = NULL; } + hashtable->additions += 0x100; release_privatization_lock(); } write_fence(); /* make sure 'entry' is fully initialized here */ table->items[i] = entry; - hashtable->additions += 1; write_fence(); /* make sure 'table->items' is written here */ VOLATILE_TABLE(table)->resize_counter = rc - 6; /* unlock */ return entry; @@ -311,7 +314,7 @@ biggercount *= 4; else biggercount *= 2; - _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/false); + _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/0); goto restart; } } @@ -340,8 +343,10 @@ stm_hashtable_table_t *table = hashtable->table; assert(!IS_EVEN(table->resize_counter)); - if (hashtable->additions * 4 > table->mask) { - hashtable->additions = 0; + if ((hashtable->additions >> 8) * 4 > table->mask) { + int segment_num = (hashtable->additions & 0xFF); + if (!segment_num) segment_num = 1; + hashtable->additions = segment_num; uintptr_t initial_rc = (table->mask + 1) * 4 + 1; uintptr_t num_entries_times_6 = initial_rc - table->resize_counter; uintptr_t count = INITIAL_HASHTABLE_SIZE; @@ -352,7 +357,7 @@ assert(count <= table->mask + 1); dprintf(("compact with %ld items:\n", num_entries_times_6 / 6)); - _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/true); + _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/segment_num); } table = hashtable->table; diff --git a/c7/test/test_hashtable.py b/c7/test/test_hashtable.py --- a/c7/test/test_hashtable.py +++ 
b/c7/test/test_hashtable.py @@ -216,7 +216,7 @@ # self.switch(1) self.start_transaction() - stm_write(lp1) + stm_write(lp1) # force this page to be shared # self.switch(0) self.start_transaction() @@ -228,7 +228,7 @@ self.push_root(h) # self.switch(1) # in a different thread - stm_major_collect() + stm_major_collect() # force a _stm_rehash_hashtable() # self.switch(0) # back to the original thread h = self.pop_root() From noreply at buildbot.pypy.org Fri Jan 23 23:12:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 23:12:17 +0100 (CET) Subject: [pypy-commit] stmgc bag: Bug Message-ID: <20150123221217.B588C1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1577:e5a6f049c5f8 Date: 2015-01-23 22:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/e5a6f049c5f8/ Log: Bug diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c --- a/c7/stm/hashtable.c +++ b/c7/stm/hashtable.c @@ -165,8 +165,10 @@ continue; if (remove_unread) { if (entry->object == NULL && - !_stm_was_read_by_anybody((object_t *)entry)) + !_stm_was_read_by_anybody((object_t *)entry)) { + dprintf((" removing dead %p\n", entry)); continue; + } } _insert_clean(biggertable, entry); rc -= 6; @@ -349,6 +351,7 @@ can never grow larger than the current table size. 
*/ assert(count <= table->mask + 1); + dprintf(("compact with %ld items:\n", num_entries_times_6 / 6)); _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/true); } diff --git a/c7/test/test_hashtable.py b/c7/test/test_hashtable.py --- a/c7/test/test_hashtable.py +++ b/c7/test/test_hashtable.py @@ -207,6 +207,35 @@ assert htget(h, 1) == lp1 stm_major_collect() # to get rid of the hashtable object + def test_major_collect_bug2(self): + self.start_transaction() + lp1 = stm_allocate(24) + self.push_root(lp1) + self.commit_transaction() + lp1 = self.pop_root() + # + self.switch(1) + self.start_transaction() + stm_write(lp1) + # + self.switch(0) + self.start_transaction() + h = self.allocate_hashtable() + tl0 = self.tls[self.current_thread] + htset(h, 10, stm_allocate(32), tl0) + htset(h, 11, stm_allocate(32), tl0) + htset(h, 12, stm_allocate(32), tl0) + self.push_root(h) + # + self.switch(1) # in a different thread + stm_major_collect() + # + self.switch(0) # back to the original thread + h = self.pop_root() + assert htget(h, 10) != ffi.NULL + assert htget(h, 11) != ffi.NULL + assert htget(h, 12) != ffi.NULL + class TestRandomHashtable(BaseTestHashtable): @@ -329,7 +358,11 @@ stm_major_collect() # to get rid of the hashtable objects def test_random_multiple_threads(self): - import random + from random import randrange, Random + seed = randrange(0, 10000) + print "----------------------------------------- seed:", seed + random = Random(seed) + # self.start_transaction() self.exchange_threads() self.start_transaction() @@ -351,7 +384,7 @@ if r < 0.05: h = self.allocate_hashtable() - print "allocate_hashtable ->", h + print "allocate_hashtable -> %r/%r" % (h, get_hashtable(h)) self.mirror[h] = {} elif r < 0.10: print "stm_minor_collect" @@ -369,7 +402,7 @@ if not self.mirror[h]: continue key = random.choice(self.mirror[h].keys()) value = self.mirror[h][key] - print "htget(%r, %r) == %r" % (h, key, value) + print "htget(%r/%r, %r) == %r" % (h, get_hashtable(h), key, 
value) self.push_roots() self.push_root(value) result = htget(h, key) @@ -381,7 +414,7 @@ h = random.choice(self.mirror.keys()) key = random.randrange(0, 40) if key in self.mirror[h]: continue - print "htget(%r, %r) == NULL" % (h, key) + print "htget(%r/%r, %r) == NULL" % (h, get_hashtable(h), key) self.push_roots() assert htget(h, key) == ffi.NULL self.pop_roots() @@ -398,7 +431,7 @@ h = random.choice(self.mirror.keys()) key = random.randrange(0, 32) value = random.choice(self.values) - print "htset(%r, %r, %r)" % (h, key, value) + print "htset(%r/%r, %r, %r)" % (h, get_hashtable(h), key, value) self.push_roots() tl = self.tls[self.current_thread] htset(h, key, value, tl) From noreply at buildbot.pypy.org Fri Jan 23 23:12:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Jan 2015 23:12:18 +0100 (CET) Subject: [pypy-commit] stmgc bag: fix Message-ID: <20150123221218.C25671C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1578:82bbe5ad5e6a Date: 2015-01-23 23:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/82bbe5ad5e6a/ Log: fix diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c --- a/c7/stm/hashtable.c +++ b/c7/stm/hashtable.c @@ -139,10 +139,10 @@ static void _stm_rehash_hashtable(stm_hashtable_t *hashtable, uintptr_t biggercount, - bool remove_unread) + int remove_unread_from_seg) { - dprintf(("rehash %p to %ld, remove_unread=%d\n", - hashtable, biggercount, (int)remove_unread)); + dprintf(("rehash %p to %ld, remove_unread_from_seg=%d\n", + hashtable, biggercount, remove_unread_from_seg)); size_t size = (offsetof(stm_hashtable_table_t, items) + biggercount * sizeof(stm_hashtable_entry_t *)); @@ -159,12 +159,14 @@ uintptr_t j, mask = table->mask; uintptr_t rc = biggertable->resize_counter; + char *segment_base = get_segment_base(remove_unread_from_seg); for (j = 0; j <= mask; j++) { stm_hashtable_entry_t *entry = table->items[j]; if (entry == NULL) continue; - if (remove_unread) { - if (entry->object == NULL && + if 
(remove_unread_from_seg != 0) { + if (((struct stm_hashtable_entry_s *) + REAL_ADDRESS(segment_base, entry))->object == NULL && !_stm_was_read_by_anybody((object_t *)entry)) { dprintf((" removing dead %p\n", entry)); continue; @@ -257,6 +259,7 @@ entry->userdata = stm_hashtable_entry_userdata; entry->index = index; entry->object = NULL; + hashtable->additions = STM_SEGMENT->segment_num; } else { /* for a non-nursery 'hashtableobj', we pretend that the @@ -294,11 +297,11 @@ e->index = index; e->object = NULL; } + hashtable->additions += 0x100; release_privatization_lock(); } write_fence(); /* make sure 'entry' is fully initialized here */ table->items[i] = entry; - hashtable->additions += 1; write_fence(); /* make sure 'table->items' is written here */ VOLATILE_TABLE(table)->resize_counter = rc - 6; /* unlock */ return entry; @@ -311,7 +314,7 @@ biggercount *= 4; else biggercount *= 2; - _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/false); + _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/0); goto restart; } } @@ -340,8 +343,10 @@ stm_hashtable_table_t *table = hashtable->table; assert(!IS_EVEN(table->resize_counter)); - if (hashtable->additions * 4 > table->mask) { - hashtable->additions = 0; + if ((hashtable->additions >> 8) * 4 > table->mask) { + int segment_num = (hashtable->additions & 0xFF); + if (!segment_num) segment_num = 1; + hashtable->additions = segment_num; uintptr_t initial_rc = (table->mask + 1) * 4 + 1; uintptr_t num_entries_times_6 = initial_rc - table->resize_counter; uintptr_t count = INITIAL_HASHTABLE_SIZE; @@ -352,7 +357,7 @@ assert(count <= table->mask + 1); dprintf(("compact with %ld items:\n", num_entries_times_6 / 6)); - _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/true); + _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/segment_num); } table = hashtable->table; diff --git a/c7/test/test_hashtable.py b/c7/test/test_hashtable.py --- a/c7/test/test_hashtable.py +++ 
b/c7/test/test_hashtable.py @@ -216,7 +216,7 @@ # self.switch(1) self.start_transaction() - stm_write(lp1) + stm_write(lp1) # force this page to be shared # self.switch(0) self.start_transaction() @@ -228,7 +228,7 @@ self.push_root(h) # self.switch(1) # in a different thread - stm_major_collect() + stm_major_collect() # force a _stm_rehash_hashtable() # self.switch(0) # back to the original thread h = self.pop_root() From noreply at buildbot.pypy.org Fri Jan 23 23:41:12 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 23 Jan 2015 23:41:12 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: SSL module: add "cadata" parameter to load_verify_locations(). Message-ID: <20150123224112.C34861C00BE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75506:8e49368b697a Date: 2015-01-21 19:04 +0100 http://bitbucket.org/pypy/pypy/changeset/8e49368b697a/ Log: SSL module: add "cadata" parameter to load_verify_locations(). diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -962,7 +962,8 @@ if ret != 1: raise _ssl_seterror(space, None, -1) - def load_verify_locations_w(self, space, w_cafile=None, w_capath=None): + def load_verify_locations_w(self, space, w_cafile=None, w_capath=None, + w_cadata=None): if space.is_none(w_cafile): cafile = None else: @@ -971,21 +972,112 @@ capath = None else: capath = space.str_w(w_capath) - if cafile is None and capath is None: + if space.is_none(w_cadata): + cadata = None + ca_file_type = -1 + else: + if not space.isinstance_w(w_cadata, space.w_unicode): + ca_file_type = SSL_FILETYPE_ASN1 + cadata = space.bufferstr_w(w_cadata) + else: + ca_file_type = SSL_FILETYPE_PEM + try: + cadata = space.unicode_w(w_cadata).encode('ascii') + except UnicodeEncodeError: + raise oefmt(space.w_TypeError, + "cadata should be a ASCII string or a " + "bytes-like object") + if cafile is None and capath is None 
and cadata is None: raise OperationError(space.w_TypeError, space.wrap( "cafile and capath cannot be both omitted")) - set_errno(0) - ret = libssl_SSL_CTX_load_verify_locations( - self.ctx, cafile, capath) - if ret != 1: - errno = get_errno() - if errno: - libssl_ERR_clear_error() - raise wrap_oserror(space, OSError(errno, ''), - exception_name = 'w_IOError') + # load from cadata + if cadata: + biobuf = libssl_BIO_new_mem_buf(cadata, len(cadata)) + if not biobuf: + raise ssl_error(space, "Can't allocate buffer") + try: + store = libssl_SSL_CTX_get_cert_store(self.ctx) + loaded = 0 + while True: + if ca_file_type == SSL_FILETYPE_ASN1: + cert = libssl_d2i_X509_bio( + biobuf, None) + else: + cert = libssl_PEM_read_bio_X509( + biobuf, None, None, None) + if not cert: + break + try: + r = libssl_X509_STORE_add_cert(store, cert) + finally: + libssl_X509_free(cert) + if not r: + err = libssl_ERR_peek_last_error() + if (libssl_ERR_GET_LIB(err) == ERR_LIB_X509 and + libssl_ERR_GET_REASON(err) == + X509_R_CERT_ALREADY_IN_HASH_TABLE): + # cert already in hash table, not an error + libssl_ERR_clear_error() + else: + break + loaded += 1 + + err = libssl_ERR_peek_last_error() + if (ca_file_type == SSL_FILETYPE_ASN1 and + loaded > 0 and + libssl_ERR_GET_LIB(err) == ERR_LIB_ASN1 and + libssl_ERR_GET_REASON(err) == ASN1_R_HEADER_TOO_LONG): + # EOF ASN1 file, not an error + libssl_ERR_clear_error() + elif (ca_file_type == SSL_FILETYPE_PEM and + loaded > 0 and + libssl_ERR_GET_LIB(err) == ERR_LIB_PEM and + libssl_ERR_GET_REASON(err) == PEM_R_NO_START_LINE): + # EOF PEM file, not an error + libssl_ERR_clear_error + else: + _ssl_seterror(space, None, 0) + finally: + libssl_BIO_free(biobuf) + + # load cafile or capath + if cafile or capath: + set_errno(0) + ret = libssl_SSL_CTX_load_verify_locations( + self.ctx, cafile, capath) + if ret != 1: + errno = get_errno() + if errno: + libssl_ERR_clear_error() + raise wrap_oserror(space, OSError(errno, ''), + exception_name = 'w_IOError') + 
else: + raise _ssl_seterror(space, None, -1) + + def cert_store_stats_w(self, space): + store = libssl_SSL_CTX_get_cert_store(self.ctx) + counters = {'x509': 0, 'x509_ca': 0, 'crl': 0} + for i in range(libssl_sk_X509_OBJECT_num(store[0].c_objs)): + obj = libssl_sk_X509_OBJECT_value(store[0].c_objs, i) + if intmask(obj.c_type) == X509_LU_X509: + counters['x509'] += 1 + if libssl_pypy_X509_OBJECT_data_x509(obj): + counters['x509_ca'] += 1 + elif intmask(obj.c_type) == X509_LU_CRL: + counters['crl'] += 1 else: - raise _ssl_seterror(space, None, -1) - + # Ignore X509_LU_FAIL, X509_LU_RETRY, X509_LU_PKEY. + # As far as I can tell they are internal states and never + # stored in a cert store + pass + w_result = space.newdict() + space.setitem(w_result, + space.wrap('x509'), space.wrap(counters['x509'])) + space.setitem(w_result, + space.wrap('x509_ca'), space.wrap(counters['x509_ca'])) + space.setitem(w_result, + space.wrap('crl'), space.wrap(counters['crl'])) + return w_result _SSLContext.typedef = TypeDef( "_ssl._SSLContext", @@ -993,6 +1085,7 @@ _wrap_socket=interp2app(_SSLContext.descr_wrap_socket), set_ciphers=interp2app(_SSLContext.descr_set_ciphers), load_verify_locations=interp2app(_SSLContext.load_verify_locations_w), + cert_store_stats=interp2app(_SSLContext.cert_store_stats_w), load_cert_chain=interp2app(_SSLContext.load_cert_chain_w), set_default_verify_paths=interp2app(_SSLContext.descr_set_default_verify_paths), diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -276,6 +276,11 @@ ctx.load_verify_locations(self.keycert) ctx.load_verify_locations(cafile=self.keycert, capath=None) + ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) + with open(self.keycert) as f: + cacert_pem = f.read().decode('ascii') + ctx.load_verify_locations(cadata=cacert_pem) + assert ctx.cert_store_stats()["x509_ca"] SSL_CERTIFICATE = """ -----BEGIN CERTIFICATE----- diff --git 
a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -38,6 +38,7 @@ # Unnamed structures are not supported by rffi_platform. # So we replace an attribute access with a macro call. '#define pypy_GENERAL_NAME_dirn(name) (name->d.dirn)', + '#define pypy_X509_OBJECT_data_x509(obj) (obj->data.x509)', ], ) @@ -47,9 +48,11 @@ include_dir='inc32', library_dir='out32'), ]) +X509 = rffi.COpaquePtr('X509') ASN1_STRING = lltype.Ptr(lltype.ForwardReference()) ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM') X509_NAME = rffi.COpaquePtr('X509_NAME') +stack_st_X509_OBJECT = rffi.COpaquePtr('struct stack_st_X509_OBJECT') class CConfigBootstrap: _compilation_info_ = eci @@ -72,6 +75,7 @@ OPENSSL_NO_ECDH = rffi_platform.Defined("OPENSSL_NO_ECDH") OPENSSL_NPN_NEGOTIATED = rffi_platform.Defined("OPENSSL_NPN_NEGOTIATED") SSL_FILETYPE_PEM = rffi_platform.ConstantInteger("SSL_FILETYPE_PEM") + SSL_FILETYPE_ASN1 = rffi_platform.ConstantInteger("SSL_FILETYPE_ASN1") SSL_OP_ALL = rffi_platform.ConstantInteger("SSL_OP_ALL") SSL_OP_NO_SSLv2 = rffi_platform.ConstantInteger("SSL_OP_NO_SSLv2") SSL_OP_NO_SSLv3 = rffi_platform.ConstantInteger("SSL_OP_NO_SSLv3") @@ -102,6 +106,15 @@ SSL_MODE_AUTO_RETRY = rffi_platform.ConstantInteger("SSL_MODE_AUTO_RETRY") SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER = rffi_platform.ConstantInteger("SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER") + ERR_LIB_X509 = rffi_platform.ConstantInteger("ERR_LIB_X509") + ERR_LIB_PEM = rffi_platform.ConstantInteger("ERR_LIB_PEM") + ERR_LIB_ASN1 = rffi_platform.ConstantInteger("ERR_LIB_ASN1") + PEM_R_NO_START_LINE = rffi_platform.ConstantInteger("PEM_R_NO_START_LINE") + ASN1_R_HEADER_TOO_LONG = rffi_platform.ConstantInteger( + "ASN1_R_HEADER_TOO_LONG") + X509_R_CERT_ALREADY_IN_HASH_TABLE = rffi_platform.ConstantInteger( + "X509_R_CERT_ALREADY_IN_HASH_TABLE") + NID_undef = rffi_platform.ConstantInteger("NID_undef") NID_subject_alt_name = rffi_platform.ConstantInteger("NID_subject_alt_name") 
GEN_DIRNAME = rffi_platform.ConstantInteger("GEN_DIRNAME") @@ -128,6 +141,17 @@ X509_extension_st = rffi_platform.Struct( 'struct X509_extension_st', [('value', ASN1_STRING)]) + x509_store_st = rffi_platform.Struct( + 'struct x509_store_st', + [('objs', stack_st_X509_OBJECT)]) + + x509_object_st = rffi_platform.Struct( + 'struct x509_object_st', + [('type', rffi.INT)]) + + X509_LU_X509 = rffi_platform.ConstantInteger("X509_LU_X509") + X509_LU_CRL = rffi_platform.ConstantInteger("X509_LU_CRL") + X509V3_EXT_D2I = lltype.FuncType([rffi.VOIDP, rffi.CCHARPP, rffi.LONG], rffi.VOIDP) v3_ext_method = rffi_platform.Struct( @@ -150,12 +174,6 @@ ('name', rffi.CCHARP), ]) - OBJ_NAME_st = rffi_platform.Struct( - 'OBJ_NAME', - [('alias', rffi.INT), - ('name', rffi.CCHARP), - ]) - for k, v in rffi_platform.configure(CConfig).items(): globals()[k] = v @@ -166,9 +184,10 @@ SSL_CIPHER = rffi.COpaquePtr('SSL_CIPHER') SSL = rffi.COpaquePtr('SSL') BIO = rffi.COpaquePtr('BIO') -X509 = rffi.COpaquePtr('X509') X509_NAME_ENTRY = rffi.CArrayPtr(X509_name_entry_st) X509_EXTENSION = rffi.CArrayPtr(X509_extension_st) +X509_STORE = rffi.CArrayPtr(x509_store_st) +X509_OBJECT = lltype.Ptr(x509_object_st) X509V3_EXT_METHOD = rffi.CArrayPtr(v3_ext_method) ASN1_OBJECT = rffi.COpaquePtr('ASN1_OBJECT') ASN1_STRING.TO.become(asn1_string_st) @@ -217,6 +236,7 @@ ssl_external('SSLv23_method', [], SSL_METHOD) ssl_external('SSL_CTX_use_PrivateKey_file', [SSL_CTX, rffi.CCHARP, rffi.INT], rffi.INT) ssl_external('SSL_CTX_use_certificate_chain_file', [SSL_CTX, rffi.CCHARP], rffi.INT) +ssl_external('SSL_CTX_get_cert_store', [SSL_CTX], X509_STORE) ssl_external('SSL_CTX_get_options', [SSL_CTX], rffi.LONG, macro=True) ssl_external('SSL_CTX_set_options', [SSL_CTX, rffi.LONG], rffi.LONG, macro=True) if HAVE_SSL_CTX_CLEAR_OPTIONS: @@ -264,6 +284,7 @@ ssl_external('X509_NAME_ENTRY_get_data', [X509_NAME_ENTRY], ASN1_STRING) ssl_external('i2d_X509', [X509, rffi.CCHARPP], rffi.INT) ssl_external('X509_free', [X509], 
lltype.Void, releasegil=False) +ssl_external('X509_check_ca', [X509], rffi.INT) ssl_external('X509_get_notBefore', [X509], ASN1_TIME, macro=True) ssl_external('X509_get_notAfter', [X509], ASN1_TIME, macro=True) ssl_external('X509_get_serialNumber', [X509], ASN1_INTEGER) @@ -272,6 +293,7 @@ ssl_external('X509_get_ext', [X509, rffi.INT], X509_EXTENSION) ssl_external('X509V3_EXT_get', [X509_EXTENSION], X509V3_EXT_METHOD) +ssl_external('X509_STORE_add_cert', [X509_STORE, X509], rffi.INT) ssl_external('OBJ_obj2txt', [rffi.CCHARP, rffi.INT, ASN1_OBJECT, rffi.INT], rffi.INT) @@ -293,6 +315,13 @@ macro=True) ssl_external('sk_GENERAL_NAME_value', [GENERAL_NAMES, rffi.INT], GENERAL_NAME, macro=True) +ssl_external('sk_X509_OBJECT_num', [stack_st_X509_OBJECT], rffi.INT, + macro=True) +ssl_external('sk_X509_OBJECT_value', [stack_st_X509_OBJECT, rffi.INT], + X509_OBJECT, macro=True) +ssl_external('pypy_X509_OBJECT_data_x509', [X509_OBJECT], X509, + macro=True) + ssl_external('GENERAL_NAME_print', [BIO, GENERAL_NAME], rffi.INT) ssl_external('pypy_GENERAL_NAME_dirn', [GENERAL_NAME], X509_NAME, macro=True) @@ -306,6 +335,8 @@ ssl_external('ERR_peek_last_error', [], rffi.INT) ssl_external('ERR_error_string', [rffi.ULONG, rffi.CCHARP], rffi.CCHARP) ssl_external('ERR_clear_error', [], lltype.Void) +ssl_external('ERR_GET_LIB', [rffi.ULONG], rffi.INT, macro=True) +ssl_external('ERR_GET_REASON', [rffi.ULONG], rffi.INT, macro=True) # 'releasegil=False' here indicates that this function will be called # with the GIL held, and so is allowed to run in a RPython __del__ method. 
@@ -323,10 +354,14 @@ ssl_external('BIO_s_file', [], BIO_METHOD) ssl_external('BIO_new', [BIO_METHOD], BIO) ssl_external('BIO_set_nbio', [BIO, rffi.INT], rffi.INT, macro=True) +ssl_external('BIO_new_mem_buf', [rffi.VOIDP, rffi.INT], BIO) ssl_external('BIO_free', [BIO], rffi.INT) ssl_external('BIO_reset', [BIO], rffi.INT, macro=True) ssl_external('BIO_read_filename', [BIO, rffi.CCHARP], rffi.INT, macro=True) ssl_external('BIO_gets', [BIO, rffi.CCHARP, rffi.INT], rffi.INT) +ssl_external('d2i_X509_bio', [BIO, rffi.VOIDP], X509) +ssl_external('PEM_read_bio_X509', + [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) ssl_external('PEM_read_bio_X509_AUX', [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) diff --git a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -493,7 +493,7 @@ def prepare_code(self): yield '#ifdef %s' % self.macro yield 'int i;' - yield 'char *p = %s;' % self.name + yield 'const char *p = %s;' % self.name yield 'dump("defined", 1);' yield 'for (i = 0; p[i] != 0; i++ ) {' yield ' printf("value_%d: %d\\n", i, (int)(unsigned char)p[i]);' From noreply at buildbot.pypy.org Fri Jan 23 23:41:14 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 23 Jan 2015 23:41:14 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: SSL: Add support for npn_protocols Message-ID: <20150123224114.1485A1C00BE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75507:75d21c757ba1 Date: 2014-12-16 18:40 +0100 http://bitbucket.org/pypy/pypy/changeset/75d21c757ba1/ Log: SSL: Add support for npn_protocols diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,7 +1,8 @@ from rpython.rlib import rpoll, rsocket -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, widen, r_uint from 
rpython.rlib.ropenssl import * from rpython.rlib.rposix import get_errno, set_errno +from rpython.rlib.rweakref import RWeakValueDictionary from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.baseobjspace import W_Root @@ -72,6 +73,10 @@ constants["OP_NO_SSLv2"] = SSL_OP_NO_SSLv2 constants["OP_NO_SSLv3"] = SSL_OP_NO_SSLv3 constants["OP_NO_TLSv1"] = SSL_OP_NO_TLSv1 +constants["HAS_SNI"] = HAS_SNI +constants["HAS_ECDH"] = True # To break the test suite +constants["HAS_NPN"] = HAS_NPN +constants["HAS_TLS_UNIQUE"] = True # To break the test suite constants["OPENSSL_VERSION_NUMBER"] = OPENSSL_VERSION_NUMBER ver = OPENSSL_VERSION_NUMBER @@ -95,6 +100,54 @@ space.wrap(errno), space.wrap(msg)) return OperationError(w_exception_class, w_exception) +class SSLNpnProtocols(object): + + def __init__(self, ctx, protos): + self.protos = protos + self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos) + NPN_STORAGE.set(r_uint(rffi.cast(rffi.UINT, self.buf)), self) + + # set both server and client callbacks, because the context + # can be used to create both types of sockets + libssl_SSL_CTX_set_next_protos_advertised_cb( + ctx, self.advertiseNPN_cb, self.buf) + libssl_SSL_CTX_set_next_proto_select_cb( + ctx, self.selectNPN_cb, self.buf) + + def __del__(self): + rffi.free_nonmovingbuffer( + self.protos, self.buf, self.pinned, self.is_raw) + + @staticmethod + def advertiseNPN_cb(s, data_ptr, len_ptr, args): + npn = NPN_STORAGE.get(r_uint(rffi.cast(rffi.UINT, args))) + if npn and npn.protos: + data_ptr[0] = npn.buf + len_ptr[0] = rffi.cast(rffi.UINT, len(npn.protos)) + else: + data_ptr[0] = lltype.nullptr(rffi.CCHARP.TO) + len_ptr[0] = rffi.cast(rffi.UINT, 0) + + return rffi.cast(rffi.INT, SSL_TLSEXT_ERR_OK) + + @staticmethod + def selectNPN_cb(s, out_ptr, outlen_ptr, server, server_len, args): + npn = NPN_STORAGE.get(r_uint(rffi.cast(rffi.UINT, args))) + if npn and npn.protos: + client = npn.buf + client_len = len(npn.protos) + else: + client = 
lltype.nullptr(rffi.CCHARP.TO) + client_len = 0 + + libssl_SSL_select_next_proto(out_ptr, outlen_ptr, + server, server_len, + client, client_len) + return rffi.cast(rffi.INT, SSL_TLSEXT_ERR_OK) + +NPN_STORAGE = RWeakValueDictionary(r_uint, SSLNpnProtocols) + + if HAVE_OPENSSL_RAND: # helper routines for seeding the SSL PRNG @unwrap_spec(string=str, entropy=float) @@ -452,6 +505,15 @@ else: return _decode_certificate(space, self.peer_cert) + def selected_npn_protocol(self, space): + with lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as out_ptr: + with lltype.scoped_alloc(rffi.UINTP.TO, 1) as len_ptr: + libssl_SSL_get0_next_proto_negotiated(self.ssl, + out_ptr, len_ptr) + if out_ptr[0]: + return space.wrap( + rffi.charpsize2str(out_ptr[0], widen(len_ptr[0]))) + _SSLSocket.typedef = TypeDef( "_ssl._SSLSocket", @@ -462,6 +524,7 @@ peer_certificate=interp2app(_SSLSocket.peer_certificate), cipher=interp2app(_SSLSocket.cipher), shutdown=interp2app(_SSLSocket.shutdown), + selected_npn_protocol = interp2app(_SSLSocket.selected_npn_protocol), ) @@ -1079,6 +1142,15 @@ space.wrap('crl'), space.wrap(counters['crl'])) return w_result + @unwrap_spec(protos='bufferstr') + def set_npn_protocols_w(self, space, protos): + if not HAS_NPN: + raise oefmt(space.w_NotImplementedError, + "The NPN extension requires OpenSSL 1.0.1 or later.") + + self.npn_protocols = SSLNpnProtocols(self.ctx, protos) + + _SSLContext.typedef = TypeDef( "_ssl._SSLContext", __new__=interp2app(_SSLContext.descr_new), @@ -1088,6 +1160,7 @@ cert_store_stats=interp2app(_SSLContext.cert_store_stats_w), load_cert_chain=interp2app(_SSLContext.load_cert_chain_w), set_default_verify_paths=interp2app(_SSLContext.descr_set_default_verify_paths), + _set_npn_protocols=interp2app(_SSLContext.set_npn_protocols_w), options=GetSetProperty(_SSLContext.descr_get_options, _SSLContext.descr_set_options), diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ 
b/pypy/module/_ssl/test/test_ssl.py @@ -222,6 +222,15 @@ raises(ssl.SSLError, ss.write, "hello\n") del ss; gc.collect() + def test_npn_protocol(self): + import socket, _ssl, gc + ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) + ctx._set_npn_protocols(b'\x08http/1.1\x06spdy/2') + ss = ctx._wrap_socket(self.s, True, + server_hostname="svn.python.org") + self.s.close() + del ss; gc.collect() + class AppTestConnectedSSL_Timeout(AppTestConnectedSSL): # Same tests, with a socket timeout diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -83,6 +83,7 @@ SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS = rffi_platform.ConstantInteger( "SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS") HAS_SNI = rffi_platform.Defined("SSL_CTRL_SET_TLSEXT_HOSTNAME") + HAS_NPN = rffi_platform.Defined("OPENSSL_NPN_NEGOTIATED") SSL_VERIFY_NONE = rffi_platform.ConstantInteger("SSL_VERIFY_NONE") SSL_VERIFY_PEER = rffi_platform.ConstantInteger("SSL_VERIFY_PEER") SSL_VERIFY_FAIL_IF_NO_PEER_CERT = rffi_platform.ConstantInteger("SSL_VERIFY_FAIL_IF_NO_PEER_CERT") @@ -105,6 +106,7 @@ "SSL_RECEIVED_SHUTDOWN") SSL_MODE_AUTO_RETRY = rffi_platform.ConstantInteger("SSL_MODE_AUTO_RETRY") SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER = rffi_platform.ConstantInteger("SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER") + SSL_TLSEXT_ERR_OK = rffi_platform.ConstantInteger("SSL_TLSEXT_ERR_OK") ERR_LIB_X509 = rffi_platform.ConstantInteger("ERR_LIB_X509") ERR_LIB_PEM = rffi_platform.ConstantInteger("ERR_LIB_PEM") @@ -365,6 +367,24 @@ ssl_external('PEM_read_bio_X509_AUX', [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) +if HAS_NPN: + SSL_NEXT_PROTOS_ADV_CB = lltype.Ptr(lltype.FuncType( + [SSL, rffi.CCHARPP, rffi.UINTP, rffi.VOIDP], rffi.INT)) + ssl_external('SSL_CTX_set_next_protos_advertised_cb', + [SSL_CTX, SSL_NEXT_PROTOS_ADV_CB, rffi.VOIDP], lltype.Void) + SSL_NEXT_PROTOS_SEL_CB = lltype.Ptr(lltype.FuncType( + [SSL, rffi.CCHARPP, rffi.UCHARP, rffi.CCHARP, rffi.UINT, rffi.VOIDP], + 
rffi.INT)) + ssl_external('SSL_CTX_set_next_proto_select_cb', + [SSL_CTX, SSL_NEXT_PROTOS_SEL_CB, rffi.VOIDP], lltype.Void) + ssl_external( + 'SSL_select_next_proto', [rffi.CCHARPP, rffi.UCHARP, + rffi.CCHARP, rffi.UINT, + rffi.CCHARP, rffi.UINT], rffi.INT) + ssl_external( + 'SSL_get0_next_proto_negotiated', [ + SSL, rffi.CCHARPP, rffi.UINTP], lltype.Void) + EVP_MD_CTX = rffi.COpaquePtr('EVP_MD_CTX', compilation_info=eci) EVP_MD = lltype.Ptr(EVP_MD_st) From noreply at buildbot.pypy.org Sat Jan 24 10:35:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 10:35:01 +0100 (CET) Subject: [pypy-commit] stmgc bag: First tests pass Message-ID: <20150124093501.7BB661C0499@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1579:a46480cfd06d Date: 2015-01-24 10:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/a46480cfd06d/ Log: First tests pass diff --git a/c7/stm/bag.c b/c7/stm/bag.c --- a/c7/stm/bag.c +++ b/c7/stm/bag.c @@ -42,8 +42,7 @@ struct stm_bag_seg_s { - struct deque_block_s *deque_left, *deque_middle, *deque_right; - deque_idx_t deque_left_pos, deque_middle_pos, deque_right_pos; + uintptr_t *deque_left, *deque_middle, *deque_right; struct list_s *abort_list; }; @@ -59,12 +58,9 @@ for (i = 0; i < STM_NB_SEGMENTS; i++) { struct stm_bag_seg_s *bs = &bag->by_segment[i]; struct deque_block_s *block = deque_new_block(); - bs->deque_left = block; - bs->deque_middle = block; - bs->deque_right = block; - bs->deque_left_pos = 0; - bs->deque_middle_pos = 0; - bs->deque_right_pos = 0; + bs->deque_left = &block->items[0]; + bs->deque_middle = &block->items[0]; + bs->deque_right = &block->items[0]; LIST_CREATE(bs->abort_list); } return bag; @@ -75,11 +71,49 @@ int i; for (i = 0; i < STM_NB_SEGMENTS; i++) { struct stm_bag_seg_s *bs = &bag->by_segment[i]; - while (bs->deque_left) { - struct deque_block_s *block = bs->deque_left; - bs->deque_left = block->next; + struct deque_block_s *block = deque_block(bs->deque_left); + while (block 
!= NULL) { + struct deque_block_s *nextblock = block->next; deque_free_block(block); + block = nextblock; } LIST_FREE(bs->abort_list); } } + +void stm_bag_add(stm_bag_t *bag, object_t *newobj) +{ + int i = STM_SEGMENT->segment_num - 1; + struct stm_bag_seg_s *bs = &bag->by_segment[i]; + struct deque_block_s *block = deque_block(bs->deque_right); + + *bs->deque_right++ = (uintptr_t)newobj; + + if (bs->deque_right == &block->items[DEQUE_BLOCK_SIZE]) { + assert(block->next == NULL); + block->next = deque_new_block(); + bs->deque_right = &block->next->items[0]; + } +} + +object_t *stm_bag_try_pop(stm_bag_t *bag) +{ + int i = STM_SEGMENT->segment_num - 1; + struct stm_bag_seg_s *bs = &bag->by_segment[i]; + if (bs->deque_left == bs->deque_right) { + return NULL; + } + struct deque_block_s *block = deque_block(bs->deque_left); + uintptr_t result = *bs->deque_left++; + + if (bs->deque_left == &block->items[DEQUE_BLOCK_SIZE]) { + bs->deque_left = &block->next->items[0]; + deque_free_block(block); + } + return (object_t *)result; +} + +void stm_bag_tracefn(stm_bag_t *bag, void visit(object_t **)) +{ + abort(); +} diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -193,3 +193,22 @@ missing: return false; } + + +/************************************************************/ + +static struct deque_block_s *deque_new_block(void) +{ + void *mem; + struct deque_block_s *db; + size_t size = sizeof(struct deque_block_s); + + assert((size & (size - 1)) == 0); /* a power of two */ + + if (posix_memalign(&mem, size, size) != 0) + stm_fatalerror("out of memory in deque_new_block"); /* XXX */ + + db = (struct deque_block_s *)mem; + db->next = NULL; + return db; +} diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -222,23 +222,27 @@ /************************************************************/ #define DEQUE_BLOCK_SIZE 31 -typedef unsigned char deque_idx_t; struct deque_block_s { struct deque_block_s *next; uintptr_t 
items[DEQUE_BLOCK_SIZE]; }; -static inline struct deque_block_s *deque_new_block(void) -{ - struct deque_block_s *db = malloc(sizeof(struct deque_block_s)); - if (db == NULL) - stm_fatalerror("out of memory in deque_new_block"); /* XXX */ - db->next = NULL; - return db; -} +static struct deque_block_s *deque_new_block(void); static inline void deque_free_block(struct deque_block_s *db) { free(db); } + +static inline struct deque_block_s *deque_block(uintptr_t *inner_ptr) +{ + size_t size = sizeof(struct deque_block_s); + return (struct deque_block_s *)(((uintptr_t)inner_ptr) & ~(size - 1)); +} + +static inline bool deque_index_equal(uintptr_t *inner_ptr, uintptr_t index) +{ + struct deque_block_s *block = deque_block(inner_ptr); + return (inner_ptr == &block->items[index]); +} diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -39,3 +39,4 @@ #include "stm/rewind_setjmp.c" #include "stm/finalizer.c" #include "stm/hashtable.c" +#include "stm/bag.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -559,6 +559,7 @@ void stm_bag_free(stm_bag_t *); void stm_bag_add(stm_bag_t *, object_t *); object_t *stm_bag_try_pop(stm_bag_t *); +void stm_bag_tracefn(stm_bag_t *, void (object_t **)); /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -178,6 +178,16 @@ void _set_hashtable(object_t *obj, stm_hashtable_t *h); stm_hashtable_t *_get_hashtable(object_t *obj); + +typedef struct stm_bag_s stm_bag_t; +stm_bag_t *stm_bag_create(void); +void stm_bag_free(stm_bag_t *); +void stm_bag_add(stm_bag_t *, object_t *); +object_t *stm_bag_try_pop(stm_bag_t *); +void stm_bag_tracefn(stm_bag_t *, void (object_t **)); + +void _set_bag(object_t *obj, stm_bag_t *h); +stm_bag_t *_get_bag(object_t *obj); """) @@ -308,6 +318,20 @@ return *(stm_hashtable_t *TLPREFIX *)field_addr; } +void _set_bag(object_t *obj, stm_bag_t *bag) +{ + stm_char 
*field_addr = ((stm_char*)obj); + field_addr += SIZEOF_MYOBJ; /* header */ + *(stm_bag_t *TLPREFIX *)field_addr = bag; +} + +stm_bag_t *_get_bag(object_t *obj) +{ + stm_char *field_addr = ((stm_char*)obj); + field_addr += SIZEOF_MYOBJ; /* header */ + return *(stm_bag_t *TLPREFIX *)field_addr; +} + void _set_ptr(object_t *obj, int n, object_t *v) { long nrefs = (long)((myobj_t*)obj)->type_id - 421420; @@ -344,6 +368,9 @@ if (myobj->type_id == 421418) { /* hashtable entry */ return sizeof(struct stm_hashtable_entry_s); } + if (myobj->type_id == 421417) { /* bag */ + return sizeof(struct myobj_s) + 1 * sizeof(void*); + } /* basic case: tid equals 42 plus the size of the object */ assert(myobj->type_id >= 42 + sizeof(struct myobj_s)); assert((myobj->type_id - 42) >= 16); @@ -374,6 +401,12 @@ object_t **ref = &((struct stm_hashtable_entry_s *)myobj)->object; visit(ref); } + if (myobj->type_id == 421417) { + /* bag */ + stm_bag_t *b = *((stm_bag_t **)(myobj + 1)); + stm_bag_tracefn(b, visit); + return; + } if (myobj->type_id < 421420) { /* basic case: no references */ return; @@ -394,6 +427,7 @@ struct myobj_s *myobj = (struct myobj_s*)obj; assert(myobj->type_id != 421419); assert(myobj->type_id != 421418); + assert(myobj->type_id != 421417); if (myobj->type_id < 421420) { /* basic case: no references */ return; @@ -514,6 +548,18 @@ assert lib._get_type_id(o) == 421419 return lib._get_hashtable(o) +def stm_allocate_bag(): + o = lib.stm_allocate(16) + tid = 421417 + lib._set_type_id(o, tid) + h = lib.stm_bag_create() + lib._set_bag(o, h) + return o + +def get_bag(o): + assert lib._get_type_id(o) == 421417 + return lib._get_bag(o) + def stm_get_weakref(o): return lib._get_weakref(o) diff --git a/c7/test/test_bag.py b/c7/test/test_bag.py new file mode 100644 --- /dev/null +++ b/c7/test/test_bag.py @@ -0,0 +1,69 @@ +from support import * +import py + + +class BagLooksEmpty(Exception): + pass + +def b_add(o, nvalue): + b = get_bag(o) + lib.stm_bag_add(b, nvalue) + +def 
b_pop(o): + b = get_bag(o) + r = lib.stm_bag_try_pop(b) + if not r: + raise BagLooksEmpty + return r + + +class BaseTestBag(BaseTest): + + def setup_method(self, meth): + BaseTest.setup_method(self, meth) + # + @ffi.callback("void(object_t *)") + def light_finalizer(obj): + print 'light_finalizer:', obj + try: + assert lib._get_type_id(obj) == 421417 + self.seen_bags -= 1 + except: + self.errors.append(sys.exc_info()[2]) + raise + + lib.stmcb_light_finalizer = light_finalizer + self._light_finalizer_keepalive = light_finalizer + self.seen_bags = 0 + self.errors = [] + + def teardown_method(self, meth): + BaseTest.teardown_method(self, meth) + lib.stmcb_light_finalizer = ffi.NULL + assert self.errors == [] + assert self.seen_bags == 0 + + def allocate_bag(self): + q = stm_allocate_bag() + lib.stm_enable_light_finalizer(q) + self.seen_bags += 1 + return q + + +class TestBag(BaseTestBag): + + def test_small_push_pop(self): + self.start_transaction() + q = self.allocate_bag() + lp1 = stm_allocate(16) + lp2 = stm_allocate(16) + for i in range(4): + b_add(q, lp1) + b_add(q, lp2) + for j in range(4): + got = b_pop(q) + assert got == lp1 + got = b_pop(q) + assert got == lp2 + py.test.raises(BagLooksEmpty, b_pop, q) + py.test.raises(BagLooksEmpty, b_pop, q) From noreply at buildbot.pypy.org Sat Jan 24 10:35:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 10:35:57 +0100 (CET) Subject: [pypy-commit] stmgc bag: one more passing test Message-ID: <20150124093557.A62D51C0499@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1580:93786da12927 Date: 2015-01-24 10:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/93786da12927/ Log: one more passing test diff --git a/c7/test/test_bag.py b/c7/test/test_bag.py --- a/c7/test/test_bag.py +++ b/c7/test/test_bag.py @@ -67,3 +67,15 @@ assert got == lp2 py.test.raises(BagLooksEmpty, b_pop, q) py.test.raises(BagLooksEmpty, b_pop, q) + + def test_large_push_pop(self): + self.start_transaction() 
+ q = self.allocate_bag() + lps = [stm_allocate(16) for i in range(65)] + for lp in lps: + b_add(q, lp) + for lp in lps: + got = b_pop(q) + assert got == lp + py.test.raises(BagLooksEmpty, b_pop, q) + py.test.raises(BagLooksEmpty, b_pop, q) From noreply at buildbot.pypy.org Sat Jan 24 10:56:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 10:56:51 +0100 (CET) Subject: [pypy-commit] stmgc bag: in-progress Message-ID: <20150124095651.1187F1C04DB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1581:22510e2df160 Date: 2015-01-24 10:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/22510e2df160/ Log: in-progress diff --git a/c7/stm/bag.c b/c7/stm/bag.c --- a/c7/stm/bag.c +++ b/c7/stm/bag.c @@ -104,16 +104,34 @@ return NULL; } struct deque_block_s *block = deque_block(bs->deque_left); + bool any_old_item_to_pop = (bs->deque_left != bs->deque_middle); uintptr_t result = *bs->deque_left++; if (bs->deque_left == &block->items[DEQUE_BLOCK_SIZE]) { bs->deque_left = &block->next->items[0]; deque_free_block(block); } + if (!any_old_item_to_pop) { + bs->deque_middle = bs->deque_left; + } return (object_t *)result; } -void stm_bag_tracefn(stm_bag_t *bag, void visit(object_t **)) +void stm_bag_tracefn(stm_bag_t *bag, void trace(object_t **)) { - abort(); + if (trace == TRACE_FOR_MINOR_COLLECTION) { + /* only trace the items added in the current transaction; + the rest is already old and cannot point into the nursery. 
*/ + int i = STM_SEGMENT->segment_num - 1; + struct stm_bag_seg_s *bs = &bag->by_segment[i]; + + deque_trace(bs->deque_middle, bs->deque_right, trace); + } + else { + int i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_bag_seg_s *bs = &bag->by_segment[i]; + deque_trace(bs->deque_left, bs->deque_right, trace); + } + } } diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -212,3 +212,29 @@ db->next = NULL; return db; } + +static void deque_trace(uintptr_t *start, uintptr_t *stop, + void trace(object_t **)) +{ + struct deque_block_s *block = deque_block(start); + struct deque_block_s *last_block = deque_block(stop); + uintptr_t *end; + + while (1) { + if (block == last_block) + end = stop; + else + end = &block->items[DEQUE_BLOCK_SIZE]; + + while (start != end) { + trace((object_t **)start); + start++; + } + + if (block == last_block) + break; + + block = block->next; + start = &block->items[0]; + } +} diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -246,3 +246,5 @@ struct deque_block_s *block = deque_block(inner_ptr); return (inner_ptr == &block->items[index]); } + +static void deque_trace(uintptr_t *start, uintptr_t *stop, void (object_t **)); diff --git a/c7/test/test_bag.py b/c7/test/test_bag.py --- a/c7/test/test_bag.py +++ b/c7/test/test_bag.py @@ -79,3 +79,55 @@ assert got == lp py.test.raises(BagLooksEmpty, b_pop, q) py.test.raises(BagLooksEmpty, b_pop, q) + + def test_keepalive_minor(self): + self.start_transaction() + b = self.allocate_bag() + self.push_root(b) + lp1 = stm_allocate(16) + stm_set_char(lp1, 'N') + b_add(b, lp1) + stm_minor_collect() + b = self.pop_root() + lp1b = b_pop(b) + assert lp1b != ffi.NULL + assert stm_get_char(lp1b) == 'N' + assert lp1b != lp1 + + def test_keepalive_major(self): + self.start_transaction() + b = self.allocate_bag() + self.push_root(b) + lp1 = stm_allocate(16) + stm_set_char(lp1, 'N') + b_add(b, lp1) + stm_major_collect() + b = self.pop_root() 
+ lp1b = b_pop(b) + assert lp1b != ffi.NULL + assert stm_get_char(lp1b) == 'N' + assert lp1b != lp1 + + def test_transaction_local(self): + self.start_transaction() + q = self.allocate_bag() + self.push_root(q) + self.commit_transaction() + q = self.pop_root() + # + self.start_transaction() + lp1 = stm_allocate(16) + b_add(q, lp1) + # + self.switch(1) + self.start_transaction() + lp2 = stm_allocate(16) + b_add(q, lp2) + got = b_pop(q) + assert got == lp2 + # + self.switch(0) + got = b_pop(q) + assert got == lp1 + # + stm_major_collect() # to get rid of the bag object From noreply at buildbot.pypy.org Sat Jan 24 11:54:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 11:54:05 +0100 (CET) Subject: [pypy-commit] stmgc bag: in-progress Message-ID: <20150124105405.5C96B1C01AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1582:21b4163950f8 Date: 2015-01-24 11:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/21b4163950f8/ Log: in-progress diff --git a/c7/stm/bag.c b/c7/stm/bag.c --- a/c7/stm/bag.c +++ b/c7/stm/bag.c @@ -44,6 +44,7 @@ struct stm_bag_seg_s { uintptr_t *deque_left, *deque_middle, *deque_right; struct list_s *abort_list; + uint64_t start_time; /* the transaction's unique_start_time */ }; struct stm_bag_s { @@ -62,6 +63,7 @@ bs->deque_middle = &block->items[0]; bs->deque_right = &block->items[0]; LIST_CREATE(bs->abort_list); + bs->start_time = 0; } return bag; } @@ -79,41 +81,105 @@ } LIST_FREE(bs->abort_list); } + + s_mutex_lock(); + for (i = 0; i < STM_NB_SEGMENTS; i++) { + struct stm_bag_seg_s *bs = &bag->by_segment[i]; + struct stm_segment_info_s *pub = get_segment(i + 1); + stm_thread_local_t *tl = pub->running_thread; + if (tl->associated_segment_num == i + 1) { + stm_call_on_abort(tl, bs, NULL); + } + } + s_mutex_unlock(); + + free(bag); +} + +static void bag_add(struct stm_bag_seg_s *bs, object_t *newobj) +{ + struct deque_block_s *block = deque_block(bs->deque_right); + *bs->deque_right++ = 
(uintptr_t)newobj; + + if (bs->deque_right == &block->items[DEQUE_BLOCK_SIZE]) { + if (block->next == NULL) + block->next = deque_new_block(); + bs->deque_right = &block->next->items[0]; + } +} + +static void bag_abort_callback(void *key) +{ + struct stm_bag_seg_s *bs = (struct stm_bag_seg_s *)key; + + /* remove the "added in this transaction" items */ + bs->deque_right = bs->deque_middle; + + /* reinstall the items from the "abort_list" */ + LIST_FOREACH_F(bs->abort_list, object_t *, bag_add(bs, item)); + list_clear(bs->abort_list); + + /* these items are not "added in this transaction" */ + bs->deque_middle = bs->deque_right; +} + +static struct stm_bag_seg_s *bag_check_start_time(stm_bag_t *bag) +{ + int i = STM_SEGMENT->segment_num - 1; + struct stm_bag_seg_s *bs = &bag->by_segment[i]; + + if (bs->start_time != STM_PSEGMENT->unique_start_time) { + /* There was a commit or an abort since the last operation + on the same bag in the same segment. If there was an + abort, bag_abort_callback() should have been called to + reset the state. Assume that any non-reset state is + there because of a commit. + + The middle pointer moves to the right: there are no + more "added in this transaction" entries. And the + "already popped items" list is forgotten. + */ + bs->deque_middle = bs->deque_right; + list_clear(bs->abort_list); + bs->start_time = STM_PSEGMENT->unique_start_time; + + /* We're about to modify the bag, so register an abort + callback now. 
*/ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + assert(tl->associated_segment_num == STM_SEGMENT->segment_num); + stm_call_on_abort(tl, bs, &bag_abort_callback); + } + + return bs; } void stm_bag_add(stm_bag_t *bag, object_t *newobj) { - int i = STM_SEGMENT->segment_num - 1; - struct stm_bag_seg_s *bs = &bag->by_segment[i]; - struct deque_block_s *block = deque_block(bs->deque_right); - - *bs->deque_right++ = (uintptr_t)newobj; - - if (bs->deque_right == &block->items[DEQUE_BLOCK_SIZE]) { - assert(block->next == NULL); - block->next = deque_new_block(); - bs->deque_right = &block->next->items[0]; - } + struct stm_bag_seg_s *bs = bag_check_start_time(bag); + bag_add(bs, newobj); } object_t *stm_bag_try_pop(stm_bag_t *bag) { - int i = STM_SEGMENT->segment_num - 1; - struct stm_bag_seg_s *bs = &bag->by_segment[i]; + struct stm_bag_seg_s *bs = bag_check_start_time(bag); if (bs->deque_left == bs->deque_right) { return NULL; } + struct deque_block_s *block = deque_block(bs->deque_left); - bool any_old_item_to_pop = (bs->deque_left != bs->deque_middle); + bool from_same_transaction = (bs->deque_left == bs->deque_middle); uintptr_t result = *bs->deque_left++; if (bs->deque_left == &block->items[DEQUE_BLOCK_SIZE]) { bs->deque_left = &block->next->items[0]; deque_free_block(block); } - if (!any_old_item_to_pop) { + if (from_same_transaction) { bs->deque_middle = bs->deque_left; } + else { + LIST_APPEND(bs->abort_list, result); + } return (object_t *)result; } diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -73,7 +73,7 @@ __attribute__((unused)) static void cm_abort_the_younger(struct contmgr_s *cm) { - if (STM_PSEGMENT->start_time >= cm->other_pseg->start_time) { + if (STM_PSEGMENT->unique_start_time >= cm->other_pseg->unique_start_time) { /* We started after the other thread. 
Abort */ cm->abort_other = false; } @@ -100,7 +100,7 @@ __attribute__((unused)) static void cm_pause_if_younger(struct contmgr_s *cm) { - if (STM_PSEGMENT->start_time >= cm->other_pseg->start_time) { + if (STM_PSEGMENT->unique_start_time >= cm->other_pseg->unique_start_time) { /* We started after the other thread. Pause */ cm->try_sleep = true; cm->abort_other = false; diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -324,7 +324,7 @@ STM_SEGMENT->transaction_read_version = 1; } -static uint64_t _global_start_time = 0; +static uint64_t _global_start_time = 1; static void _stm_start_transaction(stm_thread_local_t *tl) { @@ -337,7 +337,7 @@ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); timing_event(tl, STM_TRANSACTION_START); - STM_PSEGMENT->start_time = _global_start_time++; + STM_PSEGMENT->unique_start_time = _global_start_time++; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; STM_PSEGMENT->marker_inev.object = NULL; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -138,7 +138,7 @@ /* Start time: to know approximately for how long a transaction has been running, in contention management */ - uint64_t start_time; + uint64_t unique_start_time; /* This is the number stored in the overflowed objects (a multiple of GCFLAG_OVERFLOW_NUMBER_bit0). 
It is incremented when the diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -96,6 +96,16 @@ } \ } while (0) +#define LIST_FOREACH_F(lst, TYPE, CODE) \ + do { \ + struct list_s *_lst = (lst); \ + uintptr_t _i, _c = _lst->count; \ + for (_i = 0; _i < _c; _i++) { \ + TYPE item = (TYPE)_lst->items[_i]; \ + CODE; \ + } \ + } while (0) + /************************************************************/ /* The tree_xx functions are, like the name hints, implemented as a tree, diff --git a/c7/test/test_bag.py b/c7/test/test_bag.py --- a/c7/test/test_bag.py +++ b/c7/test/test_bag.py @@ -131,3 +131,31 @@ assert got == lp1 # stm_major_collect() # to get rid of the bag object + + def test_abort_recovers_popped(self): + self.start_transaction() + q = self.allocate_bag() + self.push_root(q) + lp1 = stm_allocate(16) + lp2 = stm_allocate(16) + stm_set_char(lp1, 'M') + stm_set_char(lp2, 'N') + b_add(q, lp1) + b_add(q, lp2) + self.commit_transaction() + q = self.pop_root() + # + self.start_transaction() + lp1 = b_pop(q) + lp2 = b_pop(q) + assert stm_get_char(lp1) == 'M' + assert stm_get_char(lp2) == 'N' + self.abort_transaction() + # + self.start_transaction() + lp1 = b_pop(q) + lp2 = b_pop(q) + assert stm_get_char(lp1) == 'M' + assert stm_get_char(lp2) == 'N' + # + stm_major_collect() # to get rid of the bag object From noreply at buildbot.pypy.org Sat Jan 24 12:02:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 12:02:06 +0100 (CET) Subject: [pypy-commit] stmgc bag: Align the bag's segment data to 64 bytes boundaries to prevent false sharing Message-ID: <20150124110206.C38631C0499@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1583:4c2c1694892d Date: 2015-01-24 12:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/4c2c1694892d/ Log: Align the bag's segment data to 64 bytes boundaries to prevent false sharing diff --git a/c7/stm/bag.c b/c7/stm/bag.c --- a/c7/stm/bag.c +++ b/c7/stm/bag.c @@ -41,23 
+41,32 @@ */ -struct stm_bag_seg_s { - uintptr_t *deque_left, *deque_middle, *deque_right; - struct list_s *abort_list; - uint64_t start_time; /* the transaction's unique_start_time */ -}; +typedef union { + struct { + uintptr_t *deque_left, *deque_middle, *deque_right; + struct list_s *abort_list; + uint64_t start_time; /* the transaction's unique_start_time */ + }; + char alignment[64]; /* 64-bytes alignment, to prevent false sharing */ +} stm_bag_seg_t; struct stm_bag_s { - struct stm_bag_seg_s by_segment[STM_NB_SEGMENTS]; + stm_bag_seg_t by_segment[STM_NB_SEGMENTS]; }; stm_bag_t *stm_bag_create(void) { int i; - stm_bag_t *bag = malloc(sizeof(stm_bag_t)); - assert(bag); + stm_bag_t *bag; + void *mem; + + assert(sizeof(stm_bag_seg_t) == 64); + if (posix_memalign(&mem, sizeof(stm_bag_seg_t), sizeof(stm_bag_t)) != 0) + stm_fatalerror("out of memory in stm_bag_create"); /* XXX */ + + bag = (stm_bag_t *)mem; for (i = 0; i < STM_NB_SEGMENTS; i++) { - struct stm_bag_seg_s *bs = &bag->by_segment[i]; + stm_bag_seg_t *bs = &bag->by_segment[i]; struct deque_block_s *block = deque_new_block(); bs->deque_left = &block->items[0]; bs->deque_middle = &block->items[0]; @@ -72,7 +81,7 @@ { int i; for (i = 0; i < STM_NB_SEGMENTS; i++) { - struct stm_bag_seg_s *bs = &bag->by_segment[i]; + stm_bag_seg_t *bs = &bag->by_segment[i]; struct deque_block_s *block = deque_block(bs->deque_left); while (block != NULL) { struct deque_block_s *nextblock = block->next; @@ -84,7 +93,7 @@ s_mutex_lock(); for (i = 0; i < STM_NB_SEGMENTS; i++) { - struct stm_bag_seg_s *bs = &bag->by_segment[i]; + stm_bag_seg_t *bs = &bag->by_segment[i]; struct stm_segment_info_s *pub = get_segment(i + 1); stm_thread_local_t *tl = pub->running_thread; if (tl->associated_segment_num == i + 1) { @@ -96,7 +105,7 @@ free(bag); } -static void bag_add(struct stm_bag_seg_s *bs, object_t *newobj) +static void bag_add(stm_bag_seg_t *bs, object_t *newobj) { struct deque_block_s *block = deque_block(bs->deque_right); 
*bs->deque_right++ = (uintptr_t)newobj; @@ -110,7 +119,7 @@ static void bag_abort_callback(void *key) { - struct stm_bag_seg_s *bs = (struct stm_bag_seg_s *)key; + stm_bag_seg_t *bs = (stm_bag_seg_t *)key; /* remove the "added in this transaction" items */ bs->deque_right = bs->deque_middle; @@ -123,10 +132,10 @@ bs->deque_middle = bs->deque_right; } -static struct stm_bag_seg_s *bag_check_start_time(stm_bag_t *bag) +static stm_bag_seg_t *bag_check_start_time(stm_bag_t *bag) { int i = STM_SEGMENT->segment_num - 1; - struct stm_bag_seg_s *bs = &bag->by_segment[i]; + stm_bag_seg_t *bs = &bag->by_segment[i]; if (bs->start_time != STM_PSEGMENT->unique_start_time) { /* There was a commit or an abort since the last operation @@ -155,13 +164,13 @@ void stm_bag_add(stm_bag_t *bag, object_t *newobj) { - struct stm_bag_seg_s *bs = bag_check_start_time(bag); + stm_bag_seg_t *bs = bag_check_start_time(bag); bag_add(bs, newobj); } object_t *stm_bag_try_pop(stm_bag_t *bag) { - struct stm_bag_seg_s *bs = bag_check_start_time(bag); + stm_bag_seg_t *bs = bag_check_start_time(bag); if (bs->deque_left == bs->deque_right) { return NULL; } @@ -189,14 +198,14 @@ /* only trace the items added in the current transaction; the rest is already old and cannot point into the nursery. 
*/ int i = STM_SEGMENT->segment_num - 1; - struct stm_bag_seg_s *bs = &bag->by_segment[i]; + stm_bag_seg_t *bs = &bag->by_segment[i]; deque_trace(bs->deque_middle, bs->deque_right, trace); } else { int i; for (i = 0; i < NB_SEGMENTS; i++) { - struct stm_bag_seg_s *bs = &bag->by_segment[i]; + stm_bag_seg_t *bs = &bag->by_segment[i]; deque_trace(bs->deque_left, bs->deque_right, trace); } } From noreply at buildbot.pypy.org Sat Jan 24 13:00:44 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 24 Jan 2015 13:00:44 +0100 (CET) Subject: [pypy-commit] pypy vmprof: Getting somewhere - a more sane way to record all the code objs and store Message-ID: <20150124120044.6B8D21C0A35@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75508:a9901f5effb2 Date: 2015-01-24 14:00 +0200 http://bitbucket.org/pypy/pypy/changeset/a9901f5effb2/ Log: Getting somewhere - a more sane way to record all the code objs and store them in the same file diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -9,6 +9,7 @@ from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX +from rpython.rlib.rweaklist import RWeakListMixin from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) @@ -366,6 +367,10 @@ # ____________________________________________________________ +class CodeObjWeakList(RWeakListMixin): + def __init__(self): + self.initialize() + class ObjSpace(object): """Base class for the interpreter-level implementations of object spaces. 
http://pypy.readthedocs.org/en/latest/objspace.html""" @@ -389,6 +394,7 @@ self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None + self.all_code_objs = CodeObjWeakList() # can be overridden to a subclass self.initialize() @@ -666,6 +672,16 @@ assert ec is not None return ec + def register_code_object(self, pycode): + callback = self.getexecutioncontext().register_code_callback + if callback is not None: + callback(self, pycode) + self.all_code_objs.add_handle(pycode) + + def set_code_callback(self, callback): + ec = self.getexecutioncontext() + ec.register_code_callback = callback + def _freeze_(self): return True diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -33,6 +33,7 @@ self.profilefunc = None self.w_profilefuncarg = None self.thread_disappeared = False # might be set to True after os.fork() + self.register_code_callback = None @staticmethod def _mark_thread_disappeared(space): diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -14,9 +14,10 @@ CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, r_longlong from rpython.rlib.objectmodel import compute_hash from rpython.rlib import jit +from rpython.rlib.debug import debug_start, debug_stop, debug_print class BytecodeCorruption(Exception): @@ -56,6 +57,11 @@ _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", "co_freevars[*]", "co_cellvars[*]", "_args_as_cellvars[*]"] + if sys.maxint == 2147483647: + _unique_id = 0 # XXX this is wrong, it won't work on 32bit + else: + 
_unique_id = 0x7000000000000000 + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -83,7 +89,7 @@ self.magic = magic self._signature = cpython_code_signature(self) self._initialize() - self._vmprof_setup_maybe() + space.register_code_object(self) def _initialize(self): if self.co_cellvars: @@ -125,9 +131,12 @@ from pypy.objspace.std.mapdict import init_mapdict_cache init_mapdict_cache(self) - def _vmprof_setup_maybe(self): - # this is overridden only if _vmprof is enabled - pass + self._unique_id = PyCode._unique_id + PyCode._unique_id += 1 + + def _get_full_name(self): + return "py:%s:%d:%s" % (self.co_name, self.co_firstlineno, + self.co_filename) def _cleanup_(self): if (self.magic == cpython_magic and diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py --- a/pypy/module/_vmprof/__init__.py +++ b/pypy/module/_vmprof/__init__.py @@ -14,4 +14,5 @@ def setup_after_space_initialization(self): # force the __extend__ hacks to occur early - import pypy.module._vmprof.interp_vmprof + from pypy.module._vmprof.interp_vmprof import VMProf + self.vmprof = VMProf() diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -1,17 +1,13 @@ -import py +import py, os, struct from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.annlowlevel import cast_instance_to_gcref, cast_base_ptr_to_instance -from rpython.rlib.objectmodel import we_are_translated, CDefinedIntSymbolic -from rpython.rlib import jit, rposix -from rpython.tool.pairtype import extendabletype +from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib import jit, rposix, entrypoint from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error 
import oefmt, wrap_oserror +from pypy.interpreter.error import oefmt, wrap_oserror, OperationError from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pycode import PyCode - -FALSE_BUT_NON_CONSTANT = CDefinedIntSymbolic('0', default=0) ROOT = py.path.local(__file__).join('..') SRC = ROOT.join('src') @@ -63,7 +59,7 @@ pypy_vmprof_init = rffi.llexternal("pypy_vmprof_init", [], lltype.Void, compilation_info=eci) vmprof_enable = rffi.llexternal("vmprof_enable", - [rffi.INT, rffi.INT, rffi.LONG], + [rffi.INT, rffi.INT, rffi.LONG, rffi.INT], rffi.INT, compilation_info=eci) vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT, compilation_info=eci) @@ -97,130 +93,85 @@ return original_execute_frame(frame, w_inputvalue, operr) - -class __extend__(PyCode): - __metaclass__ = extendabletype - - def _vmprof_setup_maybe(self): - self._vmprof_virtual_ip = _vmprof.get_next_virtual_IP() - self._vmprof_registered = 0 - -# avoid rtyper warnings -PyCode._vmprof_virtual_ip = 0 -PyCode._vmprof_registered = 0 - - - + at entrypoint.entrypoint_lowlevel('main', [llmemory.GCREF], + 'pypy_vmprof_get_virtual_ip', True) def get_virtual_ip(gc_frame): frame = cast_base_ptr_to_instance(PyFrame, gc_frame) if jit._get_virtualizable_token(frame): return rffi.cast(rffi.VOIDP, 0) virtual_ip = do_get_virtual_ip(frame) return rffi.cast(rffi.VOIDP, virtual_ip) -get_virtual_ip.c_name = 'pypy_vmprof_get_virtual_ip' -get_virtual_ip._dont_inline_ = True - -def strncpy(src, tgt, tgt_ofs, count): - if len(src) < count: - count = len(src) - i = 0 - while i < count: - tgt[i + tgt_ofs] = src[i] - i += 1 - return i - -def int2str(num, s, ofs): - if num == 0: - s[ofs] = '0' - return 1 - count = 0 - c = num - while c != 0: - count += 1 - c /= 10 - pos = ofs + count - 1 - c = num - while c != 0: - s[pos] = chr(ord('0') + c % 10) - c /= 10 - pos -= 1 - return count def do_get_virtual_ip(frame): - virtual_ip = frame.pycode._vmprof_virtual_ip - 
if not frame.pycode._vmprof_registered: - # we need to register this code object - name = frame.pycode.co_name - filename = frame.pycode.co_filename - firstlineno = frame.pycode.co_firstlineno - start = rffi.cast(rffi.VOIDP, virtual_ip) - end = start # ignored for now - # - # manually fill the C buffer; we cannot use str2charp because we - # cannot call malloc from a signal handler - strbuf = _vmprof.strbuf - ofs = strncpy("py:", _vmprof.strbuf, 0, len("py:")) - ofs += strncpy(filename, _vmprof.strbuf, ofs, 128) - _vmprof.strbuf[ofs] = ':' - ofs += 1 - ofs += int2str(firstlineno, _vmprof.strbuf, ofs) - _vmprof.strbuf[ofs] = ':' - ofs += 1 - ofs += strncpy(name, _vmprof.strbuf, ofs, 1024 - 1 - ofs) - _vmprof.strbuf[ofs] = '\x00' - vmprof_register_virtual_function(strbuf, start, end) - frame.pycode._vmprof_registered = 1 - # - return virtual_ip + return frame.pycode._unique_id class VMProf(object): def __init__(self): - self.virtual_ip = 0x7000000000000000 self.is_enabled = False self.ever_enabled = False - self.strbuf = lltype.malloc(rffi.CCHARP.TO, 1024, flavor='raw', immortal=True, zero=True) + self.mapping_so_far = [] # stored mapping in between runs + self.fileno = -1 - def get_next_virtual_IP(self): - self.virtual_ip += 1 - return self.virtual_ip - - @jit.dont_look_inside - def _annotate_get_virtual_ip(self): - if FALSE_BUT_NON_CONSTANT: - # make sure it's annotated - gcref = rffi.cast(llmemory.GCREF, self.virtual_ip) # just a random non-constant value - get_virtual_ip(gcref) - - def enable(self, space, fileno, symno, period): - self._annotate_get_virtual_ip() + def enable(self, space, fileno, period): if self.is_enabled: raise oefmt(space.w_ValueError, "_vmprof already enabled") + self.fileno = fileno self.is_enabled = True + self.write_header(fileno, period) if not self.ever_enabled: - pypy_vmprof_init() + if we_are_translated(): + pypy_vmprof_init() self.ever_enabled = True - res = vmprof_enable(fileno, symno, period) + for weakcode in 
space.all_code_objs.get_all_handles(): + code = weakcode() + if code: + self.register_code(space, code) + space.set_code_callback(self.register_code) + if we_are_translated(): + # does not work untranslated + res = vmprof_enable(fileno, -1, period, 0) + else: + res = 0 if res == -1: raise wrap_oserror(space, OSError(rposix.get_errno(), "_vmprof.enable")) + def write_header(self, fileno, period): + if period == -1: + period_usec = 1000000 / 100 # 100hz + else: + period_usec = period + os.write(fileno, struct.pack("lllll", 0, 3, 0, period_usec, 0)) + + def register_code(self, space, code): + if self.fileno == -1: + raise OperationError(space.w_RuntimeError, + space.wrap("vmprof not running")) + name = code._get_full_name() + s = '\x02' + struct.pack("ll", code._unique_id, len(name)) + name + os.write(self.fileno, s) + def disable(self, space): if not self.is_enabled: raise oefmt(space.w_ValueError, "_vmprof not enabled") self.is_enabled = False - res = vmprof_disable() + self.fileno = -1 + if we_are_translated(): + # does not work untranslated + res = vmprof_disable() + else: + res = 0 + space.set_code_callback(None) if res == -1: raise wrap_oserror(space, OSError(rposix.get_errno(), "_vmprof.disable")) -_vmprof = VMProf() - - at unwrap_spec(fileno=int, symno=int, period=int) -def enable(space, fileno, symno, period=-1): - _vmprof.enable(space, fileno, symno, period) + at unwrap_spec(fileno=int, period=int) +def enable(space, fileno, period=-1): + space.getbuiltinmodule('_vmprof').vmprof.enable(space, fileno, period) def disable(space): - _vmprof.disable(space) + space.getbuiltinmodule('_vmprof').vmprof.disable(space) diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -35,7 +35,7 @@ #define MAX_STACK_DEPTH 64 static FILE* profile_file; -static FILE* symbol_file; +static FILE* symbol_file = NULL; void* vmprof_mainloop_func; static ptrdiff_t 
mainloop_sp_offset; static vmprof_get_virtual_ip_t mainloop_get_virtual_ip; @@ -45,10 +45,19 @@ * functions to write a profile file compatible with gperftools * ************************************************************* */ + +#define MARKER_STACKTRACE '\x01' +#define MARKER_VIRTUAL_IP '\x02' +#define MARKER_TRAILER '\x03' + static void prof_word(FILE* f, long x) { fwrite(&x, sizeof(x), 1, f); } +static void prof_char(FILE *f, char x) { + fwrite(&x, sizeof(x), 1, f); +} + static void prof_header(FILE* f, long period_usec) { prof_word(f, 0); prof_word(f, 3); @@ -59,18 +68,13 @@ static void prof_write_stacktrace(FILE* f, void** stack, int depth, int count) { int i; + prof_char(f, MARKER_STACKTRACE); prof_word(f, count); prof_word(f, depth); for(i=0; i 10 + d = {} - def __init__(self, co_name): - self.co_name = co_name - self.co_filename = 'filename' - self.co_firstlineno = 13 - self._vmprof_setup_maybe() + exec """def foo(): + pass + """ in d -def test_get_virtual_ip(monkeypatch): - functions = [] - def register_virtual_function(name, start, end): - name = rffi.charp2str(name) - start = rffi.cast(lltype.Signed, start) - end = rffi.cast(lltype.Signed, end) - functions.append((name, start, end)) - monkeypatch.setattr(interp_vmprof, 'vmprof_register_virtual_function', register_virtual_function) - # - mycode = FakePyCode('foo') - assert mycode._vmprof_virtual_ip < 0 - myframe = FakePyFrame(mycode) + _vmprof.enable(self.tmpfileno2) - _vmprof.counter = 42 - ip = do_get_virtual_ip(myframe) - assert ip == mycode._vmprof_virtual_ip - assert functions == [('py:filename:13:foo', ip, ip)] + exec """def foo2(): + pass + """ in d - # the second time, we don't register it again - functions = [] - ip = do_get_virtual_ip(myframe) - assert ip == mycode._vmprof_virtual_ip - assert functions == [] - - # now, let's try with a long name - mycode = FakePyCode('abcde' + 'f' * 20000) - myframe = FakePyFrame(mycode) - functions = [] - ip2 = do_get_virtual_ip(myframe) - assert ip2 == 
mycode._vmprof_virtual_ip - assert ip2 < ip # because it was generated later - assert len(functions) == 1 - name, start, end = functions[0] - assert len(name) < 1025 - assert name == 'py:filename:13:abcde' + 'f' * (1024 - 20 - 1) - + _vmprof.disable() + s = open(self.tmpfilename2).read() + no_of_codes2 = count(s) + assert "py:foo:" in s + assert "py:foo2:" in s + assert no_of_codes2 >= no_of_codes + 2 # some extra codes from tests diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -278,6 +278,9 @@ # YYY very minor leak -- we need the counters to stay alive # forever, just because we want to report them at the end # of the process + + # XXX the numbers here are ALMOST unique, but not quite, use a counter + # or something struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', track_allocation=False) struct.i = 0 From noreply at buildbot.pypy.org Sat Jan 24 14:01:48 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 24 Jan 2015 14:01:48 +0100 (CET) Subject: [pypy-commit] pypy vmprof: shuffle stuff around to make it more RPython Message-ID: <20150124130148.2572D1C0417@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75509:b57dabe367b1 Date: 2015-01-24 15:01 +0200 http://bitbucket.org/pypy/pypy/changeset/b57dabe367b1/ Log: shuffle stuff around to make it more RPython diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -34,6 +34,10 @@ self.w_profilefuncarg = None self.thread_disappeared = False # might be set to True after os.fork() self.register_code_callback = None + if sys.maxint == 2147483647: + self._code_unique_id = 0 # XXX this is wrong, it won't work on 32bit + else: + self._code_unique_id = 0x7000000000000000 @staticmethod def 
_mark_thread_disappeared(space): diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -55,13 +55,9 @@ "CPython-style code objects." _immutable_ = True _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", "_args_as_cellvars[*]"] - - if sys.maxint == 2147483647: - _unique_id = 0 # XXX this is wrong, it won't work on 32bit - else: - _unique_id = 0x7000000000000000 - + "co_freevars[*]", "co_cellvars[*]", + "_args_as_cellvars[*]"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -131,8 +127,10 @@ from pypy.objspace.std.mapdict import init_mapdict_cache init_mapdict_cache(self) - self._unique_id = PyCode._unique_id - PyCode._unique_id += 1 + ec = self.space.getexecutioncontext() + self._unique_id = ec._code_unique_id + ec._code_unique_id += 2 # so we have one bit that we can mark stuff + # with def _get_full_name(self): return "py:%s:%d:%s" % (self.co_name, self.co_firstlineno, diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -170,8 +170,14 @@ @unwrap_spec(fileno=int, period=int) def enable(space, fileno, period=-1): - space.getbuiltinmodule('_vmprof').vmprof.enable(space, fileno, period) + from pypy.module._vmprof import Module + mod_vmprof = space.getbuiltinmodule('_vmprof') + assert isinstance(mod_vmprof, Module) + mod_vmprof.vmprof.enable(space, fileno, period) def disable(space): - space.getbuiltinmodule('_vmprof').vmprof.disable(space) + from pypy.module._vmprof import Module + mod_vmprof = space.getbuiltinmodule('_vmprof') + assert isinstance(mod_vmprof, Module) + mod_vmprof.vmprof.disable(space) From noreply at buildbot.pypy.org Sat Jan 24 15:21:52 2015 From: noreply at buildbot.pypy.org 
(arigo) Date: Sat, 24 Jan 2015 15:21:52 +0100 (CET) Subject: [pypy-commit] stmgc bag: Test and fix Message-ID: <20150124142152.04F4C1C0417@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1584:3665ef15024d Date: 2015-01-24 15:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/3665ef15024d/ Log: Test and fix diff --git a/c7/stm/bag.c b/c7/stm/bag.c --- a/c7/stm/bag.c +++ b/c7/stm/bag.c @@ -46,6 +46,7 @@ uintptr_t *deque_left, *deque_middle, *deque_right; struct list_s *abort_list; uint64_t start_time; /* the transaction's unique_start_time */ + bool must_add_to_overflow_bags; }; char alignment[64]; /* 64-bytes alignment, to prevent false sharing */ } stm_bag_seg_t; @@ -73,6 +74,7 @@ bs->deque_right = &block->items[0]; LIST_CREATE(bs->abort_list); bs->start_time = 0; + bs->must_add_to_overflow_bags = false; /* currently young */ } return bag; } @@ -80,6 +82,18 @@ void stm_bag_free(stm_bag_t *bag) { int i; + + s_mutex_lock(); + for (i = 0; i < STM_NB_SEGMENTS; i++) { + stm_bag_seg_t *bs = &bag->by_segment[i]; + struct stm_segment_info_s *pub = get_segment(i + 1); + stm_thread_local_t *tl = pub->running_thread; + if (tl != NULL && tl->associated_segment_num == i + 1) { + stm_call_on_abort(tl, bs, NULL); + } + } + s_mutex_unlock(); + for (i = 0; i < STM_NB_SEGMENTS; i++) { stm_bag_seg_t *bs = &bag->by_segment[i]; struct deque_block_s *block = deque_block(bs->deque_left); @@ -91,17 +105,6 @@ LIST_FREE(bs->abort_list); } - s_mutex_lock(); - for (i = 0; i < STM_NB_SEGMENTS; i++) { - stm_bag_seg_t *bs = &bag->by_segment[i]; - struct stm_segment_info_s *pub = get_segment(i + 1); - stm_thread_local_t *tl = pub->running_thread; - if (tl->associated_segment_num == i + 1) { - stm_call_on_abort(tl, bs, NULL); - } - } - s_mutex_unlock(); - free(bag); } @@ -151,6 +154,8 @@ bs->deque_middle = bs->deque_right; list_clear(bs->abort_list); bs->start_time = STM_PSEGMENT->unique_start_time; + bs->must_add_to_overflow_bags = false; /* not current transaction + any 
more */ /* We're about to modify the bag, so register an abort callback now. */ @@ -166,6 +171,13 @@ { stm_bag_seg_t *bs = bag_check_start_time(bag); bag_add(bs, newobj); + + if (bs->must_add_to_overflow_bags) { + bs->must_add_to_overflow_bags = false; + if (STM_PSEGMENT->overflow_bags == NULL) + LIST_CREATE(STM_PSEGMENT->overflow_bags); + LIST_APPEND(STM_PSEGMENT->overflow_bags, bag); + } } object_t *stm_bag_try_pop(stm_bag_t *bag) @@ -201,6 +213,10 @@ stm_bag_seg_t *bs = &bag->by_segment[i]; deque_trace(bs->deque_middle, bs->deque_right, trace); + + /* this case should only be called if the bag is from the current + transaction (either in the nursery or already overflowed) */ + bs->must_add_to_overflow_bags = true; } else { int i; @@ -210,3 +226,10 @@ } } } + +static void collect_overflow_bags(void) +{ + LIST_FOREACH_R(STM_PSEGMENT->overflow_bags, stm_bag_t *, + stm_bag_tracefn(item, TRACE_FOR_MINOR_COLLECTION)); + LIST_FREE(STM_PSEGMENT->overflow_bags); +} diff --git a/c7/stm/bag.h b/c7/stm/bag.h new file mode 100644 --- /dev/null +++ b/c7/stm/bag.h @@ -0,0 +1,2 @@ + +static void collect_overflow_bags(void); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -205,6 +205,10 @@ /* regular finalizers (objs from the current transaction only) */ struct finalizers_s *finalizers; + + /* list of bags that are overflow objects (current transaction but + outside the nursery) */ + struct list_s *overflow_bags; }; enum /* safe_point */ { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -578,6 +578,9 @@ if (STM_PSEGMENT->finalizers != NULL) collect_objs_still_young_but_with_finalizers(); + if (STM_PSEGMENT->overflow_bags != NULL) + collect_overflow_bags(); + collect_oldrefs_to_nursery(); assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -130,6 +130,7 @@ pr->callbacks_on_commit_and_abort[1] = 
tree_create(); pr->young_objects_with_light_finalizers = list_create(); pr->old_objects_with_light_finalizers = list_create(); + pr->overflow_bags = NULL; pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; @@ -174,6 +175,7 @@ tree_free(pr->callbacks_on_commit_and_abort[1]); list_free(pr->young_objects_with_light_finalizers); list_free(pr->old_objects_with_light_finalizers); + list_free(pr->overflow_bags); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -16,6 +16,7 @@ #include "stm/weakref.h" #include "stm/marker.h" #include "stm/finalizer.h" +#include "stm/bag.h" #include "stm/misc.c" #include "stm/list.c" diff --git a/c7/test/test_bag.py b/c7/test/test_bag.py --- a/c7/test/test_bag.py +++ b/c7/test/test_bag.py @@ -93,6 +93,15 @@ assert lp1b != ffi.NULL assert stm_get_char(lp1b) == 'N' assert lp1b != lp1 + # + lp2 = stm_allocate(16) + stm_set_char(lp2, 'M') + b_add(b, lp2) + stm_minor_collect() + lp2b = b_pop(b) + assert lp2b != ffi.NULL + assert stm_get_char(lp2b) == 'M' + assert lp2b != lp2 def test_keepalive_major(self): self.start_transaction() diff --git a/c7/test/test_hashtable.py b/c7/test/test_hashtable.py --- a/c7/test/test_hashtable.py +++ b/c7/test/test_hashtable.py @@ -146,6 +146,16 @@ assert lp1b != ffi.NULL assert stm_get_char(lp1b) == 'N' assert lp1b != lp1 + # + lp2 = stm_allocate(16) + stm_set_char(lp2, 'M') + tl0 = self.tls[self.current_thread] + htset(h, 1235, lp2, tl0) + stm_minor_collect() + lp2b = htget(h, 1235) + assert lp2b != ffi.NULL + assert stm_get_char(lp2b) == 'M' + assert lp2b != lp2 def test_keepalive_major(self): lp1 = stm_allocate_old(16) From noreply at buildbot.pypy.org Sat Jan 24 16:16:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 16:16:14 +0100 (CET) Subject: [pypy-commit] stmgc bag: Test and fix Message-ID: 
<20150124151614.7AEDD1C0417@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1585:d94ccb7c1585 Date: 2015-01-24 16:16 +0100 http://bitbucket.org/pypy/stmgc/changeset/d94ccb7c1585/ Log: Test and fix diff --git a/c7/stm/bag.c b/c7/stm/bag.c --- a/c7/stm/bag.c +++ b/c7/stm/bag.c @@ -46,7 +46,7 @@ uintptr_t *deque_left, *deque_middle, *deque_right; struct list_s *abort_list; uint64_t start_time; /* the transaction's unique_start_time */ - bool must_add_to_overflow_bags; + bool must_add_to_modified_bags; }; char alignment[64]; /* 64-bytes alignment, to prevent false sharing */ } stm_bag_seg_t; @@ -74,7 +74,7 @@ bs->deque_right = &block->items[0]; LIST_CREATE(bs->abort_list); bs->start_time = 0; - bs->must_add_to_overflow_bags = false; /* currently young */ + bs->must_add_to_modified_bags = false; /* currently young */ } return bag; } @@ -154,8 +154,7 @@ bs->deque_middle = bs->deque_right; list_clear(bs->abort_list); bs->start_time = STM_PSEGMENT->unique_start_time; - bs->must_add_to_overflow_bags = false; /* not current transaction - any more */ + bs->must_add_to_modified_bags = true; /* We're about to modify the bag, so register an abort callback now. 
*/ @@ -172,11 +171,11 @@ stm_bag_seg_t *bs = bag_check_start_time(bag); bag_add(bs, newobj); - if (bs->must_add_to_overflow_bags) { - bs->must_add_to_overflow_bags = false; - if (STM_PSEGMENT->overflow_bags == NULL) - LIST_CREATE(STM_PSEGMENT->overflow_bags); - LIST_APPEND(STM_PSEGMENT->overflow_bags, bag); + if (bs->must_add_to_modified_bags) { + bs->must_add_to_modified_bags = false; + if (STM_PSEGMENT->modified_bags == NULL) + LIST_CREATE(STM_PSEGMENT->modified_bags); + LIST_APPEND(STM_PSEGMENT->modified_bags, bag); } } @@ -214,9 +213,7 @@ deque_trace(bs->deque_middle, bs->deque_right, trace); - /* this case should only be called if the bag is from the current - transaction (either in the nursery or already overflowed) */ - bs->must_add_to_overflow_bags = true; + bs->must_add_to_modified_bags = true; } else { int i; @@ -227,9 +224,9 @@ } } -static void collect_overflow_bags(void) +static void collect_modified_bags(void) { - LIST_FOREACH_R(STM_PSEGMENT->overflow_bags, stm_bag_t *, + LIST_FOREACH_R(STM_PSEGMENT->modified_bags, stm_bag_t *, stm_bag_tracefn(item, TRACE_FOR_MINOR_COLLECTION)); - LIST_FREE(STM_PSEGMENT->overflow_bags); + LIST_FREE(STM_PSEGMENT->modified_bags); } diff --git a/c7/stm/bag.h b/c7/stm/bag.h --- a/c7/stm/bag.h +++ b/c7/stm/bag.h @@ -1,2 +1,2 @@ -static void collect_overflow_bags(void); +static void collect_modified_bags(void); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -206,9 +206,8 @@ /* regular finalizers (objs from the current transaction only) */ struct finalizers_s *finalizers; - /* list of bags that are overflow objects (current transaction but - outside the nursery) */ - struct list_s *overflow_bags; + /* list of bags that are old but modified */ + struct list_s *modified_bags; }; enum /* safe_point */ { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -578,8 +578,8 @@ if (STM_PSEGMENT->finalizers != NULL) 
collect_objs_still_young_but_with_finalizers(); - if (STM_PSEGMENT->overflow_bags != NULL) - collect_overflow_bags(); + if (STM_PSEGMENT->modified_bags != NULL) + collect_modified_bags(); collect_oldrefs_to_nursery(); assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -130,7 +130,7 @@ pr->callbacks_on_commit_and_abort[1] = tree_create(); pr->young_objects_with_light_finalizers = list_create(); pr->old_objects_with_light_finalizers = list_create(); - pr->overflow_bags = NULL; + pr->modified_bags = NULL; pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; @@ -175,7 +175,7 @@ tree_free(pr->callbacks_on_commit_and_abort[1]); list_free(pr->young_objects_with_light_finalizers); list_free(pr->old_objects_with_light_finalizers); - list_free(pr->overflow_bags); + list_free(pr->modified_bags); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/c7/test/test_bag.py b/c7/test/test_bag.py --- a/c7/test/test_bag.py +++ b/c7/test/test_bag.py @@ -116,6 +116,19 @@ assert lp1b != ffi.NULL assert stm_get_char(lp1b) == 'N' assert lp1b != lp1 + # + self.commit_transaction() + self.start_transaction() + lp2 = stm_allocate(16) + stm_set_char(lp2, 'M') + b_add(b, lp2) + stm_minor_collect() + lp2b = b_pop(b) + assert lp2b != ffi.NULL + assert stm_get_char(lp2b) == 'M' + assert lp2b != lp2 + # + stm_major_collect() # to get rid of the bag object def test_transaction_local(self): self.start_transaction() From noreply at buildbot.pypy.org Sat Jan 24 16:16:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 16:16:43 +0100 (CET) Subject: [pypy-commit] stmgc bag: future test about stealing Message-ID: <20150124151643.4CAAF1C0417@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1586:b425e275952a Date: 2015-01-24 16:17 +0100 
http://bitbucket.org/pypy/stmgc/changeset/b425e275952a/ Log: future test about stealing diff --git a/c7/test/test_bag.py b/c7/test/test_bag.py --- a/c7/test/test_bag.py +++ b/c7/test/test_bag.py @@ -181,3 +181,24 @@ assert stm_get_char(lp2) == 'N' # stm_major_collect() # to get rid of the bag object + + def test_stealing(self): + py.test.skip("in-progress") + self.start_transaction() + q = self.allocate_bag() + self.push_root(q) + lp1 = stm_allocate(16) + lp2 = stm_allocate(16) + stm_set_char(lp1, 'M') + stm_set_char(lp2, 'N') + b_add(q, lp1) + b_add(q, lp2) + self.commit_transaction() + q = self.pop_root() + # + self.switch(1) + self.start_transaction() + lp1 = b_pop(q) + lp2 = b_pop(q) + assert stm_get_char(lp1) == 'M' + assert stm_get_char(lp2) == 'N' From noreply at buildbot.pypy.org Sat Jan 24 16:41:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 16:41:37 +0100 (CET) Subject: [pypy-commit] stmgc bag: Update the checkfence instructions Message-ID: <20150124154137.339971C01AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1587:9e3ccc687bfd Date: 2015-01-24 16:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/9e3ccc687bfd/ Log: Update the checkfence instructions diff --git a/checkfence/README b/checkfence/README --- a/checkfence/README +++ b/checkfence/README @@ -1,5 +1,5 @@ -Installing checkfence on Linux 64 ---------------------------------- +Installing checkfence on Linux 64 (Ubuntu 14.04) +------------------------------------------------ apt-get install bison flex ocaml ocaml-findlib @@ -34,7 +34,9 @@ cd checkfence/build edit the Makefile: ZCHAFFDIR=/path/to/zchaff64 - make opt + fix src/Parser.yy for bison 3.0 by replacing all "yyloc" with "yyla.location" + + make dbg # or "make opt", but this gives me a segfaulting version :-( Compiling C2LSL: @@ -48,7 +50,7 @@ Running the examples: - cd c4 + cd c7 ln -s /full/path/to/c2lsl ln -s /full/path/to/checkfence ./run test1.c test1.lsl diff --git 
a/checkfence/c7/run b/checkfence/c7/run --- a/checkfence/c7/run +++ b/checkfence/c7/run @@ -8,4 +8,4 @@ shift $CHECKFENCE_HOME/run/clean || exit 1 echo ------------------------------------------------------------------------- -$CHECKFENCE_HOME/run/checkfence -i _run.lsl "$@" || exit 1 +$CHECKFENCE_HOME/run/checkfence_g -i _run.lsl "$@" || exit 1 From noreply at buildbot.pypy.org Sat Jan 24 16:43:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 16:43:17 +0100 (CET) Subject: [pypy-commit] stmgc bag: update Message-ID: <20150124154317.40A8F1C01AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1588:a9a9f95664ef Date: 2015-01-24 16:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/a9a9f95664ef/ Log: update diff --git a/checkfence/README b/checkfence/README --- a/checkfence/README +++ b/checkfence/README @@ -55,4 +55,4 @@ ln -s /full/path/to/checkfence ./run test1.c test1.lsl - Look at 'T0.bsc-overview.htm' in your web browser. + Look at 'T*.bsc-overview.htm' in your web browser. From noreply at buildbot.pypy.org Sat Jan 24 17:16:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Jan 2015 17:16:41 +0100 (CET) Subject: [pypy-commit] stmgc bag: Uh, a nonsense pair of asserts? Message-ID: <20150124161641.A52401C0499@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1589:745faca39603 Date: 2015-01-24 17:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/745faca39603/ Log: Uh, a nonsense pair of asserts? 
diff --git a/checkfence/README b/checkfence/README --- a/checkfence/README +++ b/checkfence/README @@ -35,6 +35,7 @@ edit the Makefile: ZCHAFFDIR=/path/to/zchaff64 fix src/Parser.yy for bison 3.0 by replacing all "yyloc" with "yyla.location" + comment out src/Value.C:122: ASSERT(inOffset < 128); make dbg # or "make opt", but this gives me a segfaulting version :-( From noreply at buildbot.pypy.org Sun Jan 25 08:14:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 Jan 2015 08:14:17 +0100 (CET) Subject: [pypy-commit] pypy default: Bah, we have two different faq documents nowadays. At the very least, Message-ID: <20150125071417.D27531C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75510:a43cdde1efe8 Date: 2015-01-25 08:14 +0100 http://bitbucket.org/pypy/pypy/changeset/a43cdde1efe8/ Log: Bah, we have two different faq documents nowadays. At the very least, link to each other. Otherwise the similar format and number of entries will confuse people (and me). diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -3,6 +3,13 @@ .. contents:: +See also: `Frequently ask questions about RPython.`__ + +.. __: http://rpython.readthedocs.org/en/latest/faq.html + +--------------------------- + + What is PyPy? ------------- diff --git a/rpython/doc/faq.rst b/rpython/doc/faq.rst --- a/rpython/doc/faq.rst +++ b/rpython/doc/faq.rst @@ -3,6 +3,12 @@ .. contents:: +See also: `Frequently ask questions about PyPy.`__ + +.. __: http://pypy.readthedocs.org/en/latest/faq.html + +-------------------------- + What is RPython? 
---------------- From noreply at buildbot.pypy.org Sun Jan 25 09:34:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 Jan 2015 09:34:39 +0100 (CET) Subject: [pypy-commit] stmgc bag: in-progress: add locks Message-ID: <20150125083439.6A8761C01AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1590:d0a565d0fc07 Date: 2015-01-25 09:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/d0a565d0fc07/ Log: in-progress: add locks diff --git a/c7/stm/bag.c b/c7/stm/bag.c --- a/c7/stm/bag.c +++ b/c7/stm/bag.c @@ -23,6 +23,8 @@ | already | | next items | added in this | | popped items | | to pop | transaction | +--------------+ +-----------------------+---------------+ + ^ ^ ^ + left middle right Adding objects puts them at the right end of the deque. Popping them takes them off the left end and stores a copy of the pointer into a @@ -42,11 +44,39 @@ typedef union { + /* Data describing the deque and abort_list belonging to the segment i. */ struct { - uintptr_t *deque_left, *deque_middle, *deque_right; + /* Left deque position: read/write by whoever has got the 'lock'. + Don't access at all without holding the lock. */ + uintptr_t *deque_left; + + /* Middle deque position: written only by segment i when it holds + the 'lock'. Can be read freely by segment i. Can be + read by the other segments when they hold the 'lock'. */ + uintptr_t *deque_middle; + + /* Right deque position: only accessed by the segment i. No + locking needed. */ + uintptr_t *deque_right; + + /* Abort list. Only accessed by the segment i. */ struct list_s *abort_list; - uint64_t start_time; /* the transaction's unique_start_time */ + + /* The segment i's transaction's unique_start_time, as it was + the last time we did a change to this stm_bag_seg_t. Used + to detect lazily when a commit occurred in-between. */ + uint64_t start_time; + + /* This flag is set to arm the bag-specific "write barrier". 
+ When adding new items to the bag, when this flag is set we + must record the bag into the 'modified_bags' list, used for + minor collections, so that we can trace the newly added + items. */ bool must_add_to_modified_bags; + + /* The lock, to access deque_left and deque_middle as + explained above. */ + uint8_t lock; }; char alignment[64]; /* 64-bytes alignment, to prevent false sharing */ } stm_bag_seg_t; @@ -75,6 +105,7 @@ LIST_CREATE(bs->abort_list); bs->start_time = 0; bs->must_add_to_modified_bags = false; /* currently young */ + bs->lock = 0; } return bag; } @@ -128,11 +159,15 @@ bs->deque_right = bs->deque_middle; /* reinstall the items from the "abort_list" */ - LIST_FOREACH_F(bs->abort_list, object_t *, bag_add(bs, item)); - list_clear(bs->abort_list); + if (!list_is_empty(bs->abort_list)) { + LIST_FOREACH_F(bs->abort_list, object_t *, bag_add(bs, item)); + list_clear(bs->abort_list); - /* these items are not "added in this transaction" */ - bs->deque_middle = bs->deque_right; + /* these items are not "added in this transaction" */ + spinlock_acquire(bs->lock); + bs->deque_middle = bs->deque_right; + spinlock_release(bs->lock); + } } static stm_bag_seg_t *bag_check_start_time(stm_bag_t *bag) @@ -151,7 +186,11 @@ more "added in this transaction" entries. And the "already popped items" list is forgotten. 
*/ - bs->deque_middle = bs->deque_right; + if (bs->deque_middle != bs->deque_right) { + spinlock_acquire(bs->lock); + bs->deque_middle = bs->deque_right; + spinlock_release(bs->lock); + } list_clear(bs->abort_list); bs->start_time = STM_PSEGMENT->unique_start_time; bs->must_add_to_modified_bags = true; @@ -182,7 +221,11 @@ object_t *stm_bag_try_pop(stm_bag_t *bag) { stm_bag_seg_t *bs = bag_check_start_time(bag); + + spinlock_acquire(bs->lock); + if (bs->deque_left == bs->deque_right) { + spinlock_release(bs->lock); return NULL; } @@ -196,8 +239,10 @@ } if (from_same_transaction) { bs->deque_middle = bs->deque_left; + spinlock_release(bs->lock); } else { + spinlock_release(bs->lock); LIST_APPEND(bs->abort_list, result); } return (object_t *)result; From noreply at buildbot.pypy.org Sun Jan 25 17:10:57 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 17:10:57 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Try to fix 32bit translation Message-ID: <20150125161057.134D51C00F4@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75511:e5c65536a65a Date: 2015-01-24 10:16 +0100 http://bitbucket.org/pypy/pypy/changeset/e5c65536a65a/ Log: Try to fix 32bit translation diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -512,7 +512,7 @@ out_ptr, len_ptr) if out_ptr[0]: return space.wrap( - rffi.charpsize2str(out_ptr[0], widen(len_ptr[0]))) + rffi.charpsize2str(out_ptr[0], intmask(len_ptr[0]))) _SSLSocket.typedef = TypeDef( "_ssl._SSLSocket", From noreply at buildbot.pypy.org Sun Jan 25 17:10:58 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 17:10:58 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Fix many errors in test_tk Message-ID: <20150125161058.321C11C00F4@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75512:bb36e19b9328 Date: 
2015-01-24 21:55 +0100 http://bitbucket.org/pypy/pypy/changeset/bb36e19b9328/ Log: Fix many errors in test_tk diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -17,6 +17,7 @@ raise ImportError("Tk headers and development libraries are required") from .app import TkApp +from .tclobj import TclObject as Tcl_Obj TK_VERSION = tkffi.string(tklib.get_tk_version()) TCL_VERSION = tkffi.string(tklib.get_tcl_version()) diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -533,3 +533,12 @@ def quit(self): self.quitMainLoop = True + + def _createbytearray(self, buf): + """Convert Python string or any buffer compatible object to Tcl + byte-array object. Use it to pass binary data (e.g. image's + data) to Tcl/Tk commands.""" + cdata = tkffi.new("char[]", buf) + obj = tklib.Tcl_NewByteArrayObj(cdata, len(buf)) + return FromObj(self, obj) + diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -112,6 +112,16 @@ return self._string return tkffi.string(tklib.Tcl_GetString(self._value)) + def __repr__(self): + return "<%s object at 0x%x>" % ( + tkffi.string(self._value.typePtr.name), + tkffi.cast("intptr_t", self._value)) + + def __eq__(self, other): + if not isinstance(other, TclObject): + return NotImplemented + return self._value == other._value + @property def string(self): if self._string is None: diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -75,6 +75,7 @@ char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); +Tcl_Obj *Tcl_NewByteArrayObj(unsigned char *bytes, int length); int Tcl_ExprBoolean(Tcl_Interp* interp, 
const char *expr, int *booleanPtr); int Tcl_ExprLong(Tcl_Interp* interp, const char *expr, long* longPtr); From noreply at buildbot.pypy.org Sun Jan 25 17:10:59 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 17:10:59 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Fix more tkinter tests Message-ID: <20150125161059.5E0AB1C00F4@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75513:4360c70a0171 Date: 2015-01-24 22:47 +0100 http://bitbucket.org/pypy/pypy/changeset/4360c70a0171/ Log: Fix more tkinter tests diff --git a/lib-python/2.7/lib-tk/test/test_tkinter/test_images.py b/lib-python/2.7/lib-tk/test/test_tkinter/test_images.py --- a/lib-python/2.7/lib-tk/test/test_tkinter/test_images.py +++ b/lib-python/2.7/lib-tk/test/test_tkinter/test_images.py @@ -37,6 +37,7 @@ self.assertEqual(image.height(), 16) self.assertIn('::img::test', self.root.image_names()) del image + support.gc_collect() self.assertNotIn('::img::test', self.root.image_names()) def test_create_from_data(self): @@ -51,6 +52,7 @@ self.assertEqual(image.height(), 16) self.assertIn('::img::test', self.root.image_names()) del image + support.gc_collect() self.assertNotIn('::img::test', self.root.image_names()) def assertEqualStrList(self, actual, expected): @@ -131,6 +133,7 @@ self.assertEqual(image['file'], testfile) self.assertIn('::img::test', self.root.image_names()) del image + support.gc_collect() self.assertNotIn('::img::test', self.root.image_names()) def check_create_from_data(self, ext): @@ -148,6 +151,7 @@ self.assertEqual(image['file'], '') self.assertIn('::img::test', self.root.image_names()) del image + support.gc_collect() self.assertNotIn('::img::test', self.root.image_names()) def test_create_from_ppm_file(self): diff --git a/lib-python/2.7/lib-tk/test/test_tkinter/test_variables.py b/lib-python/2.7/lib-tk/test/test_tkinter/test_variables.py --- a/lib-python/2.7/lib-tk/test/test_tkinter/test_variables.py +++ 
b/lib-python/2.7/lib-tk/test/test_tkinter/test_variables.py @@ -1,4 +1,5 @@ import unittest +from test.test_support import gc_collect from Tkinter import Variable, StringVar, IntVar, DoubleVar, BooleanVar, Tcl, TclError @@ -32,6 +33,7 @@ v = Variable(self.root, "sample string", "varname") self.assertTrue(self.info_exists("varname")) del v + gc_collect() self.assertFalse(self.info_exists("varname")) def test_dont_unset_not_existing(self): @@ -39,9 +41,11 @@ v1 = Variable(self.root, name="name") v2 = Variable(self.root, name="name") del v1 + gc_collect() self.assertFalse(self.info_exists("name")) # shouldn't raise exception del v2 + gc_collect() self.assertFalse(self.info_exists("name")) def test___eq__(self): diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -539,6 +539,8 @@ byte-array object. Use it to pass binary data (e.g. image's data) to Tcl/Tk commands.""" cdata = tkffi.new("char[]", buf) - obj = tklib.Tcl_NewByteArrayObj(cdata, len(buf)) - return FromObj(self, obj) + res = tklib.Tcl_NewByteArrayObj(cdata, len(buf)) + if not res: + self.raiseTclError() + return FromObj(self, res) From noreply at buildbot.pypy.org Sun Jan 25 17:11:00 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 17:11:00 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Fix last failures in test_tk Message-ID: <20150125161100.7139B1C00F4@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75514:3931bad51c08 Date: 2015-01-25 16:58 +0100 http://bitbucket.org/pypy/pypy/changeset/3931bad51c08/ Log: Fix last failures in test_tk diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -25,6 +25,8 @@ def varname_converter(input): if isinstance(input, TclObject): return input.string + if b'\0' in input: + raise ValueError("NUL character in string") return input @@ -542,5 +544,4 @@ 
res = tklib.Tcl_NewByteArrayObj(cdata, len(buf)) if not res: self.raiseTclError() - return FromObj(self, res) - + return TclObject(res) From noreply at buildbot.pypy.org Sun Jan 25 17:11:01 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 17:11:01 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Fix remaining failures in test_ttk_guionly Message-ID: <20150125161101.A59971C00F4@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75515:66226c524e36 Date: 2015-01-25 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/66226c524e36/ Log: Fix remaining failures in test_ttk_guionly diff --git a/lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py b/lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py --- a/lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py +++ b/lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py @@ -2,7 +2,7 @@ import unittest import Tkinter as tkinter import ttk -from test.test_support import requires, run_unittest, swap_attr +from test.test_support import requires, run_unittest, swap_attr, gc_collect from test_ttk.support import AbstractTkTest, destroy_default_root requires('gui') @@ -18,6 +18,7 @@ x = ttk.LabeledScale(self.root) var = x._variable._name x.destroy() + gc_collect() self.assertRaises(tkinter.TclError, x.tk.globalgetvar, var) # manually created variable @@ -25,11 +26,13 @@ name = myvar._name x = ttk.LabeledScale(self.root, variable=myvar) x.destroy() + gc_collect() if self.wantobjects: self.assertEqual(x.tk.globalgetvar(name), myvar.get()) else: self.assertEqual(float(x.tk.globalgetvar(name)), myvar.get()) del myvar + gc_collect() self.assertRaises(tkinter.TclError, x.tk.globalgetvar, name) # checking that the tracing callback is properly removed @@ -37,6 +40,7 @@ # LabeledScale will start tracing myvar x = ttk.LabeledScale(self.root, variable=myvar) x.destroy() + gc_collect() # Unless the tracing callback was removed, creating a new # LabeledScale with the 
same var will cause an error now. This # happens because the variable will be set to (possibly) a new @@ -216,6 +220,7 @@ optmenu.destroy() self.assertEqual(optmenu.tk.globalgetvar(name), var.get()) del var + gc_collect() self.assertRaises(tkinter.TclError, optmenu.tk.globalgetvar, name) diff --git a/lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py b/lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py --- a/lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py +++ b/lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py @@ -2,7 +2,7 @@ import Tkinter as tkinter from Tkinter import TclError import ttk -from test.test_support import requires, run_unittest +from test.test_support import requires, run_unittest, gc_collect import sys from test_functions import MockTclObj @@ -838,6 +838,7 @@ self.assertEqual(conv(self.scale.get()), var.get()) self.assertEqual(conv(self.scale.get()), max + 5) del var + gc_collect() # the same happens with the value option self.scale['value'] = max + 10 diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -114,8 +114,7 @@ def __repr__(self): return "<%s object at 0x%x>" % ( - tkffi.string(self._value.typePtr.name), - tkffi.cast("intptr_t", self._value)) + self.typename, tkffi.cast("intptr_t", self._value)) def __eq__(self, other): if not isinstance(other, TclObject): @@ -123,6 +122,10 @@ return self._value == other._value @property + def typename(self): + return tkffi.string(self._value.typePtr.name) + + @property def string(self): if self._string is None: length = tkffi.new("int*") From noreply at buildbot.pypy.org Sun Jan 25 19:57:04 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 19:57:04 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: ssl: add SSLContext.load_dh_params() Message-ID: <20150125185704.66B6D1C0499@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75516:35f8bc7e346a 
Date: 2015-01-25 17:37 +0100 http://bitbucket.org/pypy/pypy/changeset/35f8bc7e346a/ Log: ssl: add SSLContext.load_dh_params() diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1025,6 +1025,31 @@ if ret != 1: raise _ssl_seterror(space, None, -1) + @unwrap_spec(filepath=str) + def load_dh_params_w(self, space, filepath): + bio = libssl_BIO_new_file(filepath, "r") + if not bio: + libssl_ERR_clear_error() + errno = get_errno() + raise wrap_oserror(space, OSError(errno, '')) + try: + set_errno(0) + dh = libssl_PEM_read_bio_DHparams(bio, None, None, None) + finally: + libssl_BIO_free(bio) + if not dh: + errno = get_errno() + if errno != 0: + libssl_ERR_clear_error() + raise wrap_oserror(space, OSError(errno, '')) + else: + raise _ssl_seterror(space, None, 0) + try: + if libssl_SSL_CTX_set_tmp_dh(self.ctx, dh) == 0: + raise _ssl_seterror(space, None, 0) + finally: + libssl_DH_free(dh) + def load_verify_locations_w(self, space, w_cafile=None, w_capath=None, w_cadata=None): if space.is_none(w_cafile): @@ -1156,9 +1181,10 @@ __new__=interp2app(_SSLContext.descr_new), _wrap_socket=interp2app(_SSLContext.descr_wrap_socket), set_ciphers=interp2app(_SSLContext.descr_set_ciphers), - load_verify_locations=interp2app(_SSLContext.load_verify_locations_w), cert_store_stats=interp2app(_SSLContext.cert_store_stats_w), load_cert_chain=interp2app(_SSLContext.load_cert_chain_w), + load_dh_params=interp2app(_SSLContext.load_dh_params_w), + load_verify_locations=interp2app(_SSLContext.load_verify_locations_w), set_default_verify_paths=interp2app(_SSLContext.descr_set_default_verify_paths), _set_npn_protocols=interp2app(_SSLContext.set_npn_protocols_w), diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,11 +1,11 @@ from rpython.tool.udir import udir +import os class 
AppTestSSL: spaceconfig = dict(usemodules=('_ssl', '_socket', 'thread')) def setup_class(cls): - import os cls.w_nullbytecert = cls.space.wrap(os.path.join( os.path.dirname(__file__), 'nullbytecert.pem')) @@ -269,6 +269,8 @@ tmpfile = udir / "emptycert.pem" tmpfile.write(SSL_EMPTYCERT) cls.w_emptycert = cls.space.wrap(str(tmpfile)) + cls.w_dh512 = cls.space.wrap(os.path.join( + os.path.dirname(__file__), 'dh512.pem')) def test_load_cert_chain(self): import _ssl @@ -291,6 +293,14 @@ ctx.load_verify_locations(cadata=cacert_pem) assert ctx.cert_store_stats()["x509_ca"] + def test_load_dh_params(self): + import _ssl + ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) + ctx.load_dh_params(self.dh512) + raises(TypeError, ctx.load_dh_params) + raises(TypeError, ctx.load_dh_params, None) + raises(_ssl.SSLError, ctx.load_dh_params, self.keycert) + SSL_CERTIFICATE = """ -----BEGIN CERTIFICATE----- MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -53,6 +53,7 @@ ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM') X509_NAME = rffi.COpaquePtr('X509_NAME') stack_st_X509_OBJECT = rffi.COpaquePtr('struct stack_st_X509_OBJECT') +DH = rffi.COpaquePtr('DH') class CConfigBootstrap: _compilation_info_ = eci @@ -356,6 +357,7 @@ ssl_external('BIO_s_file', [], BIO_METHOD) ssl_external('BIO_new', [BIO_METHOD], BIO) ssl_external('BIO_set_nbio', [BIO, rffi.INT], rffi.INT, macro=True) +ssl_external('BIO_new_file', [rffi.CCHARP, rffi.CCHARP], BIO) ssl_external('BIO_new_mem_buf', [rffi.VOIDP, rffi.INT], BIO) ssl_external('BIO_free', [BIO], rffi.INT) ssl_external('BIO_reset', [BIO], rffi.INT, macro=True) @@ -367,6 +369,11 @@ ssl_external('PEM_read_bio_X509_AUX', [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509) +ssl_external('PEM_read_bio_DHparams', + [BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], DH) +ssl_external('SSL_CTX_set_tmp_dh', [SSL_CTX, DH], rffi.INT, macro=True) 
+ssl_external('DH_free', [DH], lltype.Void, releasegil=False) + if HAS_NPN: SSL_NEXT_PROTOS_ADV_CB = lltype.Ptr(lltype.FuncType( [SSL, rffi.CCHARPP, rffi.UINTP, rffi.VOIDP], rffi.INT)) From noreply at buildbot.pypy.org Sun Jan 25 19:57:05 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 19:57:05 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Lots of improvement to the SSLError class. Message-ID: <20150125185705.B263E1C0499@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75517:ad8cb0a374b6 Date: 2015-01-25 19:55 +0100 http://bitbucket.org/pypy/pypy/changeset/ad8cb0a374b6/ Log: Lots of improvement to the SSLError class. diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -3,12 +3,15 @@ from rpython.rlib.ropenssl import * from rpython.rlib.rposix import get_errno, set_errno from rpython.rlib.rweakref import RWeakValueDictionary +from rpython.rlib.objectmodel import specialize from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.module._ssl.ssl_data import ( + LIBRARY_CODES_TO_NAMES, ERROR_CODES_TO_NAMES) from pypy.module._socket import interp_socket @@ -91,13 +94,32 @@ constants["OPENSSL_VERSION"] = SSLEAY_VERSION -def ssl_error(space, msg, errno=0, exc='w_sslerror'): - w_exception_class = get_exception_class(space, exc) - if not errno: - w_exception = space.call_function(w_exception_class, space.wrap(msg)) - else: +def ssl_error(space, msg, errno=0, w_errtype=None, errcode=0): + reason_str = None + lib_str = None + if errcode: + err_lib = libssl_ERR_GET_LIB(errcode) + err_reason = libssl_ERR_GET_REASON(errcode) + reason_str = 
ERROR_CODES_TO_NAMES.get((err_lib, err_reason), None) + lib_str = LIBRARY_CODES_TO_NAMES.get(err_lib, None) + msg = rffi.charp2str(libssl_ERR_reason_error_string(errcode)) + if not msg: + msg = "unknown error" + if reason_str and lib_str: + msg = "[%s: %s] %s" % (lib_str, reason_str, msg) + elif lib_str: + msg = "[%s] %s" % (lib_str, msg) + + w_exception_class = w_errtype or get_exception_class(space, 'w_sslerror') + if errno or errcode: w_exception = space.call_function(w_exception_class, space.wrap(errno), space.wrap(msg)) + else: + w_exception = space.call_function(w_exception_class, space.wrap(msg)) + space.setattr(w_exception, space.wrap("reason"), + space.wrap(reason_str) if reason_str else space.w_None) + space.setattr(w_exception, space.wrap("library"), + space.wrap(lib_str) if lib_str else space.w_None) return OperationError(w_exception_class, w_exception) class SSLNpnProtocols(object): @@ -794,22 +816,25 @@ if ss is None: errval = libssl_ERR_peek_last_error() - errstr = rffi.charp2str(libssl_ERR_error_string(errval, None)) - return ssl_error(space, errstr, errval) + return ssl_error(space, None, errcode=errval) elif ss.ssl: err = libssl_SSL_get_error(ss.ssl, ret) else: err = SSL_ERROR_SSL + w_errtype = None errstr = "" errval = 0 if err == SSL_ERROR_ZERO_RETURN: + w_errtype = get_exception_class(space, 'w_sslzeroreturnerror') errstr = "TLS/SSL connection has been closed" errval = PY_SSL_ERROR_ZERO_RETURN elif err == SSL_ERROR_WANT_READ: + w_errtype = get_exception_class(space, 'w_sslwantreaderror') errstr = "The operation did not complete (read)" errval = PY_SSL_ERROR_WANT_READ elif err == SSL_ERROR_WANT_WRITE: + w_errtype = get_exception_class(space, 'w_sslwantwriteerror') errstr = "The operation did not complete (write)" errval = PY_SSL_ERROR_WANT_WRITE elif err == SSL_ERROR_WANT_X509_LOOKUP: @@ -829,6 +854,7 @@ error = rsocket.last_error() return interp_socket.converted_error(space, error) else: + w_errtype = get_exception_class(space, 
'w_sslsyscallerror') errstr = "Some I/O error occurred" errval = PY_SSL_ERROR_SYSCALL else: @@ -845,7 +871,13 @@ errstr = "Invalid error code" errval = PY_SSL_ERROR_INVALID_ERROR_CODE - return ssl_error(space, errstr, errval) + return ssl_error(space, errstr, errval, w_errtype=w_errtype) + +def SSLError_descr_str(space, w_exc): + w_strerror = space.getattr(w_exc, space.wrap("strerror")) + if not space.is_none(w_strerror): + return w_strerror + return space.str(space.getattr(w_exc, space.wrap("args"))) class Cache: @@ -853,6 +885,8 @@ w_socketerror = interp_socket.get_error(space, "error") self.w_sslerror = space.new_exception_class( "_ssl.SSLError", w_socketerror) + space.setattr(self.w_sslerror, space.wrap('__str__'), + space.wrap(interp2app(SSLError_descr_str))) self.w_sslzeroreturnerror = space.new_exception_class( "_ssl.SSLZeroReturnError", self.w_sslerror) self.w_sslwantreaderror = space.new_exception_class( @@ -865,6 +899,7 @@ "_ssl.SSLEOFError", self.w_sslerror) + at specialize.memo() def get_exception_class(space, name): return getattr(space.fromcache(Cache), name) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -301,6 +301,61 @@ raises(TypeError, ctx.load_dh_params, None) raises(_ssl.SSLError, ctx.load_dh_params, self.keycert) + +class AppTestSSLError: + spaceconfig = dict(usemodules=('_ssl', '_socket', 'thread')) + + def setup_class(cls): + tmpfile = udir / "tmpfile.pem" + tmpfile.write(SSL_CERTIFICATE + SSL_PRIVATE_KEY) + cls.w_keycert = cls.space.wrap(str(tmpfile)) + + def test_str(self): + # The str() of a SSLError doesn't include the errno + import _ssl + e = _ssl.SSLError(1, "foo") + assert str(e) == "foo" + assert e.errno == 1 + # Same for a subclass + e = _ssl.SSLZeroReturnError(1, "foo") + assert str(e) == "foo" + assert e.errno == 1 + + def test_lib_reason(self): + # Test the library and reason attributes + import _ssl + ctx = 
_ssl._SSLContext(_ssl.PROTOCOL_TLSv1) + exc = raises(_ssl.SSLError, ctx.load_dh_params, self.keycert) + assert exc.value.library == 'PEM' + assert exc.value.reason == 'NO_START_LINE' + s = str(exc.value) + assert s.startswith("[PEM: NO_START_LINE] no start line") + + def test_subclass(self): + # Check that the appropriate SSLError subclass is raised + # (this only tests one of them) + import _ssl, _socket + ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) + s = _socket.socket() + try: + s.bind(("127.0.0.1", 0)) + s.listen(5) + c = _socket.socket() + c.connect(s.getsockname()) + c.setblocking(False) + + c = ctx._wrap_socket(c, False) + try: + exc = raises(_ssl.SSLWantReadError, c.do_handshake) + msg= str(exc.value) + assert msg.startswith("The operation did not complete (read)") + # For compatibility + assert exc.value.errno == _ssl.SSL_ERROR_WANT_READ + finally: + c.shutdown() + finally: + s.close() + SSL_CERTIFICATE = """ -----BEGIN CERTIFICATE----- MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -337,6 +337,7 @@ ssl_external('ERR_get_error', [], rffi.INT) ssl_external('ERR_peek_last_error', [], rffi.INT) ssl_external('ERR_error_string', [rffi.ULONG, rffi.CCHARP], rffi.CCHARP) +ssl_external('ERR_reason_error_string', [rffi.ULONG], rffi.CCHARP) ssl_external('ERR_clear_error', [], lltype.Void) ssl_external('ERR_GET_LIB', [rffi.ULONG], rffi.INT, macro=True) ssl_external('ERR_GET_REASON', [rffi.ULONG], rffi.INT, macro=True) From noreply at buildbot.pypy.org Sun Jan 25 19:59:31 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 19:59:31 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Add missing file Message-ID: <20150125185931.CE04A1C03C6@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75518:21af94f28e21 Date: 2015-01-25 19:59 +0100 
http://bitbucket.org/pypy/pypy/changeset/21af94f28e21/ Log: Add missing file diff --git a/pypy/module/_ssl/ssl_data.py b/pypy/module/_ssl/ssl_data.py new file mode 100644 --- /dev/null +++ b/pypy/module/_ssl/ssl_data.py @@ -0,0 +1,358 @@ +# This file is transformed from CPython: Modules/_ssl_data.h +# Itself generated by Tools/ssl/make_ssl_data.py +# Generated on 2012-05-16T23:56:40.981382 + +from rpython.rlib import ropenssl +from rpython.rtyper.tool import rffi_platform + +class CConfig: + _compilation_info_ = ropenssl.eci + +library_codes = "PEM SSL X509".split() +for code in library_codes: + setattr(CConfig, code, rffi_platform.ConstantInteger( + 'ERR_LIB_' + code)) + +error_codes = [ + ('PEM', 'BAD_BASE64_DECODE'), + ('PEM', 'BAD_DECRYPT'), + ('PEM', 'BAD_END_LINE'), + ('PEM', 'BAD_IV_CHARS'), + ('PEM', 'BAD_MAGIC_NUMBER'), + ('PEM', 'BAD_PASSWORD_READ'), + ('PEM', 'BAD_VERSION_NUMBER'), + ('PEM', 'BIO_WRITE_FAILURE'), + ('PEM', 'CIPHER_IS_NULL'), + ('PEM', 'ERROR_CONVERTING_PRIVATE_KEY'), + ('PEM', 'EXPECTING_PRIVATE_KEY_BLOB'), + ('PEM', 'EXPECTING_PUBLIC_KEY_BLOB'), + ('PEM', 'INCONSISTENT_HEADER'), + ('PEM', 'KEYBLOB_HEADER_PARSE_ERROR'), + ('PEM', 'KEYBLOB_TOO_SHORT'), + ('PEM', 'NOT_DEK_INFO'), + ('PEM', 'NOT_ENCRYPTED'), + ('PEM', 'NOT_PROC_TYPE'), + ('PEM', 'NO_START_LINE'), + ('PEM', 'PROBLEMS_GETTING_PASSWORD'), + ('PEM', 'PUBLIC_KEY_NO_RSA'), + ('PEM', 'PVK_DATA_TOO_SHORT'), + ('PEM', 'PVK_TOO_SHORT'), + ('PEM', 'READ_KEY'), + ('PEM', 'SHORT_HEADER'), + ('PEM', 'UNSUPPORTED_CIPHER'), + ('PEM', 'UNSUPPORTED_ENCRYPTION'), + ('PEM', 'UNSUPPORTED_KEY_COMPONENTS'), + ('SSL', 'APP_DATA_IN_HANDSHAKE'), + ('SSL', 'ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT'), + ('SSL', 'BAD_ALERT_RECORD'), + ('SSL', 'BAD_AUTHENTICATION_TYPE'), + ('SSL', 'BAD_CHANGE_CIPHER_SPEC'), + ('SSL', 'BAD_CHECKSUM'), + ('SSL', 'BAD_DATA_RETURNED_BY_CALLBACK'), + ('SSL', 'BAD_DECOMPRESSION'), + ('SSL', 'BAD_DH_G_LENGTH'), + ('SSL', 'BAD_DH_PUB_KEY_LENGTH'), + ('SSL', 
'BAD_DH_P_LENGTH'), + ('SSL', 'BAD_DIGEST_LENGTH'), + ('SSL', 'BAD_DSA_SIGNATURE'), + ('SSL', 'BAD_ECC_CERT'), + ('SSL', 'BAD_ECDSA_SIGNATURE'), + ('SSL', 'BAD_ECPOINT'), + ('SSL', 'BAD_HANDSHAKE_LENGTH'), + ('SSL', 'BAD_HELLO_REQUEST'), + ('SSL', 'BAD_LENGTH'), + ('SSL', 'BAD_MAC_DECODE'), + ('SSL', 'BAD_MAC_LENGTH'), + ('SSL', 'BAD_MESSAGE_TYPE'), + ('SSL', 'BAD_PACKET_LENGTH'), + ('SSL', 'BAD_PROTOCOL_VERSION_NUMBER'), + ('SSL', 'BAD_PSK_IDENTITY_HINT_LENGTH'), + ('SSL', 'BAD_RESPONSE_ARGUMENT'), + ('SSL', 'BAD_RSA_DECRYPT'), + ('SSL', 'BAD_RSA_ENCRYPT'), + ('SSL', 'BAD_RSA_E_LENGTH'), + ('SSL', 'BAD_RSA_MODULUS_LENGTH'), + ('SSL', 'BAD_RSA_SIGNATURE'), + ('SSL', 'BAD_SIGNATURE'), + ('SSL', 'BAD_SSL_FILETYPE'), + ('SSL', 'BAD_SSL_SESSION_ID_LENGTH'), + ('SSL', 'BAD_STATE'), + ('SSL', 'BAD_WRITE_RETRY'), + ('SSL', 'BIO_NOT_SET'), + ('SSL', 'BLOCK_CIPHER_PAD_IS_WRONG'), + ('SSL', 'BN_LIB'), + ('SSL', 'CA_DN_LENGTH_MISMATCH'), + ('SSL', 'CA_DN_TOO_LONG'), + ('SSL', 'CCS_RECEIVED_EARLY'), + ('SSL', 'CERTIFICATE_VERIFY_FAILED'), + ('SSL', 'CERT_LENGTH_MISMATCH'), + ('SSL', 'CHALLENGE_IS_DIFFERENT'), + ('SSL', 'CIPHER_CODE_WRONG_LENGTH'), + ('SSL', 'CIPHER_OR_HASH_UNAVAILABLE'), + ('SSL', 'CIPHER_TABLE_SRC_ERROR'), + ('SSL', 'CLIENTHELLO_TLSEXT'), + ('SSL', 'COMPRESSED_LENGTH_TOO_LONG'), + ('SSL', 'COMPRESSION_DISABLED'), + ('SSL', 'COMPRESSION_FAILURE'), + ('SSL', 'COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE'), + ('SSL', 'COMPRESSION_LIBRARY_ERROR'), + ('SSL', 'CONNECTION_ID_IS_DIFFERENT'), + ('SSL', 'CONNECTION_TYPE_NOT_SET'), + ('SSL', 'COOKIE_MISMATCH'), + ('SSL', 'DATA_BETWEEN_CCS_AND_FINISHED'), + ('SSL', 'DATA_LENGTH_TOO_LONG'), + ('SSL', 'DECRYPTION_FAILED'), + ('SSL', 'DECRYPTION_FAILED_OR_BAD_RECORD_MAC'), + ('SSL', 'DH_PUBLIC_VALUE_LENGTH_IS_WRONG'), + ('SSL', 'DIGEST_CHECK_FAILED'), + ('SSL', 'DTLS_MESSAGE_TOO_BIG'), + ('SSL', 'DUPLICATE_COMPRESSION_ID'), + ('SSL', 'ECC_CERT_NOT_FOR_KEY_AGREEMENT'), + ('SSL', 'ECC_CERT_NOT_FOR_SIGNING'), + ('SSL', 
'ECC_CERT_SHOULD_HAVE_RSA_SIGNATURE'), + ('SSL', 'ECC_CERT_SHOULD_HAVE_SHA1_SIGNATURE'), + ('SSL', 'ECGROUP_TOO_LARGE_FOR_CIPHER'), + ('SSL', 'ENCRYPTED_LENGTH_TOO_LONG'), + ('SSL', 'ERROR_GENERATING_TMP_RSA_KEY'), + ('SSL', 'ERROR_IN_RECEIVED_CIPHER_LIST'), + ('SSL', 'EXCESSIVE_MESSAGE_SIZE'), + ('SSL', 'EXTRA_DATA_IN_MESSAGE'), + ('SSL', 'GOT_A_FIN_BEFORE_A_CCS'), + ('SSL', 'HTTPS_PROXY_REQUEST'), + ('SSL', 'HTTP_REQUEST'), + ('SSL', 'ILLEGAL_PADDING'), + ('SSL', 'INCONSISTENT_COMPRESSION'), + ('SSL', 'INVALID_CHALLENGE_LENGTH'), + ('SSL', 'INVALID_COMMAND'), + ('SSL', 'INVALID_COMPRESSION_ALGORITHM'), + ('SSL', 'INVALID_PURPOSE'), + ('SSL', 'INVALID_STATUS_RESPONSE'), + ('SSL', 'INVALID_TICKET_KEYS_LENGTH'), + ('SSL', 'INVALID_TRUST'), + ('SSL', 'KEY_ARG_TOO_LONG'), + ('SSL', 'KRB5'), + ('SSL', 'KRB5_C_CC_PRINC'), + ('SSL', 'KRB5_C_GET_CRED'), + ('SSL', 'KRB5_C_INIT'), + ('SSL', 'KRB5_C_MK_REQ'), + ('SSL', 'KRB5_S_BAD_TICKET'), + ('SSL', 'KRB5_S_INIT'), + ('SSL', 'KRB5_S_RD_REQ'), + ('SSL', 'KRB5_S_TKT_EXPIRED'), + ('SSL', 'KRB5_S_TKT_NYV'), + ('SSL', 'KRB5_S_TKT_SKEW'), + ('SSL', 'LENGTH_MISMATCH'), + ('SSL', 'LENGTH_TOO_SHORT'), + ('SSL', 'LIBRARY_BUG'), + ('SSL', 'LIBRARY_HAS_NO_CIPHERS'), + ('SSL', 'MESSAGE_TOO_LONG'), + ('SSL', 'MISSING_DH_DSA_CERT'), + ('SSL', 'MISSING_DH_KEY'), + ('SSL', 'MISSING_DH_RSA_CERT'), + ('SSL', 'MISSING_DSA_SIGNING_CERT'), + ('SSL', 'MISSING_EXPORT_TMP_DH_KEY'), + ('SSL', 'MISSING_EXPORT_TMP_RSA_KEY'), + ('SSL', 'MISSING_RSA_CERTIFICATE'), + ('SSL', 'MISSING_RSA_ENCRYPTING_CERT'), + ('SSL', 'MISSING_RSA_SIGNING_CERT'), + ('SSL', 'MISSING_TMP_DH_KEY'), + ('SSL', 'MISSING_TMP_ECDH_KEY'), + ('SSL', 'MISSING_TMP_RSA_KEY'), + ('SSL', 'MISSING_TMP_RSA_PKEY'), + ('SSL', 'MISSING_VERIFY_MESSAGE'), + ('SSL', 'NON_SSLV2_INITIAL_PACKET'), + ('SSL', 'NO_CERTIFICATES_RETURNED'), + ('SSL', 'NO_CERTIFICATE_ASSIGNED'), + ('SSL', 'NO_CERTIFICATE_RETURNED'), + ('SSL', 'NO_CERTIFICATE_SET'), + ('SSL', 'NO_CERTIFICATE_SPECIFIED'), + ('SSL', 
'NO_CIPHERS_AVAILABLE'), + ('SSL', 'NO_CIPHERS_PASSED'), + ('SSL', 'NO_CIPHERS_SPECIFIED'), + ('SSL', 'NO_CIPHER_LIST'), + ('SSL', 'NO_CIPHER_MATCH'), + ('SSL', 'NO_CLIENT_CERT_METHOD'), + ('SSL', 'NO_CLIENT_CERT_RECEIVED'), + ('SSL', 'NO_COMPRESSION_SPECIFIED'), + ('SSL', 'NO_GOST_CERTIFICATE_SENT_BY_PEER'), + ('SSL', 'NO_METHOD_SPECIFIED'), + ('SSL', 'NO_PRIVATEKEY'), + ('SSL', 'NO_PRIVATE_KEY_ASSIGNED'), + ('SSL', 'NO_PROTOCOLS_AVAILABLE'), + ('SSL', 'NO_PUBLICKEY'), + ('SSL', 'NO_RENEGOTIATION'), + ('SSL', 'NO_REQUIRED_DIGEST'), + ('SSL', 'NO_SHARED_CIPHER'), + ('SSL', 'NO_VERIFY_CALLBACK'), + ('SSL', 'NULL_SSL_CTX'), + ('SSL', 'NULL_SSL_METHOD_PASSED'), + ('SSL', 'OLD_SESSION_CIPHER_NOT_RETURNED'), + ('SSL', 'OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED'), + ('SSL', 'ONLY_TLS_ALLOWED_IN_FIPS_MODE'), + ('SSL', 'OPAQUE_PRF_INPUT_TOO_LONG'), + ('SSL', 'PACKET_LENGTH_TOO_LONG'), + ('SSL', 'PARSE_TLSEXT'), + ('SSL', 'PATH_TOO_LONG'), + ('SSL', 'PEER_DID_NOT_RETURN_A_CERTIFICATE'), + ('SSL', 'PEER_ERROR'), + ('SSL', 'PEER_ERROR_CERTIFICATE'), + ('SSL', 'PEER_ERROR_NO_CERTIFICATE'), + ('SSL', 'PEER_ERROR_NO_CIPHER'), + ('SSL', 'PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE'), + ('SSL', 'PRE_MAC_LENGTH_TOO_LONG'), + ('SSL', 'PROBLEMS_MAPPING_CIPHER_FUNCTIONS'), + ('SSL', 'PROTOCOL_IS_SHUTDOWN'), + ('SSL', 'PSK_IDENTITY_NOT_FOUND'), + ('SSL', 'PSK_NO_CLIENT_CB'), + ('SSL', 'PSK_NO_SERVER_CB'), + ('SSL', 'PUBLIC_KEY_ENCRYPT_ERROR'), + ('SSL', 'PUBLIC_KEY_IS_NOT_RSA'), + ('SSL', 'PUBLIC_KEY_NOT_RSA'), + ('SSL', 'READ_BIO_NOT_SET'), + ('SSL', 'READ_TIMEOUT_EXPIRED'), + ('SSL', 'READ_WRONG_PACKET_TYPE'), + ('SSL', 'RECORD_LENGTH_MISMATCH'), + ('SSL', 'RECORD_TOO_LARGE'), + ('SSL', 'RECORD_TOO_SMALL'), + ('SSL', 'RENEGOTIATE_EXT_TOO_LONG'), + ('SSL', 'RENEGOTIATION_ENCODING_ERR'), + ('SSL', 'RENEGOTIATION_MISMATCH'), + ('SSL', 'REQUIRED_CIPHER_MISSING'), + ('SSL', 'REQUIRED_COMPRESSSION_ALGORITHM_MISSING'), + ('SSL', 'REUSE_CERT_LENGTH_NOT_ZERO'), + ('SSL', 
'REUSE_CERT_TYPE_NOT_ZERO'), + ('SSL', 'REUSE_CIPHER_LIST_NOT_ZERO'), + ('SSL', 'SCSV_RECEIVED_WHEN_RENEGOTIATING'), + ('SSL', 'SERVERHELLO_TLSEXT'), + ('SSL', 'SESSION_ID_CONTEXT_UNINITIALIZED'), + ('SSL', 'SHORT_READ'), + ('SSL', 'SIGNATURE_FOR_NON_SIGNING_CERTIFICATE'), + ('SSL', 'SSL23_DOING_SESSION_ID_REUSE'), + ('SSL', 'SSL2_CONNECTION_ID_TOO_LONG'), + ('SSL', 'SSL3_EXT_INVALID_ECPOINTFORMAT'), + ('SSL', 'SSL3_EXT_INVALID_SERVERNAME'), + ('SSL', 'SSL3_EXT_INVALID_SERVERNAME_TYPE'), + ('SSL', 'SSL3_SESSION_ID_TOO_LONG'), + ('SSL', 'SSL3_SESSION_ID_TOO_SHORT'), + ('SSL', 'SSLV3_ALERT_BAD_CERTIFICATE'), + ('SSL', 'SSLV3_ALERT_BAD_RECORD_MAC'), + ('SSL', 'SSLV3_ALERT_CERTIFICATE_EXPIRED'), + ('SSL', 'SSLV3_ALERT_CERTIFICATE_REVOKED'), + ('SSL', 'SSLV3_ALERT_CERTIFICATE_UNKNOWN'), + ('SSL', 'SSLV3_ALERT_DECOMPRESSION_FAILURE'), + ('SSL', 'SSLV3_ALERT_HANDSHAKE_FAILURE'), + ('SSL', 'SSLV3_ALERT_ILLEGAL_PARAMETER'), + ('SSL', 'SSLV3_ALERT_NO_CERTIFICATE'), + ('SSL', 'SSLV3_ALERT_UNEXPECTED_MESSAGE'), + ('SSL', 'SSLV3_ALERT_UNSUPPORTED_CERTIFICATE'), + ('SSL', 'SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION'), + ('SSL', 'SSL_HANDSHAKE_FAILURE'), + ('SSL', 'SSL_LIBRARY_HAS_NO_CIPHERS'), + ('SSL', 'SSL_SESSION_ID_CALLBACK_FAILED'), + ('SSL', 'SSL_SESSION_ID_CONFLICT'), + ('SSL', 'SSL_SESSION_ID_CONTEXT_TOO_LONG'), + ('SSL', 'SSL_SESSION_ID_HAS_BAD_LENGTH'), + ('SSL', 'SSL_SESSION_ID_IS_DIFFERENT'), + ('SSL', 'TLSV1_ALERT_ACCESS_DENIED'), + ('SSL', 'TLSV1_ALERT_DECODE_ERROR'), + ('SSL', 'TLSV1_ALERT_DECRYPTION_FAILED'), + ('SSL', 'TLSV1_ALERT_DECRYPT_ERROR'), + ('SSL', 'TLSV1_ALERT_EXPORT_RESTRICTION'), + ('SSL', 'TLSV1_ALERT_INSUFFICIENT_SECURITY'), + ('SSL', 'TLSV1_ALERT_INTERNAL_ERROR'), + ('SSL', 'TLSV1_ALERT_NO_RENEGOTIATION'), + ('SSL', 'TLSV1_ALERT_PROTOCOL_VERSION'), + ('SSL', 'TLSV1_ALERT_RECORD_OVERFLOW'), + ('SSL', 'TLSV1_ALERT_UNKNOWN_CA'), + ('SSL', 'TLSV1_ALERT_USER_CANCELLED'), + ('SSL', 'TLSV1_BAD_CERTIFICATE_HASH_VALUE'), + ('SSL', 
'TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE'), + ('SSL', 'TLSV1_CERTIFICATE_UNOBTAINABLE'), + ('SSL', 'TLSV1_UNRECOGNIZED_NAME'), + ('SSL', 'TLSV1_UNSUPPORTED_EXTENSION'), + ('SSL', 'TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER'), + ('SSL', 'TLS_INVALID_ECPOINTFORMAT_LIST'), + ('SSL', 'TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST'), + ('SSL', 'TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG'), + ('SSL', 'TRIED_TO_USE_UNSUPPORTED_CIPHER'), + ('SSL', 'UNABLE_TO_DECODE_DH_CERTS'), + ('SSL', 'UNABLE_TO_DECODE_ECDH_CERTS'), + ('SSL', 'UNABLE_TO_EXTRACT_PUBLIC_KEY'), + ('SSL', 'UNABLE_TO_FIND_DH_PARAMETERS'), + ('SSL', 'UNABLE_TO_FIND_ECDH_PARAMETERS'), + ('SSL', 'UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS'), + ('SSL', 'UNABLE_TO_FIND_SSL_METHOD'), + ('SSL', 'UNABLE_TO_LOAD_SSL2_MD5_ROUTINES'), + ('SSL', 'UNABLE_TO_LOAD_SSL3_MD5_ROUTINES'), + ('SSL', 'UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES'), + ('SSL', 'UNEXPECTED_MESSAGE'), + ('SSL', 'UNEXPECTED_RECORD'), + ('SSL', 'UNINITIALIZED'), + ('SSL', 'UNKNOWN_ALERT_TYPE'), + ('SSL', 'UNKNOWN_CERTIFICATE_TYPE'), + ('SSL', 'UNKNOWN_CIPHER_RETURNED'), + ('SSL', 'UNKNOWN_CIPHER_TYPE'), + ('SSL', 'UNKNOWN_KEY_EXCHANGE_TYPE'), + ('SSL', 'UNKNOWN_PKEY_TYPE'), + ('SSL', 'UNKNOWN_PROTOCOL'), + ('SSL', 'UNKNOWN_REMOTE_ERROR_TYPE'), + ('SSL', 'UNKNOWN_SSL_VERSION'), + ('SSL', 'UNKNOWN_STATE'), + ('SSL', 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED'), + ('SSL', 'UNSUPPORTED_CIPHER'), + ('SSL', 'UNSUPPORTED_COMPRESSION_ALGORITHM'), + ('SSL', 'UNSUPPORTED_DIGEST_TYPE'), + ('SSL', 'UNSUPPORTED_ELLIPTIC_CURVE'), + ('SSL', 'UNSUPPORTED_PROTOCOL'), + ('SSL', 'UNSUPPORTED_SSL_VERSION'), + ('SSL', 'UNSUPPORTED_STATUS_TYPE'), + ('SSL', 'WRITE_BIO_NOT_SET'), + ('SSL', 'WRONG_CIPHER_RETURNED'), + ('SSL', 'WRONG_MESSAGE_TYPE'), + ('SSL', 'WRONG_NUMBER_OF_KEY_BITS'), + ('SSL', 'WRONG_SIGNATURE_LENGTH'), + ('SSL', 'WRONG_SIGNATURE_SIZE'), + ('SSL', 'WRONG_SSL_VERSION'), + ('SSL', 'WRONG_VERSION_NUMBER'), + ('SSL', 'X509_LIB'), + ('SSL', 'X509_VERIFICATION_SETUP_PROBLEMS'), + ('X509', 
'BAD_X509_FILETYPE'), + ('X509', 'BASE64_DECODE_ERROR'), + ('X509', 'CANT_CHECK_DH_KEY'), + ('X509', 'CERT_ALREADY_IN_HASH_TABLE'), + ('X509', 'ERR_ASN1_LIB'), + ('X509', 'INVALID_DIRECTORY'), + ('X509', 'INVALID_FIELD_NAME'), + ('X509', 'INVALID_TRUST'), + ('X509', 'KEY_TYPE_MISMATCH'), + ('X509', 'KEY_VALUES_MISMATCH'), + ('X509', 'LOADING_CERT_DIR'), + ('X509', 'LOADING_DEFAULTS'), + ('X509', 'METHOD_NOT_SUPPORTED'), + ('X509', 'NO_CERT_SET_FOR_US_TO_VERIFY'), + ('X509', 'PUBLIC_KEY_DECODE_ERROR'), + ('X509', 'PUBLIC_KEY_ENCODE_ERROR'), + ('X509', 'SHOULD_RETRY'), + ('X509', 'UNABLE_TO_FIND_PARAMETERS_IN_CHAIN'), + ('X509', 'UNABLE_TO_GET_CERTS_PUBLIC_KEY'), + ('X509', 'UNKNOWN_KEY_TYPE'), + ('X509', 'UNKNOWN_NID'), + ('X509', 'UNKNOWN_PURPOSE_ID'), + ('X509', 'UNKNOWN_TRUST_ID'), + ('X509', 'UNSUPPORTED_ALGORITHM'), + ('X509', 'WRONG_LOOKUP_TYPE'), + ('X509', 'WRONG_TYPE'), +] +for lib, code in error_codes: + setattr(CConfig, code, rffi_platform.DefinedConstantInteger( + '%s_R_%s' % (lib, code))) + +cconfig = rffi_platform.configure(CConfig) + +LIBRARY_CODES_TO_NAMES = {} +for code in library_codes: + LIBRARY_CODES_TO_NAMES[cconfig[code]] = code +ERROR_CODES_TO_NAMES = {} +for lib, code in error_codes: + ERROR_CODES_TO_NAMES[cconfig[lib], cconfig[code]] = code + From noreply at buildbot.pypy.org Sun Jan 25 23:16:10 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 23:16:10 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Fix one deadlock in test_ssl Message-ID: <20150125221610.DEE231C0499@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75521:7100871097ec Date: 2015-01-25 21:50 +0100 http://bitbucket.org/pypy/pypy/changeset/7100871097ec/ Log: Fix one deadlock in test_ssl diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -505,6 +505,8 @@ the peer certificate, or None if no certificate was 
provided. This will return the certificate even if it wasn't validated. """ + if not self.handshake_done: + raise oefmt(space.w_ValueError, "hanshake not done yet") if not self.peer_cert: return space.w_None From noreply at buildbot.pypy.org Sun Jan 25 23:16:09 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 23:16:09 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Add SSLSocket.version() Message-ID: <20150125221609.BB8CE1C0499@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75520:360740259e7f Date: 2015-01-25 21:27 +0100 http://bitbucket.org/pypy/pypy/changeset/360740259e7f/ Log: Add SSLSocket.version() diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -548,6 +548,14 @@ return space.w_None return space.wrap(rffi.charp2str(short_name)) + def version_w(self, space): + if not self.ssl: + return space.w_None + version = libssl_SSL_get_version(self.ssl) + if not version: + return space.w_None + return space.wrap(rffi.charp2str(version)) + _SSLSocket.typedef = TypeDef( "_ssl._SSLSocket", @@ -560,6 +568,7 @@ shutdown=interp2app(_SSLSocket.shutdown), selected_npn_protocol = interp2app(_SSLSocket.selected_npn_protocol), compression = interp2app(_SSLSocket.compression_w), + version = interp2app(_SSLSocket.version_w), ) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -283,6 +283,7 @@ ssl_external('SSL_set_read_ahead', [SSL, rffi.INT], lltype.Void) ssl_external('SSL_set_tlsext_host_name', [SSL, rffi.CCHARP], rffi.INT, macro=True) ssl_external('SSL_get_current_compression', [SSL], COMP_METHOD) +ssl_external('SSL_get_version', [SSL], rffi.CCHARP) ssl_external('SSL_get_peer_certificate', [SSL], X509) ssl_external('X509_get_subject_name', [X509], X509_NAME) From noreply at buildbot.pypy.org Sun Jan 25 23:16:08 2015 From: 
noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 23:16:08 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Add SSLSocket.compression() Message-ID: <20150125221608.783021C0499@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75519:57cdfe00b84e Date: 2015-01-25 21:23 +0100 http://bitbucket.org/pypy/pypy/changeset/57cdfe00b84e/ Log: Add SSLSocket.compression() diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -76,6 +76,7 @@ constants["OP_NO_SSLv2"] = SSL_OP_NO_SSLv2 constants["OP_NO_SSLv3"] = SSL_OP_NO_SSLv3 constants["OP_NO_TLSv1"] = SSL_OP_NO_TLSv1 +constants["OP_NO_COMPRESSION"] = SSL_OP_NO_COMPRESSION constants["HAS_SNI"] = HAS_SNI constants["HAS_ECDH"] = True # To break the test suite constants["HAS_NPN"] = HAS_NPN @@ -536,6 +537,17 @@ return space.wrap( rffi.charpsize2str(out_ptr[0], intmask(len_ptr[0]))) + def compression_w(self, space): + if not self.ssl: + return space.w_None + comp_method = libssl_SSL_get_current_compression(self.ssl) + if not comp_method or intmask(comp_method[0].c_type) == NID_undef: + return space.w_None + short_name = libssl_OBJ_nid2sn(comp_method[0].c_type) + if not short_name: + return space.w_None + return space.wrap(rffi.charp2str(short_name)) + _SSLSocket.typedef = TypeDef( "_ssl._SSLSocket", @@ -547,6 +559,7 @@ cipher=interp2app(_SSLSocket.cipher), shutdown=interp2app(_SSLSocket.shutdown), selected_npn_protocol = interp2app(_SSLSocket.selected_npn_protocol), + compression = interp2app(_SSLSocket.compression_w), ) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -81,6 +81,8 @@ SSL_OP_NO_SSLv2 = rffi_platform.ConstantInteger("SSL_OP_NO_SSLv2") SSL_OP_NO_SSLv3 = rffi_platform.ConstantInteger("SSL_OP_NO_SSLv3") SSL_OP_NO_TLSv1 = rffi_platform.ConstantInteger("SSL_OP_NO_TLSv1") + 
SSL_OP_NO_COMPRESSION = rffi_platform.ConstantInteger( + "SSL_OP_NO_COMPRESSION") SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS = rffi_platform.ConstantInteger( "SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS") HAS_SNI = rffi_platform.Defined("SSL_CTRL_SET_TLSEXT_HOSTNAME") @@ -177,6 +179,9 @@ ('name', rffi.CCHARP), ]) + COMP_METHOD_st = rffi_platform.Struct( + 'struct comp_method_st', + [('type', rffi.INT),]) for k, v in rffi_platform.configure(CConfig).items(): globals()[k] = v @@ -199,6 +204,7 @@ GENERAL_NAMES = rffi.COpaquePtr('GENERAL_NAMES') GENERAL_NAME = rffi.CArrayPtr(GENERAL_NAME_st) OBJ_NAME = rffi.CArrayPtr(OBJ_NAME_st) +COMP_METHOD = rffi.CArrayPtr(COMP_METHOD_st) HAVE_OPENSSL_RAND = OPENSSL_VERSION_NUMBER >= 0x0090500f HAVE_OPENSSL_FINISHED = OPENSSL_VERSION_NUMBER >= 0x0090500f @@ -276,6 +282,7 @@ ssl_external('SSL_get_shutdown', [SSL], rffi.INT) ssl_external('SSL_set_read_ahead', [SSL, rffi.INT], lltype.Void) ssl_external('SSL_set_tlsext_host_name', [SSL, rffi.CCHARP], rffi.INT, macro=True) +ssl_external('SSL_get_current_compression', [SSL], COMP_METHOD) ssl_external('SSL_get_peer_certificate', [SSL], X509) ssl_external('X509_get_subject_name', [X509], X509_NAME) From noreply at buildbot.pypy.org Sun Jan 25 23:16:12 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 23:16:12 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: SSLSocket.read() accepts a buffer, and works like a readinto() Message-ID: <20150125221612.207841C0499@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75522:d70a221356f9 Date: 2015-01-25 22:55 +0100 http://bitbucket.org/pypy/pypy/changeset/d70a221356f9/ Log: SSLSocket.read() accepts a buffer, and works like a readinto() diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -316,7 +316,7 @@ return space.wrap(count) @unwrap_spec(num_bytes=int) - def read(self, space, num_bytes): + def 
read(self, space, num_bytes, w_buffer=None): """read([len]) -> string Read up to len bytes from the SSL socket.""" @@ -334,6 +334,12 @@ raise ssl_error(space, "Socket closed without SSL shutdown handshake") + if w_buffer: + rwbuffer = space.getarg_w('w*', w_buffer) + num_bytes = min(num_bytes, rwbuffer.getlength()) + else: + rwbuffer = None + with rffi.scoped_alloc_buffer(num_bytes) as buf: while True: err = 0 @@ -347,7 +353,10 @@ sockstate = checkwait(space, self.w_socket, True) elif (err == SSL_ERROR_ZERO_RETURN and libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): - return space.wrap("") + if rwbuffer: + return space.wrap(0) + else: + return space.wrap("") else: sockstate = SOCKET_OPERATION_OK @@ -366,7 +375,11 @@ result = buf.str(count) - return space.wrap(result) + if rwbuffer: + rwbuffer.setslice(0, result) + return space.wrap(count) + else: + return space.wrap(result) def _refresh_nonblocking(self, space): # just in case the blocking state of the socket has been changed From noreply at buildbot.pypy.org Sun Jan 25 23:16:13 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 23:16:13 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Add SSLSocket.tls_unique_cb() Message-ID: <20150125221613.3F4AC1C0499@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75523:88b9caf2a309 Date: 2015-01-25 23:13 +0100 http://bitbucket.org/pypy/pypy/changeset/88b9caf2a309/ Log: Add SSLSocket.tls_unique_cb() diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -571,6 +571,27 @@ return space.w_None return space.wrap(rffi.charp2str(version)) + def tls_unique_cb_w(self, space): + """Returns the 'tls-unique' channel binding data, as defined by RFC 5929. 
+ If the TLS handshake is not yet complete, None is returned""" + + # In case of 'tls-unique' it will be 12 bytes for TLS, 36 + # bytes for older SSL, but let's be safe + CB_MAXLEN = 128 + + with lltype.scoped_alloc(rffi.CCHARP.TO, CB_MAXLEN) as buf: + if (libssl_SSL_session_reused(self.ssl) ^ + (self.socket_type == PY_SSL_CLIENT)): + # if session is resumed XOR we are the client + length = libssl_SSL_get_finished(self.ssl, buf, CB_MAXLEN) + else: + # if a new session XOR we are the server + length = libssl_SSL_get_peer_finished(self.ssl, buf, CB_MAXLEN) + + if length > 0: + return space.wrap(rffi.charpsize2str(buf, intmask(length))) + + _SSLSocket.typedef = TypeDef( "_ssl._SSLSocket", @@ -584,6 +605,7 @@ selected_npn_protocol = interp2app(_SSLSocket.selected_npn_protocol), compression = interp2app(_SSLSocket.compression_w), version = interp2app(_SSLSocket.version_w), + tls_unique_cb = interp2app(_SSLSocket.tls_unique_cb_w), ) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -231,6 +231,22 @@ self.s.close() del ss; gc.collect() + def test_tls_unique_cb(self): + import ssl, sys, gc + ss = ssl.wrap_socket(self.s) + ss.do_handshake() + assert isinstance(ss.get_channel_binding(), bytes) + self.s.close() + del ss; gc.collect() + + def test_compression(self): + import ssl, sys, gc + ss = ssl.wrap_socket(self.s) + ss.do_handshake() + assert ss.compression() in [None, 'ZLIB', 'RLE'] + self.s.close() + del ss; gc.collect() + class AppTestConnectedSSL_Timeout(AppTestConnectedSSL): # Same tests, with a socket timeout diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -282,6 +282,9 @@ ssl_external('SSL_get_shutdown', [SSL], rffi.INT) ssl_external('SSL_set_read_ahead', [SSL, rffi.INT], lltype.Void) ssl_external('SSL_set_tlsext_host_name', [SSL, rffi.CCHARP], rffi.INT, macro=True) 
+ssl_external('SSL_session_reused', [SSL], rffi.INT, macro=True) +ssl_external('SSL_get_finished', [SSL, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T) +ssl_external('SSL_get_peer_finished', [SSL, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T) ssl_external('SSL_get_current_compression', [SSL], COMP_METHOD) ssl_external('SSL_get_version', [SSL], rffi.CCHARP) From noreply at buildbot.pypy.org Sun Jan 25 23:16:14 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 25 Jan 2015 23:16:14 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Another missing file Message-ID: <20150125221614.614581C0499@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75524:dab31bf5b271 Date: 2015-01-25 23:15 +0100 http://bitbucket.org/pypy/pypy/changeset/dab31bf5b271/ Log: Another missing file diff --git a/pypy/module/_ssl/test/dh512.pem b/pypy/module/_ssl/test/dh512.pem new file mode 100644 --- /dev/null +++ b/pypy/module/_ssl/test/dh512.pem @@ -0,0 +1,9 @@ +-----BEGIN DH PARAMETERS----- +MEYCQQD1Kv884bEpQBgRjXyEpwpy1obEAxnIByl6ypUM2Zafq9AKUJsCRtMIPWak +XUGfnHy9iUsiGSa6q6Jew1XpKgVfAgEC +-----END DH PARAMETERS----- + +These are the 512 bit DH parameters from "Assigned Number for SKIP Protocols" +(http://www.skip-vpn.org/spec/numbers.html). +See there for how they were generated. +Note that g is not a generator, but this is not a problem since p is a safe prime. 
From noreply at buildbot.pypy.org Mon Jan 26 13:50:00 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Jan 2015 13:50:00 +0100 (CET) Subject: [pypy-commit] pypy vmprof: fix the errno handling in vmprof module Message-ID: <20150126125000.8E8111C0113@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75532:64c291c60695 Date: 2015-01-26 14:49 +0200 http://bitbucket.org/pypy/pypy/changeset/64c291c60695/ Log: fix the errno handling in vmprof module diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,7 +60,8 @@ compilation_info=eci) vmprof_enable = rffi.llexternal("vmprof_enable", [rffi.INT, rffi.INT, rffi.LONG, rffi.INT], - rffi.INT, compilation_info=eci) + rffi.INT, compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT, compilation_info=eci) @@ -135,7 +136,7 @@ else: res = 0 if res == -1: - raise wrap_oserror(space, OSError(rposix.get_errno(), + raise wrap_oserror(space, OSError(rposix.get_saved_errno(), "_vmprof.enable")) def write_header(self, fileno, period): From noreply at buildbot.pypy.org Mon Jan 26 13:55:13 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Jan 2015 13:55:13 +0100 (CET) Subject: [pypy-commit] pypy vmprof: uh how did I miss it? Message-ID: <20150126125513.7148F1C0634@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75533:14a7958e6b4b Date: 2015-01-26 14:54 +0200 http://bitbucket.org/pypy/pypy/changeset/14a7958e6b4b/ Log: uh how did I miss it? 
diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -63,7 +63,8 @@ rffi.INT, compilation_info=eci, save_err=rffi.RFFI_SAVE_ERRNO) vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT, - compilation_info=eci) + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) vmprof_register_virtual_function = rffi.llexternal( "vmprof_register_virtual_function", @@ -166,7 +167,7 @@ res = 0 space.set_code_callback(None) if res == -1: - raise wrap_oserror(space, OSError(rposix.get_errno(), + raise wrap_oserror(space, OSError(rposix.get_saved_errno(), "_vmprof.disable")) @unwrap_spec(fileno=int, period=int) From noreply at buildbot.pypy.org Mon Jan 26 14:33:43 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Jan 2015 14:33:43 +0100 (CET) Subject: [pypy-commit] pypy vmprof: quick hack to write ints by hand Message-ID: <20150126133343.893041C05A0@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75534:22a3080afde4 Date: 2015-01-26 15:33 +0200 http://bitbucket.org/pypy/pypy/changeset/22a3080afde4/ Log: quick hack to write ints by hand diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -1,9 +1,10 @@ -import py, os, struct +import py, os, sys from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.annlowlevel import cast_instance_to_gcref, cast_base_ptr_to_instance from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib import jit, rposix, entrypoint +from rpython.rlib import jit, rposix, entrypoint, rstruct +from rpython.rlib.rstring import StringBuilder from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import oefmt, wrap_oserror, 
OperationError from pypy.interpreter.gateway import unwrap_spec @@ -107,7 +108,21 @@ def do_get_virtual_ip(frame): return frame.pycode._unique_id - +def write_long_to_string_builder(l, b): + if sys.maxint == 2147483647: + b.append(chr(l & 0xff)) + b.append(chr((l >> 8) & 0xff)) + b.append(chr((l >> 16) & 0xff)) + b.append(chr((l >> 24) & 0xff)) + else: + b.append(chr(l & 0xff)) + b.append(chr((l >> 8) & 0xff)) + b.append(chr((l >> 16) & 0xff)) + b.append(chr((l >> 24) & 0xff)) + b.append(chr((l >> 32) & 0xff)) + b.append(chr((l >> 40) & 0xff)) + b.append(chr((l >> 48) & 0xff)) + b.append(chr((l >> 56) & 0xff)) class VMProf(object): def __init__(self): @@ -145,15 +160,25 @@ period_usec = 1000000 / 100 # 100hz else: period_usec = period - os.write(fileno, struct.pack("lllll", 0, 3, 0, period_usec, 0)) + b = StringBuilder() + write_long_to_string_builder(0, b) + write_long_to_string_builder(3, b) + write_long_to_string_builder(0, b) + write_long_to_string_builder(period_usec, b) + write_long_to_string_builder(0, b) + os.write(fileno, b.build()) def register_code(self, space, code): if self.fileno == -1: raise OperationError(space.w_RuntimeError, space.wrap("vmprof not running")) name = code._get_full_name() - s = '\x02' + struct.pack("ll", code._unique_id, len(name)) + name - os.write(self.fileno, s) + b = StringBuilder() + b.append('\x02') + write_long_to_string_builder(code._unique_id, b) + write_long_to_string_builder(len(name), b) + b.append(name) + os.write(self.fileno, b.build()) def disable(self, space): if not self.is_enabled: From noreply at buildbot.pypy.org Mon Jan 26 14:49:14 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Jan 2015 14:49:14 +0100 (CET) Subject: [pypy-commit] pypy vmprof: bound-method-or-none is not RPython Message-ID: <20150126134914.166111C05A0@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75535:df31af4fa024 Date: 2015-01-26 15:48 +0200 
http://bitbucket.org/pypy/pypy/changeset/df31af4fa024/ Log: bound-method-or-none is not RPython diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -145,7 +145,7 @@ code = weakcode() if code: self.register_code(space, code) - space.set_code_callback(self.register_code) + space.set_code_callback(vmprof_register_code) if we_are_translated(): # does not work untranslated res = vmprof_enable(fileno, -1, period, 0) @@ -195,6 +195,12 @@ raise wrap_oserror(space, OSError(rposix.get_saved_errno(), "_vmprof.disable")) +def vmprof_register_code(space, code): + from pypy.module._vmprof import Module + mod_vmprof = space.getbuiltinmodule('_vmprof') + assert isinstance(mod_vmprof, Module) + mod_vmprof.vmprof.register_code(space, code) + @unwrap_spec(fileno=int, period=int) def enable(space, fileno, period=-1): from pypy.module._vmprof import Module From noreply at buildbot.pypy.org Mon Jan 26 15:20:22 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 26 Jan 2015 15:20:22 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Object ndarrays are now initialized with Nones Message-ID: <20150126142022.9B7F71C03F2@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75536:dca1f6200f69 Date: 2015-01-26 15:19 +0100 http://bitbucket.org/pypy/pypy/changeset/dca1f6200f69/ Log: Object ndarrays are now initialized with Nones diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -34,11 +34,13 @@ @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True): - from pypy.module.micronumpy import concrete + from pypy.module.micronumpy import concrete, descriptor, boxes from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype.base, order) impl = 
concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides, zero=zero) + if dtype == descriptor.get_dtype_cache(space).w_objectdtype: + impl.fill(space, boxes.W_ObjectBox(space.w_None)) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -586,8 +586,7 @@ if w_dtype is dtype.w_box_type: return dtype if space.isinstance_w(w_dtype, space.w_type): - raise oefmt(space.w_NotImplementedError, - "cannot create dtype with type '%N'", w_dtype) + return cache.w_objectdtype raise oefmt(space.w_TypeError, "data type not understood") diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1345,9 +1345,12 @@ import sys class Polynomial(object): pass - if '__pypy__' in sys.builtin_module_names: - exc = raises(NotImplementedError, array, Polynomial()) - assert exc.value.message.find('unable to create dtype from objects') >= 0 - else: - a = array(Polynomial()) - assert a.shape == () + a = array(Polynomial()) + assert a.shape == () + + def test_uninitialized_object_array_is_filled_by_None(self): + import numpy as np + + a = np.ndarray([5], dtype="O") + + assert a[0] == None diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1624,7 +1624,7 @@ _all_objs_for_tests = [] # for tests -class ObjectType(BaseType): +class ObjectType(Primitive, BaseType): T = lltype.Signed BoxType = boxes.W_ObjectBox @@ -1669,6 +1669,10 @@ def str_format(self, space, box): return space.str_w(space.repr(self.unbox(box))) + @staticmethod + def for_computation(v): + return v + class FlexibleType(BaseType): def 
get_element_size(self): return rffi.sizeof(self.T) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -608,6 +608,7 @@ uint64_dtype = descriptor.get_dtype_cache(space).w_uint64dtype complex_dtype = descriptor.get_dtype_cache(space).w_complex128dtype float_dtype = descriptor.get_dtype_cache(space).w_float64dtype + object_dtype = descriptor.get_dtype_cache(space).w_objectdtype if isinstance(w_obj, boxes.W_GenericBox): dtype = w_obj.get_dtype(space) return find_binop_result_dtype(space, dtype, current_guess) @@ -638,9 +639,10 @@ return descriptor.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess - raise oefmt(space.w_NotImplementedError, - 'unable to create dtype from objects, "%T" instance not ' - 'supported', w_obj) + return object_dtype + #raise oefmt(space.w_NotImplementedError, + # 'unable to create dtype from objects, "%T" instance not ' + # 'supported', w_obj) def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, From noreply at buildbot.pypy.org Mon Jan 26 16:01:34 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 26 Jan 2015 16:01:34 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Test for adding 2 object arrays Message-ID: <20150126150134.186DF1C084F@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75537:70ad29712173 Date: 2015-01-26 16:01 +0100 http://bitbucket.org/pypy/pypy/changeset/70ad29712173/ Log: Test for adding 2 object arrays diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1354,3 +1354,12 @@ a = np.ndarray([5], dtype="O") assert a[0] == None + + def test_object_arrays_add(self): + import numpy as np + + a = np.array(["foo"], dtype=object) + b = np.array(["bar"], dtype=object) + + res 
= a + b + assert res[0] == "foobar" From noreply at buildbot.pypy.org Mon Jan 26 17:11:49 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 26 Jan 2015 17:11:49 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: space has to be passed to a lot of places now, this breaks everything Message-ID: <20150126161149.98E1B1C0113@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75538:79a2270b06a2 Date: 2015-01-26 17:11 +0100 http://bitbucket.org/pypy/pypy/changeset/79a2270b06a2/ Log: space has to be passed to a lot of places now, this breaks everything diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -83,7 +83,7 @@ x = convert_to_array(space, w_x) y = convert_to_array(space, w_y) if x.is_scalar() and y.is_scalar() and arr.is_scalar(): - if arr.get_dtype().itemtype.bool(arr.get_scalar_value()): + if arr.get_dtype().itemtype.bool(space, arr.get_scalar_value()): return x return y dtype = ufuncs.find_binop_result_dtype(space, x.get_dtype(), diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -228,7 +228,7 @@ return space.hex(self.descr_int(space)) def descr_nonzero(self, space): - return space.wrap(self.get_dtype(space).itemtype.bool(self)) + return space.wrap(self.get_dtype(space).itemtype.bool(space, self)) def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): @@ -412,10 +412,10 @@ return space.call_args(w_meth, __args__) def descr_get_real(self, space): - return self.get_dtype(space).itemtype.real(self) + return self.get_dtype(space).itemtype.real(space, self) def descr_get_imag(self, space): - return self.get_dtype(space).itemtype.imag(self) + return self.get_dtype(space).itemtype.imag(space, self) w_flags = None diff --git a/pypy/module/micronumpy/loop.py 
b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -72,7 +72,7 @@ if right_iter: w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) right_state = right_iter.next(right_state) - out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( + out_iter.setitem(out_state, func(space, calc_dtype, w_left, w_right).convert_to( space, res_dtype)) out_state = out_iter.next(out_state) return out @@ -94,7 +94,7 @@ call1_driver.jit_merge_point(shapelen=shapelen, func=func, calc_dtype=calc_dtype, res_dtype=res_dtype) elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) - out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) + out_iter.setitem(out_state, func(space, calc_dtype, elem).convert_to(space, res_dtype)) out_state = out_iter.next(out_state) obj_state = obj_iter.next(obj_state) return out @@ -153,9 +153,9 @@ done_func=done_func, calc_dtype=calc_dtype) rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) - if done_func is not None and done_func(calc_dtype, rval): + if done_func is not None and done_func(space, calc_dtype, rval): return rval - cur_value = func(calc_dtype, cur_value, rval) + cur_value = func(space, calc_dtype, cur_value, rval) obj_state = obj_iter.next(obj_state) return cur_value @@ -179,7 +179,7 @@ reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, dtype=calc_dtype) rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) - cur_value = func(calc_dtype, cur_value, rval) + cur_value = func(space, calc_dtype, cur_value, rval) out_iter.setitem(out_state, cur_value) out_state = out_iter.next(out_state) obj_state = obj_iter.next(obj_state) @@ -222,7 +222,7 @@ where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, arr_dtype=arr_dtype) w_cond = arr_iter.getitem(arr_state) - if arr_dtype.itemtype.bool(w_cond): + if arr_dtype.itemtype.bool(space, w_cond): w_val = 
x_iter.getitem(x_state).convert_to(space, dtype) else: w_val = y_iter.getitem(y_state).convert_to(space, dtype) @@ -354,7 +354,7 @@ dot_driver.jit_merge_point(dtype=dtype) lval = left_impl.getitem(i1).convert_to(space, dtype) rval = right_impl.getitem(i2).convert_to(space, dtype) - oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) + oval = dtype.itemtype.add(space, oval, dtype.itemtype.mul(space, lval, rval)) i1 += s1 i2 += s2 outi.setitem(outs, oval) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -32,10 +32,10 @@ def simple_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) - def dispatcher(self, v): + def dispatcher(self, space, v): return self.box( func( - self, + self, space, self.for_computation(self.unbox(v)), ) ) @@ -56,10 +56,10 @@ def complex_to_real_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) - def dispatcher(self, v): + def dispatcher(self, space, v): return self.box_component( func( - self, + self, space, self.for_computation(self.unbox(v)) ) ) @@ -68,9 +68,9 @@ def raw_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) - def dispatcher(self, v): + def dispatcher(self, space, v): return func( - self, + self, space, self.for_computation(self.unbox(v)) ) return dispatcher @@ -78,10 +78,11 @@ def simple_binary_op(func): specialize.argtype(1, 2)(func) @functools.wraps(func) - def dispatcher(self, v1, v2): + def dispatcher(self, space, v1, v2): return self.box( func( self, + space, self.for_computation(self.unbox(v1)), self.for_computation(self.unbox(v2)), ) @@ -91,10 +92,10 @@ def complex_binary_op(func): specialize.argtype(1, 2)(func) @functools.wraps(func) - def dispatcher(self, v1, v2): + def dispatcher(self, space, v1, v2): return self.box_complex( *func( - self, + self, space, self.for_computation(self.unbox(v1)), self.for_computation(self.unbox(v2)), ) @@ -104,8 +105,8 @@ 
def raw_binary_op(func): specialize.argtype(1, 2)(func) @functools.wraps(func) - def dispatcher(self, v1, v2): - return func(self, + def dispatcher(self, space, v1, v2): + return func(self, space, self.for_computation(self.unbox(v1)), self.for_computation(self.unbox(v2)) ) @@ -203,23 +204,23 @@ return self.box(v) @simple_binary_op - def add(self, v1, v2): + def add(self, space, v1, v2): return v1 + v2 @simple_binary_op - def sub(self, v1, v2): + def sub(self, space, v1, v2): return v1 - v2 @simple_binary_op - def mul(self, v1, v2): + def mul(self, space, v1, v2): return v1 * v2 @simple_unary_op - def pos(self, v): + def pos(self, space, v): return +v @simple_unary_op - def neg(self, v): + def neg(self, space, v): return -v def byteswap(self, w_v): @@ -227,85 +228,85 @@ return self.box(byteswap(self.unbox(w_v))) @simple_unary_op - def conj(self, v): + def conj(self, space, v): return v @simple_unary_op - def real(self, v): + def real(self, space, v): return v @simple_unary_op - def imag(self, v): + def imag(self, space, v): return 0 @simple_unary_op - def abs(self, v): + def abs(self, space, v): return abs(v) @raw_unary_op - def isnan(self, v): + def isnan(self, space, v): return False @raw_unary_op - def isinf(self, v): + def isinf(self, space, v): return False @raw_binary_op - def eq(self, v1, v2): + def eq(self, space, v1, v2): return v1 == v2 @raw_binary_op - def ne(self, v1, v2): + def ne(self, space, v1, v2): return v1 != v2 @raw_binary_op - def lt(self, v1, v2): + def lt(self, space, v1, v2): return v1 < v2 @raw_binary_op - def le(self, v1, v2): + def le(self, space, v1, v2): return v1 <= v2 @raw_binary_op - def gt(self, v1, v2): + def gt(self, space, v1, v2): return v1 > v2 @raw_binary_op - def ge(self, v1, v2): + def ge(self, space, v1, v2): return v1 >= v2 @raw_binary_op - def logical_and(self, v1, v2): + def logical_and(self, space, v1, v2): return bool(v1) and bool(v2) @raw_binary_op - def logical_or(self, v1, v2): + def logical_or(self, space, v1, v2): 
return bool(v1) or bool(v2) @raw_unary_op - def logical_not(self, v): + def logical_not(self, space, v): return not bool(v) @raw_binary_op - def logical_xor(self, v1, v2): + def logical_xor(self, space, v1, v2): a = bool(v1) b = bool(v2) return (not b and a) or (not a and b) @raw_unary_op - def bool(self, v): + def bool(self, space, v): return bool(v) @simple_binary_op - def max(self, v1, v2): + def max(self, space, v1, v2): return max(v1, v2) @simple_binary_op - def min(self, v1, v2): + def min(self, space, v1, v2): return min(v1, v2) @raw_unary_op - def rint(self, v): + def rint(self, space, v): float64 = Float64() return float64.rint(float64.box(v)) @@ -358,39 +359,39 @@ return self.box(True) @simple_binary_op - def lshift(self, v1, v2): + def lshift(self, space, v1, v2): return v1 << v2 @simple_binary_op - def rshift(self, v1, v2): + def rshift(self, space, v1, v2): return v1 >> v2 @simple_binary_op - def bitwise_and(self, v1, v2): + def bitwise_and(self, space, v1, v2): return v1 & v2 @simple_binary_op - def bitwise_or(self, v1, v2): + def bitwise_or(self, space, v1, v2): return v1 | v2 @simple_binary_op - def bitwise_xor(self, v1, v2): + def bitwise_xor(self, space, v1, v2): return v1 ^ v2 @simple_unary_op - def invert(self, v): + def invert(self, space, v): return not v @raw_unary_op - def isfinite(self, v): + def isfinite(self, space, v): return True @raw_unary_op - def signbit(self, v): + def signbit(self, space, v): return False @simple_unary_op - def reciprocal(self, v): + def reciprocal(self, space, v): if v: return 1 return 0 @@ -423,7 +424,7 @@ return self.box(0) @specialize.argtype(1, 2) - def div(self, b1, b2): + def div(self, space, b1, b2): v1 = self.for_computation(self.unbox(b1)) v2 = self.for_computation(self.unbox(b2)) if v2 == 0: @@ -435,7 +436,7 @@ return self.box(v1 / v2) @specialize.argtype(1, 2) - def floordiv(self, b1, b2): + def floordiv(self, space, b1, b2): v1 = self.for_computation(self.unbox(b1)) v2 = 
self.for_computation(self.unbox(b2)) if v2 == 0: @@ -447,12 +448,12 @@ return self.box(v1 // v2) @simple_binary_op - def mod(self, v1, v2): + def mod(self, space, v1, v2): return v1 % v2 @simple_binary_op - @jit.look_inside_iff(lambda self, v1, v2: jit.isconstant(v2)) - def pow(self, v1, v2): + @jit.look_inside_iff(lambda self, space, v1, v2: jit.isconstant(v2)) + def pow(self, space, v1, v2): if v2 < 0: return 0 res = 1 @@ -466,15 +467,15 @@ return res @simple_binary_op - def lshift(self, v1, v2): + def lshift(self, space, v1, v2): return v1 << v2 @simple_binary_op - def rshift(self, v1, v2): + def rshift(self, space, v1, v2): return v1 >> v2 @simple_unary_op - def sign(self, v): + def sign(self, space, v): if v > 0: return 1 elif v < 0: @@ -484,31 +485,31 @@ return 0 @raw_unary_op - def isfinite(self, v): + def isfinite(self, space, v): return True @raw_unary_op - def isnan(self, v): + def isnan(self, space, v): return False @raw_unary_op - def isinf(self, v): + def isinf(self, space, v): return False @simple_binary_op - def bitwise_and(self, v1, v2): + def bitwise_and(self, space, v1, v2): return v1 & v2 @simple_binary_op - def bitwise_or(self, v1, v2): + def bitwise_or(self, space, v1, v2): return v1 | v2 @simple_binary_op - def bitwise_xor(self, v1, v2): + def bitwise_xor(self, space, v1, v2): return v1 ^ v2 @simple_unary_op - def invert(self, v): + def invert(self, space, v): return ~v @specialize.argtype(1) @@ -541,7 +542,7 @@ return self.box(ans) @raw_unary_op - def signbit(self, v): + def signbit(self, space, v): return v < 0 class Int8(BaseType, Integer): @@ -662,7 +663,7 @@ return self.box(-1.0) @simple_binary_op - def div(self, v1, v2): + def div(self, space, v1, v2): try: return v1 / v2 except ZeroDivisionError: @@ -671,7 +672,7 @@ return rfloat.copysign(rfloat.INFINITY, v1 * v2) @simple_binary_op - def floordiv(self, v1, v2): + def floordiv(self, space, v1, v2): try: return math.floor(v1 / v2) except ZeroDivisionError: @@ -680,11 +681,11 @@ return 
rfloat.copysign(rfloat.INFINITY, v1 * v2) @simple_binary_op - def mod(self, v1, v2): + def mod(self, space, v1, v2): return math.fmod(v1, v2) @simple_binary_op - def pow(self, v1, v2): + def pow(self, space, v1, v2): try: return math.pow(v1, v2) except ValueError: @@ -696,58 +697,58 @@ return rfloat.INFINITY @simple_binary_op - def copysign(self, v1, v2): + def copysign(self, space, v1, v2): return math.copysign(v1, v2) @simple_unary_op - def sign(self, v): + def sign(self, space, v): if v == 0.0: return 0.0 return rfloat.copysign(1.0, v) @raw_unary_op - def signbit(self, v): + def signbit(self, space, v): return rfloat.copysign(1.0, v) < 0.0 @simple_unary_op - def fabs(self, v): + def fabs(self, space, v): return math.fabs(v) @simple_binary_op - def max(self, v1, v2): + def max(self, space, v1, v2): return v1 if v1 >= v2 or rfloat.isnan(v1) else v2 @simple_binary_op - def min(self, v1, v2): + def min(self, space, v1, v2): return v1 if v1 <= v2 or rfloat.isnan(v1) else v2 @simple_binary_op - def fmax(self, v1, v2): + def fmax(self, space, v1, v2): return v1 if v1 >= v2 or rfloat.isnan(v2) else v2 @simple_binary_op - def fmin(self, v1, v2): + def fmin(self, space, v1, v2): return v1 if v1 <= v2 or rfloat.isnan(v2) else v2 @simple_binary_op - def fmod(self, v1, v2): + def fmod(self, space, v1, v2): try: return math.fmod(v1, v2) except ValueError: return rfloat.NAN @simple_unary_op - def reciprocal(self, v): + def reciprocal(self, space, v): if v == 0.0: return rfloat.copysign(rfloat.INFINITY, v) return 1.0 / v @simple_unary_op - def floor(self, v): + def floor(self, space, v): return math.floor(v) @simple_unary_op - def ceil(self, v): + def ceil(self, space, v): return math.ceil(v) @specialize.argtype(1) @@ -761,89 +762,89 @@ return self.box(ans) @simple_unary_op - def trunc(self, v): + def trunc(self, space, v): if v < 0: return math.ceil(v) else: return math.floor(v) @simple_unary_op - def exp(self, v): + def exp(self, space, v): try: return math.exp(v) except 
OverflowError: return rfloat.INFINITY @simple_unary_op - def exp2(self, v): + def exp2(self, space, v): try: return math.pow(2, v) except OverflowError: return rfloat.INFINITY @simple_unary_op - def expm1(self, v): + def expm1(self, space, v): try: return rfloat.expm1(v) except OverflowError: return rfloat.INFINITY @simple_unary_op - def sin(self, v): + def sin(self, space, v): return math.sin(v) @simple_unary_op - def cos(self, v): + def cos(self, space, v): return math.cos(v) @simple_unary_op - def tan(self, v): + def tan(self, space, v): return math.tan(v) @simple_unary_op - def arcsin(self, v): + def arcsin(self, space, v): if not -1.0 <= v <= 1.0: return rfloat.NAN return math.asin(v) @simple_unary_op - def arccos(self, v): + def arccos(self, space, v): if not -1.0 <= v <= 1.0: return rfloat.NAN return math.acos(v) @simple_unary_op - def arctan(self, v): + def arctan(self, space, v): return math.atan(v) @simple_binary_op - def arctan2(self, v1, v2): + def arctan2(self, space, v1, v2): return math.atan2(v1, v2) @simple_unary_op - def sinh(self, v): + def sinh(self, space, v): return math.sinh(v) @simple_unary_op - def cosh(self, v): + def cosh(self, space, v): return math.cosh(v) @simple_unary_op - def tanh(self, v): + def tanh(self, space, v): return math.tanh(v) @simple_unary_op - def arcsinh(self, v): + def arcsinh(self, space, v): return math.asinh(v) @simple_unary_op - def arccosh(self, v): + def arccosh(self, space, v): if v < 1.0: return rfloat.NAN return math.acosh(v) @simple_unary_op - def arctanh(self, v): + def arctanh(self, space, v): if v == 1.0 or v == -1.0: return math.copysign(rfloat.INFINITY, v) if not -1.0 < v < 1.0: @@ -851,39 +852,39 @@ return math.atanh(v) @simple_unary_op - def sqrt(self, v): + def sqrt(self, space, v): try: return math.sqrt(v) except ValueError: return rfloat.NAN @simple_unary_op - def square(self, v): + def square(self, space, v): return v*v @raw_unary_op - def isnan(self, v): + def isnan(self, space, v): return 
rfloat.isnan(v) @raw_unary_op - def isinf(self, v): + def isinf(self, space, v): return rfloat.isinf(v) @raw_unary_op - def isfinite(self, v): + def isfinite(self, space, v): return rfloat.isfinite(v) @simple_unary_op - def radians(self, v): + def radians(self, space, v): return v * degToRad deg2rad = radians @simple_unary_op - def degrees(self, v): + def degrees(self, space, v): return v / degToRad @simple_unary_op - def log(self, v): + def log(self, space, v): try: return math.log(v) except ValueError: @@ -894,7 +895,7 @@ return rfloat.NAN @simple_unary_op - def log2(self, v): + def log2(self, space, v): try: return math.log(v) / log2 except ValueError: @@ -905,7 +906,7 @@ return rfloat.NAN @simple_unary_op - def log10(self, v): + def log10(self, space, v): try: return math.log10(v) except ValueError: @@ -916,7 +917,7 @@ return rfloat.NAN @simple_unary_op - def log1p(self, v): + def log1p(self, space, v): try: return rfloat.log1p(v) except OverflowError: @@ -925,7 +926,7 @@ return rfloat.NAN @simple_binary_op - def logaddexp(self, v1, v2): + def logaddexp(self, space, v1, v2): tmp = v1 - v2 if tmp > 0: return v1 + rfloat.log1p(math.exp(-tmp)) @@ -938,7 +939,7 @@ return log2e * rfloat.log1p(v) @simple_binary_op - def logaddexp2(self, v1, v2): + def logaddexp2(self, space, v1, v2): tmp = v1 - v2 if tmp > 0: return v1 + self.npy_log2_1p(math.pow(2, -tmp)) @@ -948,7 +949,7 @@ return v1 + v2 @simple_unary_op - def rint(self, v): + def rint(self, space, v): x = float(v) if rfloat.isfinite(x): import math @@ -1078,7 +1079,7 @@ real, imag = self.for_computation(self.unbox(box)) return space.newcomplex(real, imag) - def bool(self, v): + def bool(self, space, v): real, imag = self.for_computation(self.unbox(v)) return bool(real) or bool(imag) @@ -1155,19 +1156,19 @@ self._write(storage, i, offset, value) @complex_binary_op - def add(self, v1, v2): + def add(self, space, v1, v2): return rcomplex.c_add(v1, v2) @complex_binary_op - def sub(self, v1, v2): + def sub(self, 
space, v1, v2): return rcomplex.c_sub(v1, v2) @complex_binary_op - def mul(self, v1, v2): + def mul(self, space, v1, v2): return rcomplex.c_mul(v1, v2) @complex_binary_op - def div(self, v1, v2): + def div(self, space, v1, v2): try: return rcomplex.c_div(v1, v2) except ZeroDivisionError: @@ -1177,36 +1178,36 @@ return rfloat.INFINITY, rfloat.INFINITY @complex_unary_op - def pos(self, v): + def pos(self, space, v): return v @complex_unary_op - def neg(self, v): + def neg(self, space, v): return -v[0], -v[1] @complex_unary_op - def conj(self, v): + def conj(self, space, v): return v[0], -v[1] @complex_to_real_unary_op - def real(self, v): + def real(self, space, v): return v[0] @complex_to_real_unary_op - def imag(self, v): + def imag(self, space, v): return v[1] @complex_to_real_unary_op - def abs(self, v): + def abs(self, space, v): return rcomplex.c_abs(v[0], v[1]) @raw_unary_op - def isnan(self, v): + def isnan(self, space, v): '''a complex number is nan if one of the parts is nan''' return rfloat.isnan(v[0]) or rfloat.isnan(v[1]) @raw_unary_op - def isinf(self, v): + def isinf(self, space, v): '''a complex number is inf if one of the parts is inf''' return rfloat.isinf(v[0]) or rfloat.isinf(v[1]) @@ -1214,12 +1215,12 @@ return v1[0] == v2[0] and v1[1] == v2[1] @raw_binary_op - def eq(self, v1, v2): + def eq(self, space, v1, v2): #compare the parts, so nan == nan is False return self._eq(v1, v2) @raw_binary_op - def ne(self, v1, v2): + def ne(self, space, v1, v2): return not self._eq(v1, v2) def _lt(self, v1, v2): @@ -1231,38 +1232,38 @@ return False @raw_binary_op - def lt(self, v1, v2): + def lt(self, space, v1, v2): return self._lt(v1, v2) @raw_binary_op - def le(self, v1, v2): + def le(self, space, v1, v2): return self._lt(v1, v2) or self._eq(v1, v2) @raw_binary_op - def gt(self, v1, v2): + def gt(self, space, v1, v2): return self._lt(v2, v1) @raw_binary_op - def ge(self, v1, v2): + def ge(self, space, v1, v2): return self._lt(v2, v1) or self._eq(v2, v1) def 
_bool(self, v): return bool(v[0]) or bool(v[1]) @raw_binary_op - def logical_and(self, v1, v2): + def logical_and(self, space, v1, v2): return self._bool(v1) and self._bool(v2) @raw_binary_op - def logical_or(self, v1, v2): + def logical_or(self, space, v1, v2): return self._bool(v1) or self._bool(v2) @raw_unary_op - def logical_not(self, v): + def logical_not(self, space, v): return not self._bool(v) @raw_binary_op - def logical_xor(self, v1, v2): + def logical_xor(self, space, v1, v2): a = self._bool(v1) b = self._bool(v2) return (not b and a) or (not a and b) @@ -1278,7 +1279,7 @@ return v2 @complex_binary_op - def floordiv(self, v1, v2): + def floordiv(self, space, v1, v2): try: ab = v1[0]*v2[0] + v1[1]*v2[1] bb = v2[0]*v2[0] + v2[1]*v2[1] @@ -1291,7 +1292,7 @@ #def mod(self, v1, v2): # return math.fmod(v1, v2) - def pow(self, v1, v2): + def pow(self, space, v1, v2): y = self.for_computation(self.unbox(v2)) if y[1] == 0: if y[0] == 0: @@ -1316,7 +1317,7 @@ # rfloat.copysign(v1[1], v2[1])) @complex_unary_op - def sign(self, v): + def sign(self, space, v): ''' sign of complex number could be either the point closest to the unit circle or {-1,0,1}, for compatability with numpy we choose the latter @@ -1349,7 +1350,7 @@ # return rfloat.NAN @complex_unary_op - def reciprocal(self, v): + def reciprocal(self, space, v): if rfloat.isinf(v[1]) and rfloat.isinf(v[0]): return rfloat.NAN, rfloat.NAN if rfloat.isinf(v[0]): @@ -1390,7 +1391,7 @@ # return math.floor(v) @complex_unary_op - def exp(self, v): + def exp(self, space, v): if rfloat.isinf(v[1]): if rfloat.isinf(v[0]): if v[0] < 0: @@ -1407,7 +1408,7 @@ return rfloat.INFINITY, rfloat.NAN @complex_unary_op - def exp2(self, v): + def exp2(self, space, v): try: return rcomplex.c_pow((2,0), v) except OverflowError: @@ -1416,7 +1417,7 @@ return rfloat.NAN, rfloat.NAN @complex_unary_op - def expm1(self, v): + def expm1(self, space, v): # duplicate exp() so in the future it will be easier # to implement seterr if 
rfloat.isinf(v[1]): @@ -1437,7 +1438,7 @@ return rfloat.INFINITY, rfloat.NAN @complex_unary_op - def sin(self, v): + def sin(self, space, v): if rfloat.isinf(v[0]): if v[1] == 0.: return rfloat.NAN, 0. @@ -1448,7 +1449,7 @@ return rcomplex.c_sin(*v) @complex_unary_op - def cos(self, v): + def cos(self, space, v): if rfloat.isinf(v[0]): if v[1] == 0.: return rfloat.NAN, 0.0 @@ -1459,21 +1460,21 @@ return rcomplex.c_cos(*v) @complex_unary_op - def tan(self, v): + def tan(self, space, v): if rfloat.isinf(v[0]) and rfloat.isfinite(v[1]): return rfloat.NAN, rfloat.NAN return rcomplex.c_tan(*v) @complex_unary_op - def arcsin(self, v): + def arcsin(self, space, v): return rcomplex.c_asin(*v) @complex_unary_op - def arccos(self, v): + def arccos(self, space, v): return rcomplex.c_acos(*v) @complex_unary_op - def arctan(self, v): + def arctan(self, space, v): if v[0] == 0 and (v[1] == 1 or v[1] == -1): #This is the place to print a "runtime warning" return rfloat.NAN, math.copysign(rfloat.INFINITY, v[1]) @@ -1484,7 +1485,7 @@ # return rcomplex.c_atan2(v1, v2) @complex_unary_op - def sinh(self, v): + def sinh(self, space, v): if rfloat.isinf(v[1]): if rfloat.isfinite(v[0]): if v[0] == 0.0: @@ -1495,7 +1496,7 @@ return rcomplex.c_sinh(*v) @complex_unary_op - def cosh(self, v): + def cosh(self, space, v): if rfloat.isinf(v[1]): if rfloat.isfinite(v[0]): if v[0] == 0.0: @@ -1506,36 +1507,36 @@ return rcomplex.c_cosh(*v) @complex_unary_op - def tanh(self, v): + def tanh(self, space, v): if rfloat.isinf(v[1]) and rfloat.isfinite(v[0]): return rfloat.NAN, rfloat.NAN return rcomplex.c_tanh(*v) @complex_unary_op - def arcsinh(self, v): + def arcsinh(self, space, v): return rcomplex.c_asinh(*v) @complex_unary_op - def arccosh(self, v): + def arccosh(self, space, v): return rcomplex.c_acosh(*v) @complex_unary_op - def arctanh(self, v): + def arctanh(self, space, v): if v[1] == 0 and (v[0] == 1.0 or v[0] == -1.0): return (math.copysign(rfloat.INFINITY, v[0]), math.copysign(0., v[1])) 
return rcomplex.c_atanh(*v) @complex_unary_op - def sqrt(self, v): + def sqrt(self, space, v): return rcomplex.c_sqrt(*v) @complex_unary_op - def square(self, v): + def square(self, space, v): return rcomplex.c_mul(v,v) @raw_unary_op - def isfinite(self, v): + def isfinite(self, space, v): return rfloat.isfinite(v[0]) and rfloat.isfinite(v[1]) #@simple_unary_op @@ -1548,14 +1549,14 @@ # return v / degToRad @complex_unary_op - def log(self, v): + def log(self, space, v): try: return rcomplex.c_log(*v) except ValueError: return -rfloat.INFINITY, math.atan2(v[1], v[0]) @complex_unary_op - def log2(self, v): + def log2(self, space, v): try: r = rcomplex.c_log(*v) except ValueError: @@ -1563,14 +1564,14 @@ return r[0] / log2, r[1] / log2 @complex_unary_op - def log10(self, v): + def log10(self, space, v): try: return rcomplex.c_log10(*v) except ValueError: return -rfloat.INFINITY, math.atan2(v[1], v[0]) / log10 @complex_unary_op - def log1p(self, v): + def log1p(self, space, v): try: return rcomplex.c_log(v[0] + 1, v[1]) except OverflowError: @@ -1673,6 +1674,14 @@ def for_computation(v): return v + @simple_binary_op + def add(self, space, v1, v2): + return space.add(v1, v2) + + @raw_binary_op + def eq(self, space, v1, v2): + return space.eq_w(v1, v2) + class FlexibleType(BaseType): def get_element_size(self): return rffi.sizeof(self.T) @@ -1701,8 +1710,8 @@ def str_binary_op(func): specialize.argtype(1, 2)(func) @functools.wraps(func) - def dispatcher(self, v1, v2): - return func(self, + def dispatcher(self, space, v1, v2): + return func(self, space, self.to_str(v1), self.to_str(v2) ) @@ -1754,43 +1763,43 @@ return space.wrap(self.to_str(box)) @str_binary_op - def eq(self, v1, v2): + def eq(self, space, v1, v2): return v1 == v2 @str_binary_op - def ne(self, v1, v2): + def ne(self, space, v1, v2): return v1 != v2 @str_binary_op - def lt(self, v1, v2): + def lt(self, space, v1, v2): return v1 < v2 @str_binary_op - def le(self, v1, v2): + def le(self, space, v1, v2): 
return v1 <= v2 @str_binary_op - def gt(self, v1, v2): + def gt(self, space, v1, v2): return v1 > v2 @str_binary_op - def ge(self, v1, v2): + def ge(self, space, v1, v2): return v1 >= v2 @str_binary_op - def logical_and(self, v1, v2): + def logical_and(self, space, v1, v2): return bool(v1) and bool(v2) @str_binary_op - def logical_or(self, v1, v2): + def logical_or(self, space, v1, v2): return bool(v1) or bool(v2) @str_unary_op - def logical_not(self, v): + def logical_not(self, space, v): return not bool(v) @str_binary_op - def logical_xor(self, v1, v2): + def logical_xor(self, space, v1, v2): a = bool(v1) b = bool(v2) return (not b and a) or (not a and b) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -11,12 +11,12 @@ from pypy.module.micronumpy.strides import shape_agreement -def done_if_true(dtype, val): - return dtype.itemtype.bool(val) +def done_if_true(space, dtype, val): + return dtype.itemtype.bool(space, val) -def done_if_false(dtype, val): - return not dtype.itemtype.bool(val) +def done_if_false(space, dtype, val): + return not dtype.itemtype.bool(space, val) def _get_dtype(space, w_npyobj): @@ -343,7 +343,7 @@ else: res_dtype = descriptor.get_dtype_cache(space).w_float64dtype if w_obj.is_scalar(): - w_val = self.func(calc_dtype, + w_val = self.func(space, calc_dtype, w_obj.get_scalar_value().convert_to(space, calc_dtype)) if out is None: return w_val @@ -450,7 +450,7 @@ else: res_dtype = calc_dtype if w_lhs.is_scalar() and w_rhs.is_scalar(): - arr = self.func(calc_dtype, + arr = self.func(space, calc_dtype, w_lhs.get_scalar_value().convert_to(space, calc_dtype), w_rhs.get_scalar_value().convert_to(space, calc_dtype) ) @@ -494,6 +494,10 @@ return dt1 if dt1 is None: return dt2 + + if dt1.num == NPY.OBJECT or dt2.num == NPY.OBJECT: + return descriptor.get_dtype_cache(space).w_objectdtype + # dt1.num should be <= dt2.num if dt1.num > dt2.num: 
dt1, dt2 = dt2, dt1 @@ -656,14 +660,14 @@ ufunc_name, dtype.get_name()) dtype_cache = descriptor.get_dtype_cache(space) if argcount == 1: - def impl(res_dtype, value): - res = get_op(res_dtype)(value) + def impl(space, res_dtype, value): + res = get_op(res_dtype)(space, value) if bool_result: return dtype_cache.w_booldtype.box(res) return res elif argcount == 2: - def impl(res_dtype, lvalue, rvalue): - res = get_op(res_dtype)(lvalue, rvalue) + def impl(space, res_dtype, lvalue, rvalue): + res = get_op(res_dtype)(space, lvalue, rvalue) if comparison_func: return dtype_cache.w_booldtype.box(res) return res From noreply at buildbot.pypy.org Mon Jan 26 17:22:05 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Jan 2015 17:22:05 +0100 (CET) Subject: [pypy-commit] pypy vmprof: fix (why it does not explode on trunk? Message-ID: <20150126162205.E09BE1C03F2@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75539:7d0b82938be8 Date: 2015-01-26 18:21 +0200 http://bitbucket.org/pypy/pypy/changeset/7d0b82938be8/ Log: fix (why it does not explode on trunk? 
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -169,7 +169,7 @@ else: self.tlofs_reg = r12 self.mc.MOV_rs(self.tlofs_reg.value, - THREADLOCAL_OFS - self.current_esp) + THREADLOCAL_OFS - self.get_current_esp()) return self.tlofs_reg def save_stack_position(self): From noreply at buildbot.pypy.org Mon Jan 26 17:56:24 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 26 Jan 2015 17:56:24 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Add space everywhere Message-ID: <20150126165624.ACBBB1C084F@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75540:7cbb4af9b24f Date: 2015-01-26 17:56 +0100 http://bitbucket.org/pypy/pypy/changeset/7cbb4af9b24f/ Log: Add space everywhere diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -192,7 +192,7 @@ def count_nonzero(space, w_obj): - return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) + return space.wrap(loop.count_all_true(space, convert_to_array(space, w_obj))) def choose(space, w_arr, w_choices, w_out, w_mode): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -268,10 +268,10 @@ out_indices = out_iter.indices(out_state) if out_indices[axis] == 0: if identity is not None: - w_val = func(dtype, identity, w_val) + w_val = func(space, dtype, identity, w_val) else: cur = temp_iter.getitem(temp_state) - w_val = func(dtype, cur, w_val) + w_val = func(space, dtype, cur, w_val) out_iter.setitem(out_state, w_val) out_state = out_iter.next(out_state) @@ -288,7 +288,7 @@ greens = ['shapelen', 'dtype'], reds = 'auto') - def argmin_argmax(arr): + def argmin_argmax(space, arr): result = 0 idx = 1 dtype = 
arr.get_dtype() @@ -299,8 +299,8 @@ while not iter.done(state): arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_val = iter.getitem(state) - new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) - if dtype.itemtype.ne(new_best, cur_best): + new_best = getattr(dtype.itemtype, op_name)(space, cur_best, w_val) + if dtype.itemtype.ne(space, new_best, cur_best): result = idx cur_best = new_best state = iter.next(state) @@ -379,9 +379,9 @@ state = iter.next(state) return s -def count_all_true(arr): +def count_all_true(space, arr): if arr.is_scalar(): - return arr.get_dtype().itemtype.bool(arr.get_scalar_value()) + return arr.get_dtype().itemtype.bool(space, arr.get_scalar_value()) else: return count_all_true_concrete(arr.implementation) @@ -662,12 +662,12 @@ arr_state = arr_iter.next(arr_state) if min_iter is not None: w_min = min_iter.getitem(min_state).convert_to(space, dtype) - if dtype.itemtype.lt(w_v, w_min): + if dtype.itemtype.lt(space, w_v, w_min): w_v = w_min min_state = min_iter.next(min_state) if max_iter is not None: w_max = max_iter.getitem(max_state).convert_to(space, dtype) - if dtype.itemtype.gt(w_v, w_max): + if dtype.itemtype.gt(space, w_v, w_max): w_v = w_max max_state = max_iter.next(max_state) out_iter.setitem(out_state, w_v) @@ -750,7 +750,7 @@ last_key_val = key_iter.getitem(key_state) while not key_iter.done(key_state): key_val = key_iter.getitem(key_state) - if dtype.itemtype.lt(last_key_val, key_val): + if dtype.itemtype.lt(space, last_key_val, key_val): max_idx = size else: min_idx = 0 @@ -760,7 +760,7 @@ binsearch_driver.jit_merge_point(dtype=dtype) mid_idx = min_idx + ((max_idx - min_idx) >> 1) mid_val = arr.getitem(space, [mid_idx]).convert_to(space, dtype) - if op(mid_val, key_val): + if op(space, mid_val, key_val): min_idx = mid_idx + 1 else: max_idx = mid_idx diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py 
@@ -1068,7 +1068,7 @@ raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', op_name, self.get_dtype().get_name()) - return space.wrap(getattr(loop, op_name)(self)) + return space.wrap(getattr(loop, op_name)(space, self)) return func_with_new_name(impl, "reduce_%s_impl" % op_name) descr_argmax = _reduce_argmax_argmin_impl("max") diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -44,10 +44,10 @@ def complex_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) - def dispatcher(self, v): + def dispatcher(self, space, v): return self.box_complex( *func( - self, + self, space, self.for_computation(self.unbox(v)) ) ) @@ -308,7 +308,7 @@ @raw_unary_op def rint(self, space, v): float64 = Float64() - return float64.rint(float64.box(v)) + return float64.rint(space, float64.box(v)) class Bool(BaseType, Primitive): T = lltype.Bool @@ -513,7 +513,7 @@ return ~v @specialize.argtype(1) - def reciprocal(self, v): + def reciprocal(self, space, v): raw = self.for_computation(self.unbox(v)) ans = 0 if raw == 0: @@ -1268,13 +1268,13 @@ b = self._bool(v2) return (not b and a) or (not a and b) - def min(self, v1, v2): - if self.le(v1, v2) or self.isnan(v1): + def min(self, space, v1, v2): + if self.le(space, v1, v2) or self.isnan(space, v1): return v1 return v2 - def max(self, v1, v2): - if self.ge(v1, v2) or self.isnan(v1): + def max(self, space, v1, v2): + if self.ge(space, v1, v2) or self.isnan(space, v1): return v1 return v2 @@ -1300,14 +1300,14 @@ if y[0] == 1: return v1 if y[0] == 2: - return self.mul(v1, v1) + return self.mul(space, v1, v1) x = self.for_computation(self.unbox(v1)) if x[0] == 0 and x[1] == 0: if y[0] > 0 and y[1] == 0: return self.box_complex(0, 0) return self.box_complex(rfloat.NAN, rfloat.NAN) - b = self.for_computation(self.unbox(self.log(v1))) - return self.exp(self.box_complex(b[0] * y[0] - b[1] * y[1], + b = 
self.for_computation(self.unbox(self.log(space, v1))) + return self.exp(space, self.box_complex(b[0] * y[0] - b[1] * y[1], b[0] * y[1] + b[1] * y[0])) #complex copysign does not exist in numpy @@ -1332,13 +1332,13 @@ return 1,0 return -1,0 - def fmax(self, v1, v2): - if self.ge(v1, v2) or self.isnan(v2): + def fmax(self, space, v1, v2): + if self.ge(space, v1, v2) or self.isnan(space, v2): return v1 return v2 - def fmin(self, v1, v2): - if self.le(v1, v2) or self.isnan(v2): + def fmin(self, space, v1, v2): + if self.le(space, v1, v2) or self.isnan(space, v2): return v1 return v2 @@ -1363,7 +1363,7 @@ return rfloat.NAN, rfloat.NAN @specialize.argtype(1) - def round(self, v, decimals=0): + def round(self, space, v, decimals=0): ans = list(self.for_computation(self.unbox(v))) if rfloat.isfinite(ans[0]): ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) @@ -1371,8 +1371,8 @@ ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) return self.box_complex(ans[0], ans[1]) - def rint(self, v): - return self.round(v) + def rint(self, space, v): + return self.round(space, v) # No floor, ceil, trunc in numpy for complex #@simple_unary_op @@ -1804,7 +1804,7 @@ b = bool(v2) return (not b and a) or (not a and b) - def bool(self, v): + def bool(self, space, v): return bool(self.to_str(v)) def fill(self, storage, width, box, start, stop, offset): @@ -1997,7 +1997,7 @@ pieces.append(")") return "".join(pieces) - def eq(self, v1, v2): + def eq(self, space, v1, v2): assert isinstance(v1, boxes.W_VoidBox) assert isinstance(v2, boxes.W_VoidBox) s1 = v1.dtype.elsize @@ -2008,8 +2008,8 @@ return False return True - def ne(self, v1, v2): - return not self.eq(v1, v2) + def ne(self, space, v1, v2): + return not self.eq(space, v1, v2) for tp in [Int32, Int64]: if tp.T == lltype.Signed: From noreply at buildbot.pypy.org Mon Jan 26 18:48:50 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Jan 2015 18:48:50 +0100 (CET) Subject: [pypy-commit] pypy vmprof: try to 
work on storing extra pieces in header Message-ID: <20150126174850.952851C05A0@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75541:1aba5ad5fd5c Date: 2015-01-26 19:48 +0200 http://bitbucket.org/pypy/pypy/changeset/1aba5ad5fd5c/ Log: try to work on storing extra pieces in header diff --git a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c --- a/pypy/module/_vmprof/src/get_custom_offset.c +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -2,6 +2,8 @@ long pypy_jit_start_addr(); long pypy_jit_end_addr(); long pypy_jit_stack_depth_at_loc(long); +long find_codemap_at_addr(long); +long yield_bytecode_at_addr(long, long, long*); static ptrdiff_t vmprof_unw_get_custom_offset(void* ip) { long ip_l = (long)ip; @@ -11,3 +13,29 @@ } return pypy_jit_stack_depth_at_loc(ip); } + +static long vmprof_write_header_for_jit_addr(void **result, long n, + intptr_t addr, int max_depth) +{ + long codemap_pos; + long current_pos = 0; + intptr_t id; + + if (addr < pypy_jit_start_addr() || addr > pypy_jit_end_addr()) { + return n; + } + codemap_pos = find_codemap_at_addr(addr); + if (codemap_pos == -1) { + return n; + } + while (1) { + id = yield_bytecode_at_addr(codemap_pos, addr, &current_pos); + if (id == 0) { + return n; + } + result[n++] = id; + if (n >= max_depth) { + return n; + } + } +} diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -176,6 +176,8 @@ } result[n++] = ip; + n = vmprof_write_header_for_jit_addr(result, n, ip, max_depth); + if (vmprof_unw_step(&cursor) <= 0) { break; } From noreply at buildbot.pypy.org Mon Jan 26 18:52:27 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Jan 2015 18:52:27 +0100 (CET) Subject: [pypy-commit] pypy vmprof: fixes Message-ID: <20150126175227.77DF51C084F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: 
r75542:23c533e6e79e Date: 2015-01-26 19:52 +0200 http://bitbucket.org/pypy/pypy/changeset/23c533e6e79e/ Log: fixes diff --git a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c --- a/pypy/module/_vmprof/src/get_custom_offset.c +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -2,34 +2,35 @@ long pypy_jit_start_addr(); long pypy_jit_end_addr(); long pypy_jit_stack_depth_at_loc(long); -long find_codemap_at_addr(long); -long yield_bytecode_at_addr(long, long, long*); +long pypy_find_codemap_at_addr(long); +long pypy_yield_bytecode_at_addr(long, long, long*); static ptrdiff_t vmprof_unw_get_custom_offset(void* ip) { - long ip_l = (long)ip; + intptr_t ip_l = (intptr_t)ip; - if (ip < pypy_jit_start_addr() || ip > pypy_jit_end_addr()) { + if (ip_l < pypy_jit_start_addr() || ip_l > pypy_jit_end_addr()) { return -1; } - return pypy_jit_stack_depth_at_loc(ip); + return (void*)pypy_jit_stack_depth_at_loc(ip_l); } static long vmprof_write_header_for_jit_addr(void **result, long n, - intptr_t addr, int max_depth) + void *ip, int max_depth) { long codemap_pos; long current_pos = 0; intptr_t id; + intptr_t addr = (intptr_t)ip; if (addr < pypy_jit_start_addr() || addr > pypy_jit_end_addr()) { return n; } - codemap_pos = find_codemap_at_addr(addr); + codemap_pos = pypy_find_codemap_at_addr(addr); if (codemap_pos == -1) { return n; } while (1) { - id = yield_bytecode_at_addr(codemap_pos, addr, &current_pos); + id = pypy_yield_bytecode_at_addr(codemap_pos, addr, &current_pos); if (id == 0) { return n; } From noreply at buildbot.pypy.org Mon Jan 26 18:53:48 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Jan 2015 18:53:48 +0100 (CET) Subject: [pypy-commit] pypy vmprof: more consistent naming Message-ID: <20150126175348.3AFD91C084F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75543:32c9442d20e4 Date: 2015-01-26 19:53 +0200 http://bitbucket.org/pypy/pypy/changeset/32c9442d20e4/ Log: more consistent 
naming diff --git a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c --- a/pypy/module/_vmprof/src/get_custom_offset.c +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -3,7 +3,7 @@ long pypy_jit_end_addr(); long pypy_jit_stack_depth_at_loc(long); long pypy_find_codemap_at_addr(long); -long pypy_yield_bytecode_at_addr(long, long, long*); +long pypy_yield_codemap_at_addr(long, long, long*); static ptrdiff_t vmprof_unw_get_custom_offset(void* ip) { intptr_t ip_l = (intptr_t)ip; @@ -30,7 +30,7 @@ return n; } while (1) { - id = pypy_yield_bytecode_at_addr(codemap_pos, addr, &current_pos); + id = pypy_yield_codemap_at_addr(codemap_pos, addr, &current_pos); if (id == 0) { return n; } From noreply at buildbot.pypy.org Mon Jan 26 23:22:00 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Jan 2015 23:22:00 +0100 (CET) Subject: [pypy-commit] pypy default: Issue #1928 resolved Message-ID: <20150126222200.0FDDE1C06B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75544:bbb6a5825c32 Date: 2015-01-26 23:21 +0100 http://bitbucket.org/pypy/pypy/changeset/bbb6a5825c32/ Log: Issue #1928 resolved A missing detail about the logic of locals2fast when we compare it with CPython's. Thanks Krono for having found a test case that we could use! 
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -524,9 +524,10 @@ # cellvars are values exported to inner scopes # freevars are values coming from outer scopes - freevarnames = list(self.pycode.co_cellvars) + # (see locals2fast for why CO_OPTIMIZED) + freevarnames = self.pycode.co_cellvars if self.pycode.co_flags & consts.CO_OPTIMIZED: - freevarnames.extend(self.pycode.co_freevars) + freevarnames = freevarnames + self.pycode.co_freevars for i in range(len(freevarnames)): name = freevarnames[i] cell = self.cells[i] @@ -555,7 +556,16 @@ self.setfastscope(new_fastlocals_w) - freevarnames = self.pycode.co_cellvars + self.pycode.co_freevars + freevarnames = self.pycode.co_cellvars + if self.pycode.co_flags & consts.CO_OPTIMIZED: + freevarnames = freevarnames + self.pycode.co_freevars + # If the namespace is unoptimized, then one of the + # following cases applies: + # 1. It does not contain free variables, because it + # uses import * or is a top-level namespace. + # 2. It is a class namespace. + # We don't want to accidentally copy free variables + # into the locals dict used by the class. 
for i in range(len(freevarnames)): name = freevarnames[i] cell = self.cells[i] diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -516,3 +516,21 @@ assert seen == [(1, f, firstline + 6, 'line', None), (1, f, firstline + 7, 'line', None), (1, f, firstline + 8, 'line', None)] + + def test_locals2fast_freevar_bug(self): + import sys + def f(n): + class A(object): + def g(self): + return n + n = 42 + return A() + res = f(10).g() + assert res == 10 + # + def trace(*args): + return trace + sys.settrace(trace) + res = f(10).g() + sys.settrace(None) + assert res == 10 From noreply at buildbot.pypy.org Tue Jan 27 17:01:44 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 27 Jan 2015 17:01:44 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: preserve immutable optimization better Message-ID: <20150127160144.46BB91C01A9@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75545:f701f1ca17d3 Date: 2015-01-27 17:01 +0100 http://bitbucket.org/pypy/pypy/changeset/f701f1ca17d3/ Log: preserve immutable optimization better diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -30,7 +30,7 @@ jump(..., descr=...) 
""") - def test_load_attr(self): + def test_load_immutable_attr(self): src = ''' class A(object): pass diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -310,7 +310,8 @@ if type(w_value) is W_IntObject: if not self.can_contain_mutable_cell: self.can_contain_mutable_cell = True - return IntMutableCell(w_value.intval) + if self.ever_mutated: + return IntMutableCell(w_value.intval) return w_value def _copy_attr(self, obj, new_obj): diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -154,10 +154,16 @@ assert obj.map.ever_mutated == True assert obj.map is map1 -def test_mutcell(): +def test_mutcell_not_immutable(): from pypy.objspace.std.intobject import W_IntObject cls = Class() obj = cls.instantiate() + # make sure the attribute counts as mutable + obj.setdictvalue(space, "a", W_IntObject(4)) + obj.setdictvalue(space, "a", W_IntObject(5)) + assert obj.map.ever_mutated + + obj = cls.instantiate() obj.setdictvalue(space, "a", W_IntObject(5)) # not wrapped because of the FakeSpace :-( assert obj.getdictvalue(space, "a") == 5 @@ -176,6 +182,18 @@ assert mutcell2.intvalue == 7 assert mutcell2 is mutcell1 +def test_no_mutcell_if_immutable(): + # don't introduce an immutable cell if the attribute seems immutable + from pypy.objspace.std.intobject import W_IntObject + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", W_IntObject(5)) + assert not obj.map.ever_mutated + + assert obj.getdictvalue(space, "a").intval == 5 + mutcell = obj._mapdict_read_storage(0) + assert mutcell.intval == 5 + def test_mutcell_unwrap_only_if_needed(): from pypy.objspace.std.intobject import W_IntObject From noreply at buildbot.pypy.org Tue Jan 27 17:08:52 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Jan 2015 17:08:52 +0100 (CET) 
Subject: [pypy-commit] pypy default: fix Message-ID: <20150127160852.2B7631C01A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75546:e1fb49129ded Date: 2015-01-27 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/e1fb49129ded/ Log: fix diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1298,5 +1298,6 @@ self.load_reg(self.mc, res_loc, r.sp, ofs) scale = get_scale(size_loc.value) signed = (sign_loc.value != 0) - self._load_from_mem(res_loc, res_loc, ofs_loc, scale, signed, fcond) + self._load_from_mem(res_loc, res_loc, ofs_loc, imm(scale), signed, + fcond) return fcond From noreply at buildbot.pypy.org Tue Jan 27 17:19:47 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 27 Jan 2015 17:19:47 +0100 (CET) Subject: [pypy-commit] pypy typed-cells: fix test_pypy_c Message-ID: <20150127161947.EA7621C028E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r75547:a7cc774428ec Date: 2015-01-27 17:19 +0100 http://bitbucket.org/pypy/pypy/changeset/a7cc774428ec/ Log: fix test_pypy_c diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -117,12 +117,11 @@ guard_not_invalidated(descr=...) i13 = int_add_ovf(i8, i9) guard_no_overflow(descr=...) - i10p = getfield_gc_pure(p10, descr=...) - i10 = int_mul_ovf(2, i10p) + i10 = int_mul_ovf(2, i61) guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) guard_no_overflow(descr=...) - setfield_gc(p7, p11, descr=...) + setfield_gc(p7, i11, descr=...) i17 = int_sub_ovf(i4, 1) guard_no_overflow(descr=...) 
--TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -42,6 +42,7 @@ assert loop.match(""" i53 = int_lt(i48, i27) guard_true(i53, descr=...) + guard_not_invalidated(descr=...) i54 = int_add_ovf(i48, i47) guard_no_overflow(descr=...) --TICK-- From noreply at buildbot.pypy.org Tue Jan 27 17:58:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Jan 2015 17:58:21 +0100 (CET) Subject: [pypy-commit] pypy default: Compile the valgrind hints even if the header valgrind/valgrind.h is Message-ID: <20150127165821.C50831C0291@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75548:798c650600ca Date: 2015-01-27 17:58 +0100 http://bitbucket.org/pypy/pypy/changeset/798c650600ca/ Log: Compile the valgrind hints even if the header valgrind/valgrind.h is not found at translation time --- by copy-and-paste. diff --git a/rpython/jit/backend/x86/valgrind.py b/rpython/jit/backend/x86/valgrind.py --- a/rpython/jit/backend/x86/valgrind.py +++ b/rpython/jit/backend/x86/valgrind.py @@ -6,6 +6,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.objectmodel import we_are_translated +from rpython.jit.backend.x86.arch import WORD eci = ExternalCompilationInfo(includes = ['valgrind/valgrind.h']) @@ -13,9 +14,91 @@ try: rffi_platform.verify_eci(eci) except rffi_platform.CompilationError: - VALGRIND_DISCARD_TRANSLATIONS = None -else: - VALGRIND_DISCARD_TRANSLATIONS = rffi.llexternal( + # Can't open 'valgrind/valgrind.h'. It is a bad idea to just go + # ahead and not compile the valgrind-specific hacks. Instead, + # we'll include manually the few needed macros from a hopefully + # standard valgrind.h file. 
+ eci = ExternalCompilationInfo(post_include_bits = [r""" +/************ Valgrind support: only with GCC/clang for now ***********/ +/** This code is inserted only if valgrind/valgrind.h is not found **/ +/**********************************************************************/ +#ifdef __GNUC__ + +#if ${WORD} == 4 /* if 32-bit x86 */ + +#define VG__SPECIAL_INSTRUCTION_PREAMBLE \ + "roll $3, %%edi ; roll $13, %%edi\n\t" \ + "roll $29, %%edi ; roll $19, %%edi\n\t" +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile(VG__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EDX = client_request ( %EAX ) */ \ + "xchgl %%ebx,%%ebx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#else /* 64-bit x86-64 */ + +#define VG__SPECIAL_INSTRUCTION_PREAMBLE \ + "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ + "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned long long int _zzq_args[6]; \ + volatile unsigned long long int _zzq_result; \ + _zzq_args[0] = (unsigned long long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \ + __asm__ 
volatile(VG__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RDX = client_request ( %RAX ) */ \ + "xchgq %%rbx,%%rbx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) +#endif + +#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \ + _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ + (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) + +#define VG_USERREQ__DISCARD_TRANSLATIONS 0x1002 +#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ + _qzz_addr, _qzz_len, 0, 0, 0) + +/**********************************************************************/ +#else /* if !__GNUC__ */ +#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) do { } while(0) +#endif +/**********************************************************************/ +""".replace("${WORD}", str(WORD))]) + + +VALGRIND_DISCARD_TRANSLATIONS = rffi.llexternal( "VALGRIND_DISCARD_TRANSLATIONS", [llmemory.Address, lltype.Signed], lltype.Void, @@ -26,5 +109,5 @@ # ____________________________________________________________ def discard_translations(data, size): - if we_are_translated() and VALGRIND_DISCARD_TRANSLATIONS is not None: + if we_are_translated(): VALGRIND_DISCARD_TRANSLATIONS(llmemory.cast_int_to_adr(data), size) From noreply at buildbot.pypy.org Tue Jan 27 18:29:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Jan 2015 18:29:18 +0100 (CET) Subject: [pypy-commit] pypy default: Finally, found out why we have issue #900. Trying to fix it. Message-ID: <20150127172918.200241C0292@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75549:40d147ae0b10 Date: 2015-01-27 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/40d147ae0b10/ Log: Finally, found out why we have issue #900. Trying to fix it. 
diff --git a/rpython/translator/c/src/asm_msvc.h b/rpython/translator/c/src/asm_msvc.h --- a/rpython/translator/c/src/asm_msvc.h +++ b/rpython/translator/c/src/asm_msvc.h @@ -2,3 +2,14 @@ #define PYPY_X86_CHECK_SSE2_DEFINED RPY_EXTERN void pypy_x86_check_sse2(void); #endif + + +/* Provides the same access to RDTSC as used by the JIT backend. This + is needed (at least if the JIT is enabled) because otherwise the + JIT-produced assembler would use RDTSC while the non-jitted code + would use QueryPerformanceCounter(), giving different incompatible + results. See issue #900. +*/ +#include +#pragma intrinsic(__rdtsc) +#define READ_TIMESTAMP(val) do { val = (long long)__rdtsc(); } while (0) From noreply at buildbot.pypy.org Tue Jan 27 19:46:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Jan 2015 19:46:13 +0100 (CET) Subject: [pypy-commit] pypy default: Turns out we can more or less write ctypes' from_buffer() method Message-ID: <20150127184613.E64EF1C009F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75550:48b86a2aba0e Date: 2015-01-27 19:45 +0100 http://bitbucket.org/pypy/pypy/changeset/48b86a2aba0e/ Log: Turns out we can more or less write ctypes' from_buffer() method with the existing RPython-provided logic. 
diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -11,7 +11,6 @@ self._init_called = True class Test(unittest.TestCase): - @xfail def test_fom_buffer(self): a = array.array("i", range(16)) x = (c_int * 16).from_buffer(a) @@ -34,7 +33,7 @@ del a; gc.collect(); gc.collect(); gc.collect() self.assertEqual(x[:], expected) - self.assertRaises(TypeError, + self.assertRaises((TypeError, ValueError), (c_char * 16).from_buffer, "a" * 16) @xfail diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -83,6 +83,13 @@ def in_dll(self, dll, name): return self.from_address(dll._handle.getaddressindll(name)) + def from_buffer(self, obj, offset=0): + # XXX missing size checks + raw_addr = buffer(obj, offset)._pypy_raw_address() + result = self.from_address(raw_addr) + result._ensure_objects()['ffffffff'] = obj + return result + class CArgObject(object): """ simple wrapper around buffer, just for the case of freeing it afterwards diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -1,3 +1,4 @@ +import sys import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject @@ -178,6 +179,8 @@ instance = StructOrUnion.__new__(self) if isinstance(address, _rawffi.StructureInstance): address = address.buffer + # fix the address: turn it into as unsigned, in case it is negative + address = address & (sys.maxint * 2 + 1) instance.__dict__['_buffer'] = self._ffistruct.fromaddress(address) return instance From noreply at buildbot.pypy.org Tue Jan 27 19:51:27 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Jan 2015 19:51:27 +0100 (CET) Subject: [pypy-commit] pypy default: next test 
Message-ID: <20150127185127.C15AA1C009F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75551:fd84f7bb7315 Date: 2015-01-27 19:51 +0100 http://bitbucket.org/pypy/pypy/changeset/fd84f7bb7315/ Log: next test diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -36,7 +36,6 @@ self.assertRaises((TypeError, ValueError), (c_char * 16).from_buffer, "a" * 16) - @xfail def test_fom_buffer_with_offset(self): a = array.array("i", range(16)) x = (c_int * 15).from_buffer(a, sizeof(c_int)) diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -85,7 +85,12 @@ def from_buffer(self, obj, offset=0): # XXX missing size checks - raw_addr = buffer(obj, offset)._pypy_raw_address() + buf = buffer(obj, offset) + if len(buf) < self._sizeofinstances(): + raise ValueError( + "Buffer size too small (%d instead of at least %d bytes)" + % (len(buffer(obj)), self._sizeofinstances() + offset)) + raw_addr = buf._pypy_raw_address() result = self.from_address(raw_addr) result._ensure_objects()['ffffffff'] = obj return result From noreply at buildbot.pypy.org Tue Jan 27 20:01:35 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Jan 2015 20:01:35 +0100 (CET) Subject: [pypy-commit] pypy default: from_buffer_copy(). Message-ID: <20150127190135.DF72F1C009F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75552:d03ab3fafd19 Date: 2015-01-27 20:01 +0100 http://bitbucket.org/pypy/pypy/changeset/d03ab3fafd19/ Log: from_buffer_copy(). 
diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -2,7 +2,6 @@ import array import gc import unittest -from ctypes.test import xfail class X(Structure): _fields_ = [("c_int", c_int)] @@ -44,7 +43,6 @@ self.assertRaises(ValueError, lambda: (c_int * 16).from_buffer(a, sizeof(c_int))) self.assertRaises(ValueError, lambda: (c_int * 1).from_buffer(a, 16 * sizeof(c_int))) - @xfail def test_from_buffer_copy(self): a = array.array("i", range(16)) x = (c_int * 16).from_buffer_copy(a) @@ -69,7 +67,6 @@ x = (c_char * 16).from_buffer_copy("a" * 16) self.assertEqual(x[:], "a" * 16) - @xfail def test_fom_buffer_copy_with_offset(self): a = array.array("i", range(16)) x = (c_int * 15).from_buffer_copy(a, sizeof(c_int)) diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -84,17 +84,36 @@ return self.from_address(dll._handle.getaddressindll(name)) def from_buffer(self, obj, offset=0): - # XXX missing size checks - buf = buffer(obj, offset) - if len(buf) < self._sizeofinstances(): + size = self._sizeofinstances() + buf = buffer(obj, offset, size) + if len(buf) < size: raise ValueError( "Buffer size too small (%d instead of at least %d bytes)" - % (len(buffer(obj)), self._sizeofinstances() + offset)) + % (len(buf) + offset, size + offset)) raw_addr = buf._pypy_raw_address() result = self.from_address(raw_addr) result._ensure_objects()['ffffffff'] = obj return result + def from_buffer_copy(self, obj, offset=0): + size = self._sizeofinstances() + buf = buffer(obj, offset, size) + if len(buf) < size: + raise ValueError( + "Buffer size too small (%d instead of at least %d bytes)" + % (len(buf) + offset, size + offset)) + result = self() + dest = result._buffer.buffer + try: + raw_addr = buf._pypy_raw_address() + except ValueError: + 
_rawffi.rawstring2charp(dest, buf) + else: + from ctypes import memmove + memmove(dest, raw_addr, size) + return result + + class CArgObject(object): """ simple wrapper around buffer, just for the case of freeing it afterwards From noreply at buildbot.pypy.org Wed Jan 28 09:59:20 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Jan 2015 09:59:20 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: add myself to leysin sprint Message-ID: <20150128085920.636EB1C0E1C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5490:507d5695c713 Date: 2015-01-28 10:00 +0100 http://bitbucket.org/pypy/extradoc/changeset/507d5695c713/ Log: add myself to leysin sprint diff --git a/sprintinfo/leysin-winter-2015/people.txt b/sprintinfo/leysin-winter-2015/people.txt --- a/sprintinfo/leysin-winter-2015/people.txt +++ b/sprintinfo/leysin-winter-2015/people.txt @@ -12,6 +12,7 @@ ==================== ============== ======================= Armin Rigo private Maciej Fijalkowski 20-28 Ermina +Remi Meier 21-28 Ermina ==================== ============== ======================= @@ -21,7 +22,6 @@ Name Arrive/Depart Accomodation ==================== ============== ===================== Romain Guillebert ? ? -Remi Meier ? ? Christian Clauss ? ? Johan Råde ? ? Antonio Cuni ? ? diff --git a/talk/icooolps2014/position-paper.pdf b/talk/icooolps2014/position-paper.pdf index cf64998cd6ea41afd4e7e312b959b6da1fcc7087..1d6db1831c3097a65c1076f397d99b2c24012b82 GIT binary patch [cut] From noreply at buildbot.pypy.org Wed Jan 28 11:57:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 11:57:18 +0100 (CET) Subject: [pypy-commit] stmgc default: Revert this change from 24ba707614c4. I think it is dangerous. 
Message-ID: <20150128105718.1E5D61C02BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1592:24b92a4c84be Date: 2015-01-28 11:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/24b92a4c84be/ Log: Revert this change from 24ba707614c4. I think it is dangerous. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -436,8 +436,14 @@ get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ - if (!mark_visited_test_and_set(item)) - mark_trace(item, stm_object_pages); /* shared version */ + /* (arigo) I think that here we need to mark_trace() both + the shared version and the private version in all cases. + Even if the visited flag is already set, we don't know + which version was already traced... Chances are that + it was the stm_object_pages version, but we are not sure. + */ + mark_visited_test_and_set(item); + mark_trace(item, stm_object_pages); /* shared version */ mark_trace(item, base); /* private version */ })); } From noreply at buildbot.pypy.org Wed Jan 28 11:57:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 11:57:17 +0100 (CET) Subject: [pypy-commit] stmgc default: Enable or disable writing the profiling info after forks Message-ID: <20150128105717.06D701C02BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1591:3a3942c0f6e1 Date: 2015-01-28 11:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/3a3942c0f6e1/ Log: Enable or disable writing the profiling info after forks diff --git a/c7/stm/prof.c b/c7/stm/prof.c --- a/c7/stm/prof.c +++ b/c7/stm/prof.c @@ -90,7 +90,7 @@ } } -int stm_set_timing_log(const char *profiling_file_name, +int stm_set_timing_log(const char *profiling_file_name, int fork_mode, int expand_marker(stm_loc_marker_t *, char *, int)) { close_timing_log(); @@ -116,6 +116,7 @@ if (!open_timing_log(profiling_file_name)) return -1; - profiling_basefn = strdup(profiling_file_name); + if (fork_mode != 0) + profiling_basefn = 
strdup(profiling_file_name); return 0; } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -469,13 +469,15 @@ stm_loc_marker_t *markers); /* Calling this sets up a stmcb_timing_event callback that will produce - a binary file calling 'profiling_file_name'. After a fork(), it is - written to 'profiling_file_name.fork'. Call it with NULL to + a binary file called 'profiling_file_name'. Call it with + 'fork_mode == 0' for only the main process, and with + 'fork_mode == 1' to also write files called + 'profiling_file_name.fork' after a fork(). Call it with NULL to stop profiling. Returns -1 in case of error (see errno then). The optional 'expand_marker' function pointer is called to expand the marker's odd_number and object into data, starting at the given position and with the given maximum length. */ -int stm_set_timing_log(const char *profiling_file_name, +int stm_set_timing_log(const char *profiling_file_name, int fork_mode, int expand_marker(stm_loc_marker_t *, char *, int)); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -154,7 +154,7 @@ stm_loc_marker_t *markers); stmcb_timing_event_fn stmcb_timing_event; -int stm_set_timing_log(const char *profiling_file_name, +int stm_set_timing_log(const char *profiling_file_name, int prof_mode, int expand_marker(stm_loc_marker_t *, char *, int)); void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); diff --git a/c7/test/test_prof.py b/c7/test/test_prof.py --- a/c7/test/test_prof.py +++ b/c7/test/test_prof.py @@ -27,13 +27,13 @@ def test_simple(self): filename = os.path.join(str(udir), 'simple.prof') - r = lib.stm_set_timing_log(filename, ffi.NULL) + r = lib.stm_set_timing_log(filename, 0, ffi.NULL) assert r == 0 try: self.start_transaction() self.commit_transaction() finally: - lib.stm_set_timing_log(ffi.NULL, ffi.NULL) + lib.stm_set_timing_log(ffi.NULL, 0, ffi.NULL) result = read_log(filename) assert result[0][2] == 
lib.STM_TRANSACTION_START @@ -48,7 +48,7 @@ p[0] = chr(100 + marker.odd_number) return 1 filename = os.path.join(str(udir), 'contention.prof') - r = lib.stm_set_timing_log(filename, expand_marker) + r = lib.stm_set_timing_log(filename, 0, expand_marker) assert r == 0 try: p = stm_allocate_old(16) @@ -62,7 +62,7 @@ stm_set_char(p, 'B') # write py.test.raises(Conflict, self.commit_transaction) finally: - lib.stm_set_timing_log(ffi.NULL, ffi.NULL) + lib.stm_set_timing_log(ffi.NULL, 0, ffi.NULL) result = read_log(filename) id0 = result[0][1][0] From noreply at buildbot.pypy.org Wed Jan 28 12:09:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 12:09:20 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: update to stmgc/24b92a4c84be Message-ID: <20150128110920.99A701C02BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75553:0789ec7eae51 Date: 2015-01-28 11:58 +0100 http://bitbucket.org/pypy/pypy/changeset/0789ec7eae51/ Log: update to stmgc/24b92a4c84be diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -957947bc7ad9 +24b92a4c84be diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -437,6 +437,12 @@ get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ + /* (arigo) I think that here we need to mark_trace() both + the shared version and the private version in all cases. + Even if the visited flag is already set, we don't know + which version was already traced... Chances are that + it was the stm_object_pages version, but we are not sure. 
+ */ mark_visited_test_and_set(item); mark_trace(item, stm_object_pages); /* shared version */ mark_trace(item, base); /* private version */ diff --git a/rpython/translator/stm/src_stm/stm/hashtable.c b/rpython/translator/stm/src_stm/stm/hashtable.c --- a/rpython/translator/stm/src_stm/stm/hashtable.c +++ b/rpython/translator/stm/src_stm/stm/hashtable.c @@ -140,10 +140,10 @@ static void _stm_rehash_hashtable(stm_hashtable_t *hashtable, uintptr_t biggercount, - bool remove_unread) + int remove_unread_from_seg) { - dprintf(("rehash %p to %ld, remove_unread=%d\n", - hashtable, biggercount, (int)remove_unread)); + dprintf(("rehash %p to %ld, remove_unread_from_seg=%d\n", + hashtable, biggercount, remove_unread_from_seg)); size_t size = (offsetof(stm_hashtable_table_t, items) + biggercount * sizeof(stm_hashtable_entry_t *)); @@ -160,14 +160,18 @@ uintptr_t j, mask = table->mask; uintptr_t rc = biggertable->resize_counter; + char *segment_base = get_segment_base(remove_unread_from_seg); for (j = 0; j <= mask; j++) { stm_hashtable_entry_t *entry = table->items[j]; if (entry == NULL) continue; - if (remove_unread) { - if (entry->object == NULL && - !_stm_was_read_by_anybody((object_t *)entry)) + if (remove_unread_from_seg != 0) { + if (((struct stm_hashtable_entry_s *) + REAL_ADDRESS(segment_base, entry))->object == NULL && + !_stm_was_read_by_anybody((object_t *)entry)) { + dprintf((" removing dead %p\n", entry)); continue; + } } _insert_clean(biggertable, entry); rc -= 6; @@ -256,6 +260,7 @@ entry->userdata = stm_hashtable_entry_userdata; entry->index = index; entry->object = NULL; + hashtable->additions = STM_SEGMENT->segment_num; } else { /* for a non-nursery 'hashtableobj', we pretend that the @@ -293,11 +298,11 @@ e->index = index; e->object = NULL; } + hashtable->additions += 0x100; release_privatization_lock(); } write_fence(); /* make sure 'entry' is fully initialized here */ table->items[i] = entry; - hashtable->additions += 1; write_fence(); /* make sure 
'table->items' is written here */ VOLATILE_TABLE(table)->resize_counter = rc - 6; /* unlock */ return entry; @@ -310,7 +315,7 @@ biggercount *= 4; else biggercount *= 2; - _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/false); + _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/0); goto restart; } } @@ -339,8 +344,10 @@ stm_hashtable_table_t *table = hashtable->table; assert(!IS_EVEN(table->resize_counter)); - if (hashtable->additions * 4 > table->mask) { - hashtable->additions = 0; + if ((hashtable->additions >> 8) * 4 > table->mask) { + int segment_num = (hashtable->additions & 0xFF); + if (!segment_num) segment_num = 1; + hashtable->additions = segment_num; uintptr_t initial_rc = (table->mask + 1) * 4 + 1; uintptr_t num_entries_times_6 = initial_rc - table->resize_counter; uintptr_t count = INITIAL_HASHTABLE_SIZE; @@ -350,7 +357,8 @@ can never grow larger than the current table size. */ assert(count <= table->mask + 1); - _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/true); + dprintf(("compact with %ld items:\n", num_entries_times_6 / 6)); + _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/segment_num); } table = hashtable->table; diff --git a/rpython/translator/stm/src_stm/stm/prof.c b/rpython/translator/stm/src_stm/stm/prof.c --- a/rpython/translator/stm/src_stm/stm/prof.c +++ b/rpython/translator/stm/src_stm/stm/prof.c @@ -91,7 +91,7 @@ } } -int stm_set_timing_log(const char *profiling_file_name, +int stm_set_timing_log(const char *profiling_file_name, int fork_mode, int expand_marker(stm_loc_marker_t *, char *, int)) { close_timing_log(); @@ -117,6 +117,7 @@ if (!open_timing_log(profiling_file_name)) return -1; - profiling_basefn = strdup(profiling_file_name); + if (fork_mode != 0) + profiling_basefn = strdup(profiling_file_name); return 0; } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ 
b/rpython/translator/stm/src_stm/stmgc.h @@ -159,7 +159,7 @@ The best is to use typedefs like above. The object_s part contains some fields reserved for the STM library. - Right now this is only one byte. + Right now this is only four bytes. */ struct object_s { @@ -470,13 +470,15 @@ stm_loc_marker_t *markers); /* Calling this sets up a stmcb_timing_event callback that will produce - a binary file calling 'profiling_file_name'. After a fork(), it is - written to 'profiling_file_name.fork'. Call it with NULL to + a binary file called 'profiling_file_name'. Call it with + 'fork_mode == 0' for only the main process, and with + 'fork_mode == 1' to also write files called + 'profiling_file_name.fork' after a fork(). Call it with NULL to stop profiling. Returns -1 in case of error (see errno then). The optional 'expand_marker' function pointer is called to expand the marker's odd_number and object into data, starting at the given position and with the given maximum length. */ -int stm_set_timing_log(const char *profiling_file_name, +int stm_set_timing_log(const char *profiling_file_name, int fork_mode, int expand_marker(stm_loc_marker_t *, char *, int)); From noreply at buildbot.pypy.org Wed Jan 28 12:09:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 12:09:21 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Use 'PYPYSTM=filename' to produce only 'filename' from the main process, Message-ID: <20150128110921.CE8651C02BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75554:73111c36b1f9 Date: 2015-01-28 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/73111c36b1f9/ Log: Use 'PYPYSTM=filename' to produce only 'filename' from the main process, and use 'PYPYSTM=filename+' to produce 'filename' in the main process and 'filename.fork' in all forked subprocesses. 
diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -171,6 +171,17 @@ g_co_lnotab_ofs = co_lnotab_ofs; char *filename = getenv("PYPYSTM"); - if (filename && filename[0]) - stm_set_timing_log(filename, &_stm_expand_marker_for_pypy); + if (filename && filename[0]) { + /* if PYPYSTM is set to a string ending in '+', we enable the + timing log also for forked subprocesses. */ + size_t n = strlen(filename); + char filename_copy[n]; + int fork_mode = (n > 1 && filename[n - 1] == '+'); + if (fork_mode) { + memcpy(filename_copy, filename, n - 1); + filename_copy[n - 1] = 0; + filename = filename_copy; + } + stm_set_timing_log(filename, fork_mode, &_stm_expand_marker_for_pypy); + } } From noreply at buildbot.pypy.org Wed Jan 28 14:24:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 14:24:42 +0100 (CET) Subject: [pypy-commit] stmgc default: Redo 24ba707614c4, with a way that really fails an assert if the Message-ID: <20150128132442.0AE771C0DC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1593:e7a6ff9e9da3 Date: 2015-01-28 14:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/e7a6ff9e9da3/ Log: Redo 24ba707614c4, with a way that really fails an assert if the assumption is wrong. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -425,24 +425,33 @@ { /* The modified objects are the ones that may exist in two different versions: one in the segment that modified it, and another in all - other segments. (It can also be more than two if we don't have - eager write locking.) + other segments. (It could also be more than two if we did't have + eager write locking, but for now we do.) 
*/ long i; for (i = 1; i <= NB_SEGMENTS; i++) { + LIST_FOREACH_R( + get_priv_segment(i)->modified_old_objects, + object_t * /*item*/, + ({ + /* This function is called first, and there should not be + any duplicate in modified_old_objects. */ + if (mark_visited_test_and_set(item)) { + assert(!"duplicate in modified_old_objects!"); + } + })); + } + + /* Now that we have marked all modified_old_objects, trace them + (which will mark more objects). + */ + for (i = 1; i <= NB_SEGMENTS; i++) { char *base = get_segment_base(i); LIST_FOREACH_R( get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ - /* (arigo) I think that here we need to mark_trace() both - the shared version and the private version in all cases. - Even if the visited flag is already set, we don't know - which version was already traced... Chances are that - it was the stm_object_pages version, but we are not sure. - */ - mark_visited_test_and_set(item); mark_trace(item, stm_object_pages); /* shared version */ mark_trace(item, base); /* private version */ })); From noreply at buildbot.pypy.org Wed Jan 28 15:17:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 15:17:51 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: update to stmgc/e7a6ff9e9da3 Message-ID: <20150128141751.EE2F81C023F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75555:770bd3fc0462 Date: 2015-01-28 14:26 +0100 http://bitbucket.org/pypy/pypy/changeset/770bd3fc0462/ Log: update to stmgc/e7a6ff9e9da3 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -24b92a4c84be +e7a6ff9e9da3 diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -426,24 +426,33 @@ { /* The modified objects 
are the ones that may exist in two different versions: one in the segment that modified it, and another in all - other segments. (It can also be more than two if we don't have - eager write locking.) + other segments. (It could also be more than two if we did't have + eager write locking, but for now we do.) */ long i; for (i = 1; i <= NB_SEGMENTS; i++) { + LIST_FOREACH_R( + get_priv_segment(i)->modified_old_objects, + object_t * /*item*/, + ({ + /* This function is called first, and there should not be + any duplicate in modified_old_objects. */ + if (mark_visited_test_and_set(item)) { + assert(!"duplicate in modified_old_objects!"); + } + })); + } + + /* Now that we have marked all modified_old_objects, trace them + (which will mark more objects). + */ + for (i = 1; i <= NB_SEGMENTS; i++) { char *base = get_segment_base(i); LIST_FOREACH_R( get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ - /* (arigo) I think that here we need to mark_trace() both - the shared version and the private version in all cases. - Even if the visited flag is already set, we don't know - which version was already traced... Chances are that - it was the stm_object_pages version, but we are not sure. 
- */ - mark_visited_test_and_set(item); mark_trace(item, stm_object_pages); /* shared version */ mark_trace(item, base); /* private version */ })); From noreply at buildbot.pypy.org Wed Jan 28 15:17:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 15:17:53 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Clarify the output of this tool Message-ID: <20150128141753.22D901C023F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75556:373d5d1dc50d Date: 2015-01-28 14:29 +0100 http://bitbucket.org/pypy/pypy/changeset/373d5d1dc50d/ Log: Clarify the output of this tool diff --git a/rpython/translator/stm/import_stmgc.py b/rpython/translator/stm/import_stmgc.py --- a/rpython/translator/stm/import_stmgc.py +++ b/rpython/translator/stm/import_stmgc.py @@ -41,11 +41,16 @@ stmgc_dest.join('revision').write('%s\n' % rev) print rev # - print 'The differences between which files are tracked are:' - os.system("bash -c 'diff <(cd '%s' && hg status -macn stm/ | sort)" - " <(cd '%s' && hg status -macn stm/ | sort)'" - % (stmgc_dest, stmgc_dir)) - print 'Unless none are listed, use "hg add" or "hg remove".' + g = os.popen("bash -c 'diff <(cd '%s' && hg status -macn stm/ | sort)" + " <(cd '%s' && hg status -macn stm/ | sort)'" + % (stmgc_dest, stmgc_dir), 'r') + diff = g.read() + g.close() + if diff: + print + print 'WARNING: The differences between which files are tracked are:' + print diff + print 'Use "hg add" or "hg remove".' 
if __name__ == '__main__': if len(sys.argv) != 2: From noreply at buildbot.pypy.org Wed Jan 28 15:17:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 15:17:54 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Add missing finalizer to hashtable objects Message-ID: <20150128141754.55BF81C023F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75557:7973fb0d61dc Date: 2015-01-28 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/7973fb0d61dc/ Log: Add missing finalizer to hashtable objects diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -193,6 +193,7 @@ _HASHTABLE_OBJ = lltype.GcStruct('HASHTABLE_OBJ', ('ll_raw_hashtable', _STM_HASHTABLE_P), + rtti=True, adtmeths={'get': ll_hashtable_get, 'set': ll_hashtable_set}) @@ -218,10 +219,31 @@ else: p = lltype.nullptr(_STM_HASHTABLE_ENTRY) rgc.register_custom_trace_hook(_HASHTABLE_OBJ, lambda_hashtable_trace) + _register_light_finalizer_for_hashtable_obj() h = lltype.malloc(_HASHTABLE_OBJ) h.ll_raw_hashtable = llop.stm_hashtable_create(_STM_HASHTABLE_P, p) return h +def _register_light_finalizer_for_hashtable_obj(): + pass + +def _finalizer_for_hashtable_obj(p): + llop.stm_hashtable_free(lltype.Void, p.ll_raw_hashtable) + +class Entry(ExtRegistryEntry): + _about_ = _register_light_finalizer_for_hashtable_obj + + def compute_result_annotation(self): + pass + + def specialize_call(self, hop): + from rpython.rtyper.llannotation import SomePtr + args_s = [SomePtr(lltype.Ptr(_HASHTABLE_OBJ))] + funcptr = hop.rtyper.annotate_helper_fn(_finalizer_for_hashtable_obj, + args_s) + hop.exception_cannot_occur() + lltype.attachRuntimeTypeInfo(_HASHTABLE_OBJ, destrptr=funcptr) + NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) class HashtableForTest(object): From noreply at buildbot.pypy.org Wed Jan 28 15:31:22 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 28 Jan 2015 15:31:22 +0100 (CET) Subject: 
[pypy-commit] pypy object-dtype: Add missing space Message-ID: <20150128143122.545161C088C@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75558:eaa0525725fa Date: 2015-01-28 15:30 +0100 http://bitbucket.org/pypy/pypy/changeset/eaa0525725fa/ Log: Add missing space diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -326,7 +326,7 @@ if not space.is_none(w_out): raise OperationError(space.w_NotImplementedError, space.wrap( "out not supported")) - return self.get_dtype(space).itemtype.round(self, decimals) + return self.get_dtype(space).itemtype.round(space, self, decimals) def descr_astype(self, space, w_dtype): from pypy.module.micronumpy.descriptor import W_Dtype diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -684,7 +684,7 @@ while not arr_iter.done(arr_state): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) - w_v = dtype.itemtype.round(w_v, decimals) + w_v = dtype.itemtype.round(space, w_v, decimals) out_iter.setitem(out_state, w_v) arr_state = arr_iter.next(arr_state) out_state = out_iter.next(out_state) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -113,7 +113,7 @@ if arr.get_size() > self.get_size(): raise OperationError(space.w_ValueError, space.wrap( "index out of range for array")) - size = loop.count_all_true(arr) + size = loop.count_all_true(space, arr) if arr.ndims() == 1: res_shape = [size] + self.get_shape()[1:] else: @@ -129,7 +129,7 @@ if idx.get_size() > self.get_size(): raise OperationError(space.w_ValueError, space.wrap( "index out of range for array")) - size = loop.count_all_true(idx) + size = 
loop.count_all_true(space, idx) if size > val.get_size() and val.get_size() != 1: raise oefmt(space.w_ValueError, "NumPy boolean array indexing assignment " diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -397,7 +397,7 @@ return 0 @specialize.argtype(1) - def round(self, v, decimals=0): + def round(self, space, v, decimals=0): if decimals != 0: return v return Float64().box(self.unbox(v)) @@ -525,7 +525,7 @@ return self.box(ans) @specialize.argtype(1) - def round(self, v, decimals=0): + def round(self, space, v, decimals=0): raw = self.for_computation(self.unbox(v)) if decimals < 0: # No ** in rpython @@ -752,7 +752,7 @@ return math.ceil(v) @specialize.argtype(1) - def round(self, v, decimals=0): + def round(self, space, v, decimals=0): raw = self.for_computation(self.unbox(v)) if rfloat.isinf(raw): return v From noreply at buildbot.pypy.org Wed Jan 28 15:38:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 15:38:45 +0100 (CET) Subject: [pypy-commit] pypy default: Add rgc.register_custom_light_finalizer(), similar to Message-ID: <20150128143845.C29541C0A33@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75559:07bf50ea0b54 Date: 2015-01-28 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/07bf50ea0b54/ Log: Add rgc.register_custom_light_finalizer(), similar to rgc.register_custom_trace_hook(). 
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -684,7 +684,7 @@ class RegisterGcTraceEntry(ExtRegistryEntry): _about_ = register_custom_trace_hook - def compute_result_annotation(self, *args_s): + def compute_result_annotation(self, s_tp, s_lambda_func): pass def specialize_call(self, hop): @@ -692,3 +692,26 @@ lambda_func = hop.args_s[1].const hop.exception_cannot_occur() hop.rtyper.custom_trace_funcs.append((TP, lambda_func())) + +def register_custom_light_finalizer(TP, lambda_func): + """ This function does not do anything, but called from any annotated + place, will tell that "func" is used as a lightweight finalizer for TP. + The func must be specified as "lambda: func" in this call, for internal + reasons. + """ + +class RegisterCustomLightFinalizer(ExtRegistryEntry): + _about_ = register_custom_light_finalizer + + def compute_result_annotation(self, s_tp, s_lambda_func): + pass + + def specialize_call(self, hop): + from rpython.rtyper.llannotation import SomePtr + TP = hop.args_s[0].const + lambda_func = hop.args_s[1].const + ll_func = lambda_func() + args_s = [SomePtr(lltype.Ptr(TP))] + funcptr = hop.rtyper.annotate_helper_fn(ll_func, args_s) + hop.exception_cannot_occur() + lltype.attachRuntimeTypeInfo(TP, destrptr=funcptr) diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -473,6 +473,31 @@ res = self.run('custom_trace', 0) assert res == 10000 + def define_custom_light_finalizer(cls): + from rpython.rtyper.annlowlevel import llhelper + # + T = lltype.Struct('T', ('count', lltype.Signed)) + t = lltype.malloc(T, zero=True, immortal=True, flavor='raw') + # + S = lltype.GcStruct('S', rtti=True) + def customlightfinlz(addr): + t.count += 1 + lambda_customlightfinlz = lambda: customlightfinlz + # + def setup(): + rgc.register_custom_light_finalizer(S, 
lambda_customlightfinlz) + for i in range(10000): + lltype.malloc(S) + def f(n): + setup() + llop.gc__collect(lltype.Void) + return t.count + return f + + def test_custom_light_finalizer(self): + res = self.run('custom_light_finalizer', 0) + assert res == 10000 + def define_weakref(cls): import weakref From noreply at buildbot.pypy.org Wed Jan 28 15:38:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 15:38:47 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Add rgc.register_custom_light_finalizer(), similar to Message-ID: <20150128143847.08D4F1C0A33@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75560:e8885a7a1297 Date: 2015-01-28 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/e8885a7a1297/ Log: Add rgc.register_custom_light_finalizer(), similar to rgc.register_custom_trace_hook(). (grafted from 07bf50ea0b54e4c94badcd771c924263046c2fcb) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -694,7 +694,7 @@ class RegisterGcTraceEntry(ExtRegistryEntry): _about_ = register_custom_trace_hook - def compute_result_annotation(self, *args_s): + def compute_result_annotation(self, s_tp, s_lambda_func): pass def specialize_call(self, hop): @@ -702,3 +702,26 @@ lambda_func = hop.args_s[1].const hop.exception_cannot_occur() hop.rtyper.custom_trace_funcs.append((TP, lambda_func())) + +def register_custom_light_finalizer(TP, lambda_func): + """ This function does not do anything, but called from any annotated + place, will tell that "func" is used as a lightweight finalizer for TP. + The func must be specified as "lambda: func" in this call, for internal + reasons. 
+ """ + +class RegisterCustomLightFinalizer(ExtRegistryEntry): + _about_ = register_custom_light_finalizer + + def compute_result_annotation(self, s_tp, s_lambda_func): + pass + + def specialize_call(self, hop): + from rpython.rtyper.llannotation import SomePtr + TP = hop.args_s[0].const + lambda_func = hop.args_s[1].const + ll_func = lambda_func() + args_s = [SomePtr(lltype.Ptr(TP))] + funcptr = hop.rtyper.annotate_helper_fn(ll_func, args_s) + hop.exception_cannot_occur() + lltype.attachRuntimeTypeInfo(TP, destrptr=funcptr) diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -473,6 +473,31 @@ res = self.run('custom_trace', 0) assert res == 10000 + def define_custom_light_finalizer(cls): + from rpython.rtyper.annlowlevel import llhelper + # + T = lltype.Struct('T', ('count', lltype.Signed)) + t = lltype.malloc(T, zero=True, immortal=True, flavor='raw') + # + S = lltype.GcStruct('S', rtti=True) + def customlightfinlz(addr): + t.count += 1 + lambda_customlightfinlz = lambda: customlightfinlz + # + def setup(): + rgc.register_custom_light_finalizer(S, lambda_customlightfinlz) + for i in range(10000): + lltype.malloc(S) + def f(n): + setup() + llop.gc__collect(lltype.Void) + return t.count + return f + + def test_custom_light_finalizer(self): + res = self.run('custom_light_finalizer', 0) + assert res == 10000 + def define_weakref(cls): import weakref From noreply at buildbot.pypy.org Wed Jan 28 15:38:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 15:38:48 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Simplify the finalizer logic with rgc.register_custom_light_finalizer() Message-ID: <20150128143848.4223B1C0A33@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75561:fb6a19fe09e1 Date: 2015-01-28 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/fb6a19fe09e1/ Log: Simplify 
the finalizer logic with rgc.register_custom_light_finalizer() diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -183,6 +183,18 @@ ('object', llmemory.GCREF)) @dont_look_inside +def ll_hashtable_create(): + # Pass a null pointer to _STM_HASHTABLE_ENTRY to stm_hashtable_create(). + # Make sure we see a malloc() of it, so that its typeid is correctly + # initialized. It can be done in a NonConstant(False) path so that + # the C compiler will actually drop it. + if _false: + p = lltype.malloc(_STM_HASHTABLE_ENTRY) + else: + p = lltype.nullptr(_STM_HASHTABLE_ENTRY) + return llop.stm_hashtable_create(_STM_HASHTABLE_P, p) + + at dont_look_inside def ll_hashtable_get(h, key): # 'key' must be a plain integer. Returns a GCREF. return llop.stm_hashtable_read(llmemory.GCREF, h, h.ll_raw_hashtable, key) @@ -204,46 +216,22 @@ llop.stm_hashtable_tracefn(lltype.Void, addr.address[0], visit_fn) lambda_hashtable_trace = lambda: ll_hashtable_trace +def ll_hashtable_finalizer(p): + llop.stm_hashtable_free(lltype.Void, p.ll_raw_hashtable) +lambda_hashtable_finlz = lambda: ll_hashtable_finalizer + _false = CDefinedIntSymbolic('0', default=0) # remains in the C code @dont_look_inside def create_hashtable(): if not we_are_translated(): return HashtableForTest() # for tests - # Pass a null pointer to _STM_HASHTABLE_ENTRY to stm_hashtable_create(). - # Make sure we see a malloc() of it, so that its typeid is correctly - # initialized. It can be done in a NonConstant(False) path so that - # the C compiler will actually drop it. 
- if _false: - p = lltype.malloc(_STM_HASHTABLE_ENTRY) - else: - p = lltype.nullptr(_STM_HASHTABLE_ENTRY) + rgc.register_custom_light_finalizer(_HASHTABLE_OBJ, lambda_hashtable_finlz) rgc.register_custom_trace_hook(_HASHTABLE_OBJ, lambda_hashtable_trace) - _register_light_finalizer_for_hashtable_obj() h = lltype.malloc(_HASHTABLE_OBJ) - h.ll_raw_hashtable = llop.stm_hashtable_create(_STM_HASHTABLE_P, p) + h.ll_raw_hashtable = ll_hashtable_create() return h -def _register_light_finalizer_for_hashtable_obj(): - pass - -def _finalizer_for_hashtable_obj(p): - llop.stm_hashtable_free(lltype.Void, p.ll_raw_hashtable) - -class Entry(ExtRegistryEntry): - _about_ = _register_light_finalizer_for_hashtable_obj - - def compute_result_annotation(self): - pass - - def specialize_call(self, hop): - from rpython.rtyper.llannotation import SomePtr - args_s = [SomePtr(lltype.Ptr(_HASHTABLE_OBJ))] - funcptr = hop.rtyper.annotate_helper_fn(_finalizer_for_hashtable_obj, - args_s) - hop.exception_cannot_occur() - lltype.attachRuntimeTypeInfo(_HASHTABLE_OBJ, destrptr=funcptr) - NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) class HashtableForTest(object): From noreply at buildbot.pypy.org Wed Jan 28 16:13:27 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 16:13:27 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Trying to remove the conflicts on 'alive_loops'. Message-ID: <20150128151327.C73241C00BF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75562:b42c25c45a1c Date: 2015-01-28 16:13 +0100 http://bitbucket.org/pypy/pypy/changeset/b42c25c45a1c/ Log: Trying to remove the conflicts on 'alive_loops'. 
diff --git a/rpython/jit/metainterp/memmgr.py b/rpython/jit/metainterp/memmgr.py --- a/rpython/jit/metainterp/memmgr.py +++ b/rpython/jit/metainterp/memmgr.py @@ -1,7 +1,10 @@ import math from rpython.rlib.rarithmetic import r_int64 from rpython.rlib.debug import debug_start, debug_print, debug_stop -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, stm_ignored +from rpython.rlib.rgc import stm_is_enabled +from rpython.rtyper import annlowlevel +from rpython.rlib import rstm # # Logic to decide which loops are old and not used any more. @@ -37,7 +40,13 @@ # per second self.current_generation = r_int64(1) self.next_check = r_int64(-1) - self.alive_loops = {} + if not stm_is_enabled(): + self.alive_loops = {} + else: + # hash table mapping integers to looptokens + self.stm_alive_loops = rstm.ll_hashtable_create() + # lowest integer key used in stm_alive_loops + self.stm_lowest_key = 0 def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: @@ -57,21 +66,61 @@ def keep_loop_alive(self, looptoken): if looptoken.generation != self.current_generation: - looptoken.generation = self.current_generation - self.alive_loops[looptoken] = None + # STM: never produce conflicts from this function. 
+ with stm_ignored: + looptoken.generation = self.current_generation + if not stm_is_enabled(): + self.alive_loops[looptoken] = None + else: + next_key = rstm.stm_count() + gcref = annlowlevel.cast_instance_to_gcref(looptoken) + rstm.ll_hashtable_set(self.stm_alive_loops, next_key, gcref) def _kill_old_loops_now(self): debug_start("jit-mem-collect") - oldtotal = len(self.alive_loops) #print self.alive_loops.keys() debug_print("Current generation:", self.current_generation) + max_generation = self.current_generation - (self.max_age-1) + # + if not stm_is_enabled(): + oldtotal = len(self.alive_loops) + for looptoken in self.alive_loops.keys(): + if not self._must_keep_loop(looptoken, max_generation): + del self.alive_loops[looptoken] + newtotal = len(self.alive_loops) + else: + # this logic assumes that we are more or less the only running + # thread. Even if there are possible corner cases, they should + # not have worse results than a possibly early or late freeing + # of one loop, and only in corner cases. 
+ from rpython.jit.metainterp.history import JitCellToken + stm_alive_loops = self.stm_alive_loops + keep_loops = set() + # + # all keys in 'stm_alive_loops' should be in the following range + old_count = self.stm_lowest_key + new_count = rstm.stm_count() + for key in range(old_count, new_count): + gcref = rstm.ll_hashtable_get(stm_alive_loops, key) + if not gcref: + continue + # make 'stm_alive_loops' empty, and add the loops that we + # must keep in the set 'keep_loops' + rstm.ll_hashtable_set(stm_alive_loops, key, rstm.NULL_GCREF) + looptoken = annlowlevel.cast_gcref_to_instance(JitCellToken, + gcref) + if self._must_keep_loop(looptoken): + keep_loops.add(looptoken) + newtotal = len(keep_loops) + # + # now re-add loops with key numbers that *end* at 'new_count' + for looptoken in keep_loops: + gcref = annlowlevel.cast_instance_to_gcref(looptoken) + rstm.ll_hashtable_set(stm_alive_loops, new_count, gcref) + new_count -= 1 + self.stm_lowest_key = new_count + 1 # lowest used key number + # debug_print("Loop tokens before:", oldtotal) - max_generation = self.current_generation - (self.max_age-1) - for looptoken in self.alive_loops.keys(): - if (0 <= looptoken.generation < max_generation or - looptoken.invalidated): - del self.alive_loops[looptoken] - newtotal = len(self.alive_loops) debug_print("Loop tokens freed: ", oldtotal - newtotal) debug_print("Loop tokens left: ", newtotal) #print self.alive_loops.keys() @@ -81,3 +130,7 @@ # a single one is not enough for all tests :-( rgc.collect(); rgc.collect(); rgc.collect() debug_stop("jit-mem-collect") + + def _must_keep_loop(self, looptoken, max_generation): + return not (0 <= looptoken.generation < max_generation or + looptoken.invalidated) From noreply at buildbot.pypy.org Wed Jan 28 16:24:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 16:24:30 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20150128152430.9E90E1C00BF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo 
Branch: stmgc-c7 Changeset: r75563:9904ef8572af Date: 2015-01-28 16:23 +0100 http://bitbucket.org/pypy/pypy/changeset/9904ef8572af/ Log: fixes diff --git a/rpython/jit/metainterp/memmgr.py b/rpython/jit/metainterp/memmgr.py --- a/rpython/jit/metainterp/memmgr.py +++ b/rpython/jit/metainterp/memmgr.py @@ -44,7 +44,7 @@ self.alive_loops = {} else: # hash table mapping integers to looptokens - self.stm_alive_loops = rstm.ll_hashtable_create() + self.stm_alive_loops = rstm.NULL_HASHTABLE # lowest integer key used in stm_alive_loops self.stm_lowest_key = 0 @@ -66,7 +66,8 @@ def keep_loop_alive(self, looptoken): if looptoken.generation != self.current_generation: - # STM: never produce conflicts from this function. + # STM: never produce conflicts from this function + # (except possibly the first time it is called) with stm_ignored: looptoken.generation = self.current_generation if not stm_is_enabled(): @@ -74,6 +75,8 @@ else: next_key = rstm.stm_count() gcref = annlowlevel.cast_instance_to_gcref(looptoken) + if not self.stm_alive_loops: + self.stm_alive_loops = rstm.ll_hashtable_create() rstm.ll_hashtable_set(self.stm_alive_loops, next_key, gcref) def _kill_old_loops_now(self): @@ -100,17 +103,18 @@ # all keys in 'stm_alive_loops' should be in the following range old_count = self.stm_lowest_key new_count = rstm.stm_count() - for key in range(old_count, new_count): - gcref = rstm.ll_hashtable_get(stm_alive_loops, key) - if not gcref: - continue - # make 'stm_alive_loops' empty, and add the loops that we - # must keep in the set 'keep_loops' - rstm.ll_hashtable_set(stm_alive_loops, key, rstm.NULL_GCREF) - looptoken = annlowlevel.cast_gcref_to_instance(JitCellToken, - gcref) - if self._must_keep_loop(looptoken): - keep_loops.add(looptoken) + if stm_alive_loops: + for key in range(old_count, new_count): + gcref = rstm.ll_hashtable_get(stm_alive_loops, key) + if not gcref: + continue + # make 'stm_alive_loops' empty, and add the loops that we + # must keep in the set 
'keep_loops' + rstm.ll_hashtable_set(stm_alive_loops, key, rstm.NULL_GCREF) + looptoken = annlowlevel.cast_gcref_to_instance(JitCellToken, + gcref) + if self._must_keep_loop(looptoken): + keep_loops.add(looptoken) newtotal = len(keep_loops) # # now re-add loops with key numbers that *end* at 'new_count' diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -177,6 +177,7 @@ # ____________________________________________________________ _STM_HASHTABLE_P = rffi.COpaquePtr('stm_hashtable_t') +NULL_HASHTABLE = lltype.nullptr(_STM_HASHTABLE_P.TO) _STM_HASHTABLE_ENTRY = lltype.GcStruct('HASHTABLE_ENTRY', ('index', lltype.Unsigned), @@ -203,6 +204,11 @@ def ll_hashtable_set(h, key, value): llop.stm_hashtable_write(lltype.Void, h, h.ll_raw_hashtable, key, value) + at dont_look_inside +def ll_hashtable_free(h): + llop.stm_hashtable_free(lltype.Void, h) + +# ----- _HASHTABLE_OBJ = lltype.GcStruct('HASHTABLE_OBJ', ('ll_raw_hashtable', _STM_HASHTABLE_P), rtti=True, @@ -217,7 +223,7 @@ lambda_hashtable_trace = lambda: ll_hashtable_trace def ll_hashtable_finalizer(p): - llop.stm_hashtable_free(lltype.Void, p.ll_raw_hashtable) + ll_hashtable_free(p.ll_raw_hashtable) lambda_hashtable_finlz = lambda: ll_hashtable_finalizer _false = CDefinedIntSymbolic('0', default=0) # remains in the C code From noreply at buildbot.pypy.org Wed Jan 28 16:54:28 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Jan 2015 16:54:28 +0100 (CET) Subject: [pypy-commit] stmgc default: add explanation Message-ID: <20150128155428.2AB4F1C00BF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1594:ecc40e41135b Date: 2015-01-28 16:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/ecc40e41135b/ Log: add explanation diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -345,7 +345,11 @@ assert(!is_new_object(item)); /* should never be in that list */ if 
(!mark_visited_test_and_set(item)) { - /* trace shared, committed version */ + /* trace shared, committed version: only do this if we didn't + trace it already. This is safe because we don't trace any + objs before mark_visit_from_modified_objects AND if we + do mark_and_trace on an obj that is modified in >1 segment, + the tracing always happens in seg0 (see mark_and_trace). */ mark_and_trace(item, stm_object_pages); } mark_and_trace(item, base); /* private, modified version */ From noreply at buildbot.pypy.org Wed Jan 28 16:55:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 16:55:30 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fixes, and partial revert: must use _HASHTABLE_OBJ everywhere in RPython, Message-ID: <20150128155530.C4B7A1C00BF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75564:963abcf27a42 Date: 2015-01-28 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/963abcf27a42/ Log: Fixes, and partial revert: must use _HASHTABLE_OBJ everywhere in RPython, not the low-level _STM_HASHTABLE_P, because the latter is not traced! 
diff --git a/rpython/jit/metainterp/memmgr.py b/rpython/jit/metainterp/memmgr.py --- a/rpython/jit/metainterp/memmgr.py +++ b/rpython/jit/metainterp/memmgr.py @@ -76,8 +76,8 @@ next_key = rstm.stm_count() gcref = annlowlevel.cast_instance_to_gcref(looptoken) if not self.stm_alive_loops: - self.stm_alive_loops = rstm.ll_hashtable_create() - rstm.ll_hashtable_set(self.stm_alive_loops, next_key, gcref) + self.stm_alive_loops = rstm.create_hashtable() + self.stm_alive_loops.set(next_key, gcref) def _kill_old_loops_now(self): debug_start("jit-mem-collect") @@ -103,14 +103,16 @@ # all keys in 'stm_alive_loops' should be in the following range old_count = self.stm_lowest_key new_count = rstm.stm_count() + oldtotal = 0 if stm_alive_loops: for key in range(old_count, new_count): - gcref = rstm.ll_hashtable_get(stm_alive_loops, key) + gcref = stm_alive_loops.get(key) if not gcref: continue # make 'stm_alive_loops' empty, and add the loops that we # must keep in the set 'keep_loops' - rstm.ll_hashtable_set(stm_alive_loops, key, rstm.NULL_GCREF) + stm_alive_loops.set(key, rstm.NULL_GCREF) + oldtotal += 1 looptoken = annlowlevel.cast_gcref_to_instance(JitCellToken, gcref) if self._must_keep_loop(looptoken): @@ -120,7 +122,7 @@ # now re-add loops with key numbers that *end* at 'new_count' for looptoken in keep_loops: gcref = annlowlevel.cast_instance_to_gcref(looptoken) - rstm.ll_hashtable_set(stm_alive_loops, new_count, gcref) + stm_alive_loops.set(new_count, gcref) new_count -= 1 self.stm_lowest_key = new_count + 1 # lowest used key number # diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -1,5 +1,5 @@ from rpython.rlib.objectmodel import we_are_translated, specialize -from rpython.rlib.objectmodel import CDefinedIntSymbolic +from rpython.rlib.objectmodel import CDefinedIntSymbolic, stm_ignored from rpython.rlib.nonconst import NonConstant from rpython.rlib import rgc from rpython.rtyper.lltypesystem import 
lltype, rffi, rstr, llmemory @@ -177,54 +177,38 @@ # ____________________________________________________________ _STM_HASHTABLE_P = rffi.COpaquePtr('stm_hashtable_t') -NULL_HASHTABLE = lltype.nullptr(_STM_HASHTABLE_P.TO) _STM_HASHTABLE_ENTRY = lltype.GcStruct('HASHTABLE_ENTRY', ('index', lltype.Unsigned), ('object', llmemory.GCREF)) @dont_look_inside -def ll_hashtable_create(): - # Pass a null pointer to _STM_HASHTABLE_ENTRY to stm_hashtable_create(). - # Make sure we see a malloc() of it, so that its typeid is correctly - # initialized. It can be done in a NonConstant(False) path so that - # the C compiler will actually drop it. - if _false: - p = lltype.malloc(_STM_HASHTABLE_ENTRY) - else: - p = lltype.nullptr(_STM_HASHTABLE_ENTRY) - return llop.stm_hashtable_create(_STM_HASHTABLE_P, p) - - at dont_look_inside -def ll_hashtable_get(h, key): +def _ll_hashtable_get(h, key): # 'key' must be a plain integer. Returns a GCREF. return llop.stm_hashtable_read(llmemory.GCREF, h, h.ll_raw_hashtable, key) @dont_look_inside -def ll_hashtable_set(h, key, value): +def _ll_hashtable_set(h, key, value): llop.stm_hashtable_write(lltype.Void, h, h.ll_raw_hashtable, key, value) - at dont_look_inside -def ll_hashtable_free(h): - llop.stm_hashtable_free(lltype.Void, h) - -# ----- _HASHTABLE_OBJ = lltype.GcStruct('HASHTABLE_OBJ', ('ll_raw_hashtable', _STM_HASHTABLE_P), rtti=True, - adtmeths={'get': ll_hashtable_get, - 'set': ll_hashtable_set}) + adtmeths={'get': _ll_hashtable_get, + 'set': _ll_hashtable_set}) +NULL_HASHTABLE = lltype.nullptr(_HASHTABLE_OBJ) -def ll_hashtable_trace(gc, obj, callback, arg): +def _ll_hashtable_trace(gc, obj, callback, arg): from rpython.memory.gctransform.stmframework import get_visit_function visit_fn = get_visit_function(callback, arg) addr = obj + llmemory.offsetof(_HASHTABLE_OBJ, 'll_raw_hashtable') llop.stm_hashtable_tracefn(lltype.Void, addr.address[0], visit_fn) -lambda_hashtable_trace = lambda: ll_hashtable_trace +lambda_hashtable_trace = 
lambda: _ll_hashtable_trace -def ll_hashtable_finalizer(p): - ll_hashtable_free(p.ll_raw_hashtable) -lambda_hashtable_finlz = lambda: ll_hashtable_finalizer +def _ll_hashtable_finalizer(h): + if h.ll_raw_hashtable: + llop.stm_hashtable_free(lltype.Void, h.ll_raw_hashtable) +lambda_hashtable_finlz = lambda: _ll_hashtable_finalizer _false = CDefinedIntSymbolic('0', default=0) # remains in the C code @@ -234,8 +218,17 @@ return HashtableForTest() # for tests rgc.register_custom_light_finalizer(_HASHTABLE_OBJ, lambda_hashtable_finlz) rgc.register_custom_trace_hook(_HASHTABLE_OBJ, lambda_hashtable_trace) + # Pass a null pointer to _STM_HASHTABLE_ENTRY to stm_hashtable_create(). + # Make sure we see a malloc() of it, so that its typeid is correctly + # initialized. It can be done in a NonConstant(False) path so that + # the C compiler will actually drop it. + if _false: + p = lltype.malloc(_STM_HASHTABLE_ENTRY) + else: + p = lltype.nullptr(_STM_HASHTABLE_ENTRY) h = lltype.malloc(_HASHTABLE_OBJ) - h.ll_raw_hashtable = ll_hashtable_create() + h.ll_raw_hashtable = lltype.nullptr(_STM_HASHTABLE_P.TO) + h.ll_raw_hashtable = llop.stm_hashtable_create(_STM_HASHTABLE_P, p) return h NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) From noreply at buildbot.pypy.org Wed Jan 28 16:57:40 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 28 Jan 2015 16:57:40 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Now that object arrays work, raise the proper exception Message-ID: <20150128155740.0995B1C00BF@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75565:7f1fc67f7b45 Date: 2015-01-28 16:55 +0100 http://bitbucket.org/pypy/pypy/changeset/7f1fc67f7b45/ Log: Now that object arrays work, raise the proper exception diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -157,6 +157,8 @@ if (isinstance(w_item, W_NDimArray) or 
space.isinstance_w(w_item, space.w_list)): w_item = convert_to_array(space, w_item) + if w_item.implementation.dtype.num == NPY.OBJECT: + raise OperationError(space.w_TypeError, space.wrap("long() argument must be a string or a number, not 'object'")) # Mimic numpy's error message if shape is None: shape = w_item.get_shape() else: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3135,11 +3135,7 @@ assert b[35] == 200 b[[slice(25, 30)]] = range(5) assert all(a[:5] == range(5)) - import sys - if '__pypy__' not in sys.builtin_module_names: - raises(TypeError, 'b[[[slice(25, 125)]]]') - else: - raises(NotImplementedError, 'b[[[slice(25, 125)]]]') + raises(TypeError, 'b[[[slice(25, 125)]]]') def test_cumsum(self): from numpy import arange From noreply at buildbot.pypy.org Wed Jan 28 17:25:48 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 28 Jan 2015 17:25:48 +0100 (CET) Subject: [pypy-commit] pypy object-dtype: Don't inherit Primitive Message-ID: <20150128162548.261841C0285@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: object-dtype Changeset: r75566:155d70548c64 Date: 2015-01-28 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/155d70548c64/ Log: Don't inherit Primitive diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1625,7 +1625,7 @@ _all_objs_for_tests = [] # for tests -class ObjectType(Primitive, BaseType): +class ObjectType(BaseType): T = lltype.Signed BoxType = boxes.W_ObjectBox @@ -1660,6 +1660,11 @@ w_obj = _all_objs_for_tests[res] return w_obj + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for i in xrange(start, stop, width): + self._write(storage, i, offset, value) + def unbox(self, box): assert isinstance(box, 
self.BoxType) return box.w_obj From noreply at buildbot.pypy.org Wed Jan 28 17:29:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 17:29:02 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20150128162902.6A5C61C0285@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75567:4ca0f3bc9268 Date: 2015-01-28 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/4ca0f3bc9268/ Log: fix diff --git a/rpython/jit/metainterp/memmgr.py b/rpython/jit/metainterp/memmgr.py --- a/rpython/jit/metainterp/memmgr.py +++ b/rpython/jit/metainterp/memmgr.py @@ -40,13 +40,17 @@ # per second self.current_generation = r_int64(1) self.next_check = r_int64(-1) - if not stm_is_enabled(): - self.alive_loops = {} - else: - # hash table mapping integers to looptokens - self.stm_alive_loops = rstm.NULL_HASHTABLE - # lowest integer key used in stm_alive_loops - self.stm_lowest_key = 0 + + # We can't use stm_is_enabled() here, because we have only one + # instance of MemoryManager built before translation. 
+ # For the non-stm case, we'll use this: + self.alive_loops = {} + + # For the stm case, we'll use this: + # * hash table mapping integers to looptokens + self.stm_alive_loops = rstm.NULL_HASHTABLE + # * lowest integer key used in stm_alive_loops + self.stm_lowest_key = 0 def set_max_age(self, max_age, check_frequency=0): if max_age <= 0: From noreply at buildbot.pypy.org Wed Jan 28 17:30:03 2015 From: noreply at buildbot.pypy.org (msabramo) Date: Wed, 28 Jan 2015 17:30:03 +0100 (CET) Subject: [pypy-commit] cffi osx_use_homebrew: OS X with Homebrew: set PKG_CONFIG_PATH Message-ID: <20150128163003.058341C0285@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: osx_use_homebrew Changeset: r1642:86f07caeaaf0 Date: 2015-01-27 17:12 -0800 http://bitbucket.org/cffi/cffi/changeset/86f07caeaaf0/ Log: OS X with Homebrew: set PKG_CONFIG_PATH so that it automatically finds a Homebrew-installed libffi Without this, I get: [marca at marca-mac2 cffi]$ python setup.py build Package libffi was not found in the pkg-config search path. Perhaps you should add the directory containing `libffi.pc' to the PKG_CONFIG_PATH environment variable No package 'libffi' found ... /usr/bin/clang -fno-strict-aliasing -fno-common -dynamic -g -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -arch x86_64 -I/usr/include/ffi -I/usr/include/libffi -I/Library/Frameworks/Pytho n.framework/Versions/2.7/include/python2.7 -c c/_cffi_backend.c -o build/temp.macosx-10.6-intel-2.7/c/_cffi_backend.o c/_cffi_backend.c:13:10: fatal error: 'ffi.h' file not found #include ^ 1 error generated. error: command '/usr/bin/clang' failed with exit status 1 With this, I get: [marca at marca-mac2 cffi]$ python setup.py build_ext -i ... 
/usr/bin/clang -fno-strict-aliasing -fno-common -dynamic -g -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -arch x86_64 -I/usr/local/Cellar/libffi/3.0.13/lib/libffi-3.0.13/include -I/Libra ry/Frameworks/Python.framework/Versions/2.7/include/python2.7 -c c/_cffi_backend.c -o build/temp.macosx-10.6-intel-2.7/c/_cffi_backend.o creating build/lib.macosx-10.6-intel-2.7 /usr/bin/clang -bundle -undefined dynamic_lookup -g -arch x86_64 build/temp.macosx-10.6-intel-2.7/c/_cffi_backend.o -L/usr/local/Cellar/libffi/3.0.13/lib -lffi -o build/lib.macosx-10.6-intel-2.7/_cffi_backend.so copying build/lib.macosx-10.6-intel-2.7/_cffi_backend.so -> [marca @marca-mac2 cffi]$ ls -l _cffi_backend.so -rwxr-xr-x+ 1 marca staff 99844 Jan 27 17:11 _cffi_backend.so* [marca at marca-mac2 cffi]$ python -c "import _cffi_backend; print(_cffi_backend)" diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -66,12 +66,24 @@ sys.stderr.write("The above error message can be safely ignored\n") def use_pkg_config(): + if sys.platform == 'darwin' and os.path.exists('/usr/local/bin/brew'): + use_homebrew_for_libffi() + _ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True) _ask_pkg_config(extra_compile_args, '--cflags-only-other') _ask_pkg_config(library_dirs, '--libs-only-L', '-L', sysroot=True) _ask_pkg_config(extra_link_args, '--libs-only-other') _ask_pkg_config(libraries, '--libs-only-l', '-l') +def use_homebrew_for_libffi(): + # We can build by setting: + # PKG_CONFIG_PATH = $(brew --prefix libffi)/lib/pkgconfig + with os.popen('brew --prefix libffi') as brew_prefix_cmd: + prefix = brew_prefix_cmd.read().strip() + pkgconfig = os.path.join(prefix, 'lib', 'pkgconfig') + os.environ['PKG_CONFIG_PATH'] = ( + os.environ.get('PKG_CONFIG_PATH', '') + ':' + pkgconfig) + if sys.platform == 'win32': COMPILE_LIBFFI = 'c/libffi_msvc' # from the CPython distribution From noreply at buildbot.pypy.org Wed Jan 28 17:30:04 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 
2015 17:30:04 +0100 (CET) Subject: [pypy-commit] cffi default: Merged in msabramo/cffi/osx_use_homebrew (pull request #55) Message-ID: <20150128163004.0AC131C0285@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1643:0de376abfc29 Date: 2015-01-28 17:30 +0100 http://bitbucket.org/cffi/cffi/changeset/0de376abfc29/ Log: Merged in msabramo/cffi/osx_use_homebrew (pull request #55) OS X with Homebrew: set PKG_CONFIG_PATH diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -66,12 +66,24 @@ sys.stderr.write("The above error message can be safely ignored\n") def use_pkg_config(): + if sys.platform == 'darwin' and os.path.exists('/usr/local/bin/brew'): + use_homebrew_for_libffi() + _ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True) _ask_pkg_config(extra_compile_args, '--cflags-only-other') _ask_pkg_config(library_dirs, '--libs-only-L', '-L', sysroot=True) _ask_pkg_config(extra_link_args, '--libs-only-other') _ask_pkg_config(libraries, '--libs-only-l', '-l') +def use_homebrew_for_libffi(): + # We can build by setting: + # PKG_CONFIG_PATH = $(brew --prefix libffi)/lib/pkgconfig + with os.popen('brew --prefix libffi') as brew_prefix_cmd: + prefix = brew_prefix_cmd.read().strip() + pkgconfig = os.path.join(prefix, 'lib', 'pkgconfig') + os.environ['PKG_CONFIG_PATH'] = ( + os.environ.get('PKG_CONFIG_PATH', '') + ':' + pkgconfig) + if sys.platform == 'win32': COMPILE_LIBFFI = 'c/libffi_msvc' # from the CPython distribution From noreply at buildbot.pypy.org Wed Jan 28 17:35:49 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 17:35:49 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Meh, set() is not RPython Message-ID: <20150128163549.49D2D1C0285@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75568:08f82ebbfe1c Date: 2015-01-28 17:34 +0100 http://bitbucket.org/pypy/pypy/changeset/08f82ebbfe1c/ Log: Meh, set() is not RPython diff --git a/rpython/jit/metainterp/memmgr.py 
b/rpython/jit/metainterp/memmgr.py --- a/rpython/jit/metainterp/memmgr.py +++ b/rpython/jit/metainterp/memmgr.py @@ -102,7 +102,7 @@ # of one loop, and only in corner cases. from rpython.jit.metainterp.history import JitCellToken stm_alive_loops = self.stm_alive_loops - keep_loops = set() + keep_loops = {} # # all keys in 'stm_alive_loops' should be in the following range old_count = self.stm_lowest_key @@ -120,7 +120,7 @@ looptoken = annlowlevel.cast_gcref_to_instance(JitCellToken, gcref) if self._must_keep_loop(looptoken): - keep_loops.add(looptoken) + keep_loops[looptoken] = None newtotal = len(keep_loops) # # now re-add loops with key numbers that *end* at 'new_count' From noreply at buildbot.pypy.org Wed Jan 28 17:58:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 17:58:37 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: next fix Message-ID: <20150128165837.717501C00BF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75569:b5eea6414a88 Date: 2015-01-28 17:44 +0100 http://bitbucket.org/pypy/pypy/changeset/b5eea6414a88/ Log: next fix diff --git a/rpython/jit/metainterp/memmgr.py b/rpython/jit/metainterp/memmgr.py --- a/rpython/jit/metainterp/memmgr.py +++ b/rpython/jit/metainterp/memmgr.py @@ -119,7 +119,7 @@ oldtotal += 1 looptoken = annlowlevel.cast_gcref_to_instance(JitCellToken, gcref) - if self._must_keep_loop(looptoken): + if self._must_keep_loop(looptoken, max_generation): keep_loops[looptoken] = None newtotal = len(keep_loops) # From noreply at buildbot.pypy.org Wed Jan 28 17:58:38 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 28 Jan 2015 17:58:38 +0100 (CET) Subject: [pypy-commit] pypy default: fix incomplete comment Message-ID: <20150128165838.A271F1C00BF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75570:b3f3a6ece458 Date: 2015-01-28 17:58 +0100 http://bitbucket.org/pypy/pypy/changeset/b3f3a6ece458/ Log: fix incomplete comment diff --git 
a/rpython/translator/tool/cbuild.py b/rpython/translator/tool/cbuild.py --- a/rpython/translator/tool/cbuild.py +++ b/rpython/translator/tool/cbuild.py @@ -59,7 +59,11 @@ separately and linked later on. (If an .h file is needed for other .c files to access this, it can be put in includes.) - (export_symbols: killed, replaced by @rlib.entrypoint.export_symbol) + (export_symbols: killed; you need, depending on the case, to + add the RPY_EXTERN or RPY_EXPORTED macro just before the + declaration of each function in the C header file, as explained + in translator/c/src/precommondefs.h; or you need the decorator + @rlib.entrypoint.export_symbol) compile_extra: list of parameters which will be directly passed to the compiler From noreply at buildbot.pypy.org Wed Jan 28 18:49:37 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 28 Jan 2015 18:49:37 +0100 (CET) Subject: [pypy-commit] pypy default: used the with block version instead of a manual try...finally Message-ID: <20150128174937.E68E31C0285@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75571:5ce6940b7455 Date: 2015-01-27 17:24 +0100 http://bitbucket.org/pypy/pypy/changeset/5ce6940b7455/ Log: used the with block version instead of a manual try...finally diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -477,8 +477,7 @@ @enforceargs(None, str) def write(self, value): self._check_closed() - ll_value, is_pinned, is_raw = rffi.get_nonmovingbuffer(value) - try: + with rffi.scoped_nonmovingbuffer(value) as ll_value: # note that since we got a nonmoving buffer, it is either raw # or already cannot move, so the arithmetics below are fine length = len(value) @@ -487,8 +486,6 @@ errno = rposix.get_saved_errno() c_clearerr(self._ll_file) raise IOError(errno, os.strerror(errno)) - finally: - rffi.free_nonmovingbuffer(value, ll_value, is_pinned, is_raw) def flush(self): self._check_closed() From noreply at 
buildbot.pypy.org Wed Jan 28 18:49:39 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 28 Jan 2015 18:49:39 +0100 (CET) Subject: [pypy-commit] pypy default: don't give all frames a .builtin module, even if honor__builtins__ is False Message-ID: <20150128174939.19BC21C0285@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r75572:a7ee096c07bf Date: 2015-01-28 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/a7ee096c07bf/ Log: don't give all frames a .builtin module, even if honor__builtins__ is False (the default). __setstate__ was forcing the existence of that attribute. diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -439,7 +439,10 @@ f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True) new_frame.f_backref = jit.non_virtual_ref(f_back) - new_frame.builtin = space.interp_w(Module, w_builtin) + if space.config.objspace.honor__builtins__: + new_frame.builtin = space.interp_w(Module, w_builtin) + else: + assert space.interp_w(Module, w_builtin) is space.builtin new_frame.set_blocklist([unpickle_block(space, w_blk) for w_blk in space.unpackiterable(w_blockstack)]) values_w = maker.slp_from_tuple_with_nulls(space, w_valuestack) From noreply at buildbot.pypy.org Wed Jan 28 23:43:33 2015 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 28 Jan 2015 23:43:33 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: fix _ssl test_npn_protocol when run -A against cpython Message-ID: <20150128224333.C736A1C00BF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.9 Changeset: r75573:5b9e66ec2e72 Date: 2015-01-28 17:16 -0500 http://bitbucket.org/pypy/pypy/changeset/5b9e66ec2e72/ Log: fix _ssl test_npn_protocol when run -A against cpython diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -226,7 +226,7 @@ 
import socket, _ssl, gc ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) ctx._set_npn_protocols(b'\x08http/1.1\x06spdy/2') - ss = ctx._wrap_socket(self.s, True, + ss = ctx._wrap_socket(self.s._sock, True, server_hostname="svn.python.org") self.s.close() del ss; gc.collect() From noreply at buildbot.pypy.org Wed Jan 28 23:43:35 2015 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 28 Jan 2015 23:43:35 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: provide SSLContext get_verify_flags Message-ID: <20150128224335.24F6E1C00BF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.9 Changeset: r75574:140b8cf78407 Date: 2015-01-28 17:36 -0500 http://bitbucket.org/pypy/pypy/changeset/140b8cf78407/ Log: provide SSLContext get_verify_flags diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1071,6 +1071,11 @@ "check_hostname is enabled.") libssl_SSL_CTX_set_verify(self.ctx, mode, None) + def descr_get_verify_flags(self, space): + store = libssl_SSL_CTX_get_cert_store(self.ctx) + flags = libssl_X509_VERIFY_PARAM_get_flags(store[0].c_param) + return space.wrap(flags) + def descr_get_check_hostname(self, space): return space.newbool(self.check_hostname) @@ -1142,7 +1147,7 @@ if libssl_SSL_CTX_set_tmp_dh(self.ctx, dh) == 0: raise _ssl_seterror(space, None, 0) finally: - libssl_DH_free(dh) + libssl_DH_free(dh) def load_verify_locations_w(self, space, w_cafile=None, w_capath=None, w_cadata=None): @@ -1286,6 +1291,7 @@ _SSLContext.descr_set_options), verify_mode=GetSetProperty(_SSLContext.descr_get_verify_mode, _SSLContext.descr_set_verify_mode), + verify_flags=GetSetProperty(_SSLContext.descr_get_verify_flags), check_hostname=GetSetProperty(_SSLContext.descr_get_check_hostname, _SSLContext.descr_set_check_hostname), ) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ 
b/pypy/module/_ssl/test/test_ssl.py @@ -136,6 +136,9 @@ exc = raises(ValueError, "s.verify_mode = 1234") assert str(exc.value) == "invalid value for verify_mode" + assert type(s.verify_flags) is long + assert s.verify_flags == _ssl.VERIFY_DEFAULT + s.check_hostname = True assert s.check_hostname diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -52,6 +52,7 @@ ASN1_STRING = lltype.Ptr(lltype.ForwardReference()) ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM') X509_NAME = rffi.COpaquePtr('X509_NAME') +X509_VERIFY_PARAM = rffi.COpaquePtr('X509_VERIFY_PARAM') stack_st_X509_OBJECT = rffi.COpaquePtr('struct stack_st_X509_OBJECT') DH = rffi.COpaquePtr('DH') @@ -148,8 +149,8 @@ [('value', ASN1_STRING)]) x509_store_st = rffi_platform.Struct( 'struct x509_store_st', - [('objs', stack_st_X509_OBJECT)]) - + [('objs', stack_st_X509_OBJECT), + ('param', X509_VERIFY_PARAM)]) x509_object_st = rffi_platform.Struct( 'struct x509_object_st', [('type', rffi.INT)]) @@ -307,6 +308,7 @@ ssl_external('X509_get_ext', [X509, rffi.INT], X509_EXTENSION) ssl_external('X509V3_EXT_get', [X509_EXTENSION], X509V3_EXT_METHOD) +ssl_external('X509_VERIFY_PARAM_get_flags', [X509_VERIFY_PARAM], rffi.ULONG) ssl_external('X509_STORE_add_cert', [X509_STORE, X509], rffi.INT) ssl_external('OBJ_obj2txt', From noreply at buildbot.pypy.org Thu Jan 29 06:06:47 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Jan 2015 06:06:47 +0100 (CET) Subject: [pypy-commit] pypy default: test, fix bogus shape, stride, backstride creation in SliceIterator Message-ID: <20150129050647.5A2191C023F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75575:7e9970164f05 Date: 2015-01-29 07:07 +0200 http://bitbucket.org/pypy/pypy/changeset/7e9970164f05/ Log: test, fix bogus shape,stride,backstride creation in SliceIterator diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- 
a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -252,10 +252,6 @@ # Copy logic from npyiter_coalesce_axes, used in ufunc iterators # and in nditer's with 'external_loop' flag can_coalesce = True - if it.order == 'F': - fastest = 0 - else: - fastest = -1 for idim in range(it.ndim - 1): for op_it, _ in it.iters: if op_it is None: @@ -275,7 +271,7 @@ if can_coalesce: for i in range(len(it.iters)): new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it, - it.order, fastest) + it.order) it.iters[i] = (new_iter, new_iter.reset()) if len(it.shape) > 1: if it.order == 'F': @@ -289,7 +285,7 @@ break # Always coalesce at least one for i in range(len(it.iters)): - new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it, 'C', -1) + new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it, 'C') it.iters[i] = (new_iter, new_iter.reset()) if len(it.shape) > 1: if it.order == 'F': @@ -300,12 +296,11 @@ it.shape = [1] -def coalesce_iter(old_iter, op_flags, it, order, fastest=-1, flat=True): +def coalesce_iter(old_iter, op_flags, it, order, flat=True): ''' We usually iterate through an array one value at a time. But after coalesce(), getoperand() will return a slice by removing - the fastest varying dimension from the beginning or end of the shape. - XXX - what happens on swapaxis arrays? + the fastest varying dimension(s) from the beginning or end of the shape. 
If flat is true, then the slice will be 1d, otherwise stack up the shape of the fastest varying dimension in the slice, so an iterator of a 'C' array of shape (2,4,3) after two calls to coalesce will iterate 2 times over a slice @@ -319,6 +314,9 @@ new_strides = strides[1:] new_backstrides = backstrides[1:] _stride = old_iter.slice_stride + [strides[0]] + _shape = old_iter.slice_shape + [shape[0]] + _backstride = old_iter.slice_backstride + [strides[0] * (shape[0] - 1)] + fastest = shape[0] else: new_shape = shape[:-1] new_strides = strides[:-1] @@ -326,14 +324,15 @@ # use the operand's iterator's rightmost stride, # even if it is not the fastest (for 'F' or swapped axis) _stride = [strides[-1]] + old_iter.slice_stride - _shape = [shape[fastest]] + old_iter.slice_shape - _backstride = [(_shape[fastest] - 1) * _stride[0]] + old_iter.slice_backstride + _shape = [shape[-1]] + old_iter.slice_shape + _backstride = [(shape[-1] - 1) * strides[-1]] + old_iter.slice_backstride + fastest = shape[-1] if flat: _shape = [support.product(_shape)] if len(_stride) > 1: _stride = [min(_stride[0], _stride[1])] _backstride = [(shape[0] - 1) * _stride[0]] - return SliceIter(old_iter.array, old_iter.size / shape[fastest], + return SliceIter(old_iter.array, old_iter.size / fastest, new_shape, new_strides, new_backstrides, _shape, _stride, _backstride, op_flags, it) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -269,6 +269,26 @@ aout = ufunc_sum(ai) assert aout.shape == (3, 3) + def test_frompyfunc_fortran(self): + import numpy as np + def tofrom_fortran(in0, out0): + out0[:] = in0.T + + def lapack_like_times2(in0, out0): + a = np.empty(in0.T.shape, in0.dtype) + tofrom_fortran(in0, a) + a *= 2 + tofrom_fortran(a, out0) + + times2 = np.frompyfunc([lapack_like_times2], 1, 1, + signature='(m,n)->(m,n)', + dtypes=[np.dtype(float), 
np.dtype(float)], + stack_inputs=True, + ) + in0 = np.arange(3300, dtype=float).reshape(100, 33) + out0 = times2(in0) + assert out0.shape == in0.shape + assert (out0 == in0 * 2).all() def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -651,14 +651,10 @@ w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape) # coalesce each iterators, according to inner_dimensions - if nd_it.order == 'F': - fastest = 0 - else: - fastest = -1 for i in range(len(inargs) + len(outargs)): for j in range(self.core_num_dims[i]): new_iter = coalesce_iter(nd_it.iters[i][0], nd_it.op_flags[i], - nd_it, nd_it.order, fastest, flat=False) + nd_it, nd_it.order, flat=False) nd_it.iters[i] = (new_iter, new_iter.reset()) # do the iteration while not nd_it.done: From noreply at buildbot.pypy.org Thu Jan 29 12:09:25 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 29 Jan 2015 12:09:25 +0100 (CET) Subject: [pypy-commit] pypy vmprof: port changes from cpython Message-ID: <20150129110925.A7B821C00BF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r75576:5242f8505dac Date: 2015-01-29 13:09 +0200 http://bitbucket.org/pypy/pypy/changeset/5242f8505dac/ Log: port changes from cpython diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,7 +60,8 @@ pypy_vmprof_init = rffi.llexternal("pypy_vmprof_init", [], lltype.Void, compilation_info=eci) vmprof_enable = rffi.llexternal("vmprof_enable", - [rffi.INT, rffi.INT, rffi.LONG, rffi.INT], + [rffi.INT, rffi.LONG, rffi.INT, + rffi.CCHARP, rffi.INT], rffi.INT, compilation_info=eci, save_err=rffi.RFFI_SAVE_ERRNO) vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT, @@ -148,7 +149,8 @@ 
space.set_code_callback(vmprof_register_code) if we_are_translated(): # does not work untranslated - res = vmprof_enable(fileno, -1, period, 0) + res = vmprof_enable(fileno, period, 0, + lltype.nullptr(rffi.CCHARP.TO), 0) else: res = 0 if res == -1: diff --git a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c --- a/pypy/module/_vmprof/src/get_custom_offset.c +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -5,7 +5,11 @@ long pypy_find_codemap_at_addr(long); long pypy_yield_codemap_at_addr(long, long, long*); -static ptrdiff_t vmprof_unw_get_custom_offset(void* ip) { +void vmprof_set_tramp_range(void* start, void* end) +{ +} + +static ptrdiff_t vmprof_unw_get_custom_offset(void* ip, unw_cursor_t *cp) { intptr_t ip_l = (intptr_t)ip; if (ip_l < pypy_jit_start_addr() || ip_l > pypy_jit_end_addr()) { diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -9,7 +9,9 @@ * * Tested only on gcc, linux, x86_64. 
* - * Copyright (C) 2014 Antonio Cuni - anto.cuni at gmail.com + * Copyright (C) 2014-2015 + * Antonio Cuni - anto.cuni at gmail.com + * Maciej Fijalkowski - fijall at gmail.com * */ @@ -34,8 +36,7 @@ #define MAX_FUNC_NAME 128 #define MAX_STACK_DEPTH 64 -static FILE* profile_file; -static FILE* symbol_file = NULL; +static FILE* profile_file = NULL; void* vmprof_mainloop_func; static ptrdiff_t mainloop_sp_offset; static vmprof_get_virtual_ip_t mainloop_get_virtual_ip; @@ -54,10 +55,6 @@ fwrite(&x, sizeof(x), 1, f); } -static void prof_char(FILE *f, char x) { - fwrite(&x, sizeof(x), 1, f); -} - static void prof_header(FILE* f, long period_usec) { prof_word(f, 0); prof_word(f, 3); @@ -68,7 +65,9 @@ static void prof_write_stacktrace(FILE* f, void** stack, int depth, int count) { int i; - prof_char(f, MARKER_STACKTRACE); + char marker = MARKER_STACKTRACE; + + fwrite(&marker, 1, 1, f); prof_word(f, count); prof_word(f, depth); for(i=0; i 1024) { + lgt = 1024; + } + buf[0] = MARKER_VIRTUAL_IP; + ((void **)(((void*)buf) + 1))[0] = start; + ((long *)(((void*)buf) + 1 + sizeof(long)))[0] = lgt - 2 * sizeof(long) - 1; + strncpy(buf + 2 * sizeof(long) + 1, name, 1024 - 2 * sizeof(long) - 1); + fwrite(buf, lgt, 1, profile_file); } diff --git a/pypy/module/_vmprof/src/vmprof.h b/pypy/module/_vmprof/src/vmprof.h --- a/pypy/module/_vmprof/src/vmprof.h +++ b/pypy/module/_vmprof/src/vmprof.h @@ -12,7 +12,11 @@ void vmprof_register_virtual_function(const char* name, void* start, void* end); -int vmprof_enable(int fd, int sym_fd, long period_usec, int write_header); +int vmprof_enable(int fd, long period_usec, int write_header, char* vips, + int vips_len); int vmprof_disable(void); +// XXX: this should be part of _vmprof (the CPython extension), not vmprof (the library) +void vmprof_set_tramp_range(void* start, void* end); + #endif From noreply at buildbot.pypy.org Thu Jan 29 14:05:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 Jan 2015 14:05:47 +0100 (CET) Subject: 
[pypy-commit] pypy stmgc-c7: muuuuuch faster Message-ID: <20150129130547.68C6C1C04BC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75577:2b591ae2eab4 Date: 2015-01-29 14:05 +0100 http://bitbucket.org/pypy/pypy/changeset/2b591ae2eab4/ Log: muuuuuch faster diff --git a/pypy/stm/print_stm_log.py b/pypy/stm/print_stm_log.py --- a/pypy/stm/print_stm_log.py +++ b/pypy/stm/print_stm_log.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env pypy import sys import struct, re, linecache From noreply at buildbot.pypy.org Thu Jan 29 14:14:54 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 29 Jan 2015 14:14:54 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: abstraxct Message-ID: <20150129131454.586051C00BF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5491:4a4b4ae9dceb Date: 2015-01-29 15:15 +0200 http://bitbucket.org/pypy/extradoc/changeset/4a4b4ae9dceb/ Log: abstraxct diff --git a/talk/divio-zurich-2015/abstract.rst b/talk/divio-zurich-2015/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/divio-zurich-2015/abstract.rst @@ -0,0 +1,14 @@ +PyPy is a fast and compliant alternative implementation for the Python +programming language. In this talk I would like to present PyPy, it's goals, +it's history and the current status of running real world Python code. +The talk will cover some architectural details like the implementation +of our Just in Time compiler as well a bit more esoteric features like Software +Transactional Memory, which promises to provide a GIL less PyPy. It'll also +contain a fair bit about benchmarking and how to port/move your code to +PyPy based on a case study. + +Maciej Fijalkowski is a long time PyPy core developer, running a consulting +business around PyPy, baroquesoftware.com. He has been involved in various +pieces of PyPy, including garbage collector, just in time compiler, +assembler generation etc. 
+ From noreply at buildbot.pypy.org Thu Jan 29 17:42:14 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Jan 2015 17:42:14 +0100 (CET) Subject: [pypy-commit] pypy release-2.5.x: start 2.5.x release branch Message-ID: <20150129164214.9F8BB1C00BF@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.5.x Changeset: r75578:1cc42435f09b Date: 2015-01-29 18:07 +0200 http://bitbucket.org/pypy/pypy/changeset/1cc42435f09b/ Log: start 2.5.x release branch diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.8" /* PyPy version as a string */ -#define PYPY_VERSION "2.5.0-alpha0" +#define PYPY_VERSION "2.5.0-beta0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 5, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 5, 0, "beta", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Thu Jan 29 17:42:15 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Jan 2015 17:42:15 +0100 (CET) Subject: [pypy-commit] pypy default: bump version to 2.6.0 Message-ID: <20150129164215.D044A1C00BF@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75579:397b96217b85 Date: 2015-01-29 18:08 +0200 http://bitbucket.org/pypy/pypy/changeset/397b96217b85/ Log: bump version to 2.6.0 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ 
b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.8" /* PyPy version as a string */ -#define PYPY_VERSION "2.5.0-alpha0" +#define PYPY_VERSION "2.6.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 5, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 6, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Thu Jan 29 17:42:17 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Jan 2015 17:42:17 +0100 (CET) Subject: [pypy-commit] pypy release-2.5.x: document the release (graft this to default when releasing) Message-ID: <20150129164217.111C31C00BF@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.5.x Changeset: r75580:00c006b179d5 Date: 2015-01-29 18:38 +0200 http://bitbucket.org/pypy/pypy/changeset/00c006b179d5/ Log: document the release (graft this to default when releasing) diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -65,9 +65,9 @@ # built documents. # # The short X.Y version. -version = '2.4' +version = '2.5' # The full version, including alpha/beta/rc tags. -release = '2.4.0' +release = '2.5.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -22,12 +22,12 @@ will capture the revision number of this change for the release; some of the next updates may be done before or after branching; make sure things are ported back to the trunk and to the branch as - necessary; also update the version number in pypy/doc/conf.py, - and in pypy/doc/index.rst + necessary; also update the version number in pypy/doc/conf.py. * update pypy/doc/contributor.rst (and possibly LICENSE) pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst - and create a fresh whatsnew_head.rst after the release + create a fresh whatsnew_head.rst after the release + and add the new file to pypy/doc/index-of-whatsnew.rst * go to pypy/tool/release and run: force-builds.py * wait for builds to complete, make sure there are no failures @@ -42,6 +42,7 @@ prefer a clearly labeled source package * write release announcement pypy/doc/release-x.y(.z).txt the release announcement should contain a direct link to the download page + and add new files to pypy/doc/index-of-release-notes.rst * update pypy.org (under extradoc/pypy.org), rebuild and commit * post announcement on morepypy.blogspot.com diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-2.5.0.rst release-2.4.0.rst release-2.3.1.rst release-2.3.0.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. 
toctree:: whatsnew-head.rst + whatsnew-2.5.0.rst whatsnew-2.4.0.rst whatsnew-2.3.1.rst whatsnew-2.3.0.rst diff --git a/pypy/doc/release-2.5.0.rst b/pypy/doc/release-2.5.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.5.0.rst @@ -0,0 +1,98 @@ +================================================= +PyPy 2.5 - XXXXXX +================================================= + +We're pleased to announce PyPy 2.5, which contains significant performance +enhancements and bug fixes. + +You can download the PyPy 2.5.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and for those who donate to our three sub-projects. +We've shown quite a bit of progress, but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: + +* `Py3k`_ (supporting Python 3.x): We have released a Python 3.2.5 compatible version + we call PyPy3 2.3.1, and are working toward a Python 3.3 compatible version + +* `STM`_ (software transactional memory): We have released a first working version, + and continue to try out new promising paths of achieving a fast multithreaded Python + +* `NumPy`_ which requires installation of our fork of upstream numpy, + available `on bitbucket`_ + +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _`NumPy`: http://pypy.org/numpydonate.html +.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy +.. _`the Python Software Foundation`: https://www.python.org/psf/ +.. _`match funds`: http://morepypy.blogspot.com/2014/09/python-software-foundation-matching.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. 
+ +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows, and OpenBSD), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +Highlights +========== + +The past six months have seen pypy mature and grow, as rpython becomes the goto +solution for writing dynamic language interpreters. Our separation of rpython +and the python interpreter PyPy is now much clearer in the `documentation`_ . + +We have improved warmup time as well as jitted code performance more than 10% +compared to pypy-2.4.0, due to internal cleanup and gc nursery improvements. + +Our integrated numpy support gained much of the GenericUfunc api in order to +support the lapack/blas linalg module of numpy. This dovetails with work in the +pypy/numpy repository to support linalg both through the (slower) cpyext capi +interface and also via (the faster) pure python cffi interface, using an +extended frompyfunc() api. + +Dictionaries are now ordered by default, see the `blog post`_ + +Issues reported with our previous release were fixed after reports from users on +our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at +#pypy. Here is a summary of some of the user-facing changes; +for more information see `whats-new`_: + +* Our nightly translations use --shared by default, including on OS/X and linux + +* We now more carefully handle errno (and GetLastError, WSAGetLastError) tying +the handlers as close as possible to the external function call, in non-jitted +as well as jitted code. + +* Many issues were resolved_ since the 2.4.0 release in September 2014 + +.. 
_`documentation`: http://doc.pypy.org +.. _`blog post`: http://morepypy.blogspot.com/2015/01/faster-more-memory-efficient-and-more.html +.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.5.0.html +.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved + +We have further improvements on the way: rpython file handling, +finishing numpy linalg compatibility, numpy object dtypes, a better profiler, +as well as support for Python stdlib 2.7.9. + +Please try it out and let us know what you think. We especially welcome +success stories, we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-2.5.0.rst b/pypy/doc/whatsnew-2.5.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.5.0.rst @@ -0,0 +1,139 @@ +======================= +What's new in PyPy 2.5 +======================= + +.. this is a revision shortly after release-2.4.x +.. startrev: 7026746cbb1b + +.. branch: win32-fixes5 + +Fix c code generation for msvc so empty "{ }" are avoided in unions, +Avoid re-opening files created with NamedTemporaryFile, +Allocate by 4-byte chunks in rffi_platform, +Skip testing objdump if it does not exist, +and other small adjustments in own tests + +.. branch: rtyper-stuff + +Small internal refactorings in the rtyper. + +.. branch: var-in-Some + +Store annotations on the Variable objects, rather than in a big dict. +Introduce a new framework for double-dispatched annotation implementations. + +.. branch: ClassRepr + +Refactor ClassRepr and make normalizecalls independent of the rtyper. + +.. branch: remove-remaining-smm + +Remove all remaining multimethods. + +.. branch: improve-docs + +Split RPython documentation from PyPy documentation and clean up. There now is +a clearer separation between documentation for users, developers and people +interested in background information. + +.. branch: kill-multimethod + +Kill multimethod machinery, all multimethods were removed earlier. + +.. 
branch nditer-external_loop + +Implement `external_loop` arguement to numpy's nditer + +.. branch kill-rctime + +Rename pypy/module/rctime to pypy/module/time, since it contains the implementation of the 'time' module. + +.. branch: ssa-flow + +Use SSA form for flow graphs inside build_flow() and part of simplify_graph() + +.. branch: ufuncapi + +Implement most of the GenericUfunc api to support numpy linalg. The strategy is +to encourage use of pure python or cffi ufuncs by extending frompyfunc(). +See the docstring of frompyfunc for more details. This dovetails with a branch +of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in +python, calling lapack from cffi. The branch also support traditional use of +cpyext GenericUfunc definitions in c. + +.. branch: all_ordered_dicts + +This makes ordered dicts the default dictionary implementation in +RPython and in PyPy. It polishes the basic idea of rordereddict.py +and then fixes various things, up to simplifying +collections.OrderedDict. + +Note: Python programs can rely on the guaranteed dict order in PyPy +now, but for compatibility with other Python implementations they +should still use collections.OrderedDict where that really matters. +Also, support for reversed() was *not* added to the 'dict' class; +use OrderedDict. + +Benchmark results: in the noise. A few benchmarks see good speed +improvements but the average is very close to parity. + +.. branch: berkerpeksag/fix-broken-link-in-readmerst-1415127402066 +.. branch: bigint-with-int-ops +.. branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 +.. branch: float-opt +.. branch: gc-incminimark-pinning + +This branch adds an interface rgc.pin which would (very temporarily) +make object non-movable. That's used by rffi.alloc_buffer and +rffi.get_nonmovable_buffer and improves performance considerably for +IO operations. + +.. branch: gc_no_cleanup_nursery + +A branch started by Wenzhu Man (SoC'14) and then done by fijal. 
It +removes the clearing of the nursery. The drawback is that new objects +are not automatically filled with zeros any longer, which needs some +care, mostly for GC references (which the GC tries to follow, so they +must not contain garbage). The benefit is a quite large speed-up. + +.. branch: improve-gc-tracing-hooks +.. branch: improve-ptr-conv-error +.. branch: intern-not-immortal + +Fix intern() to return mortal strings, like in CPython. + +.. branch: issue1922-take2 +.. branch: kill-exported-symbols-list +.. branch: kill-rctime +.. branch: kill_ll_termios +.. branch: look-into-all-modules +.. branch: nditer-external_loop +.. branch: numpy-generic-item +.. branch: osx-shared + +``--shared`` support on OS/X (thanks wouter) + +.. branch: portable-threadlocal +.. branch: pypy-dont-copy-ops +.. branch: recursion_and_inlining +.. branch: slim-down-resumedescr +.. branch: squeaky/use-cflags-for-compiling-asm +.. branch: unicode-fix +.. branch: zlib_zdict + +.. branch: errno-again + +Changes how errno, GetLastError, and WSAGetLastError are handled. +The idea is to tie reading the error status as close as possible to +the external function call. This fixes some bugs, both of the very +rare kind (e.g. errno on Linux might in theory be overwritten by +mmap(), called rarely during major GCs, if such a major GC occurs at +exactly the wrong time), and some of the less rare kind +(particularly on Windows tests). + +.. branch: osx-package.py +.. branch: package.py-helpful-error-message + +.. branch: typed-cells + +Improve performance of integer globals and class attributes. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,138 +2,6 @@ What's new in PyPy 2.5+ ======================= -.. this is a revision shortly after release-2.4.x -.. startrev: 7026746cbb1b +.. this is a revision shortly after release-2.5.x +.. startrev: 397b96217b85 -.. 
branch: win32-fixes5 - -Fix c code generation for msvc so empty "{ }" are avoided in unions, -Avoid re-opening files created with NamedTemporaryFile, -Allocate by 4-byte chunks in rffi_platform, -Skip testing objdump if it does not exist, -and other small adjustments in own tests - -.. branch: rtyper-stuff - -Small internal refactorings in the rtyper. - -.. branch: var-in-Some - -Store annotations on the Variable objects, rather than in a big dict. -Introduce a new framework for double-dispatched annotation implementations. - -.. branch: ClassRepr - -Refactor ClassRepr and make normalizecalls independent of the rtyper. - -.. branch: remove-remaining-smm - -Remove all remaining multimethods. - -.. branch: improve-docs - -Split RPython documentation from PyPy documentation and clean up. There now is -a clearer separation between documentation for users, developers and people -interested in background information. - -.. branch: kill-multimethod - -Kill multimethod machinery, all multimethods were removed earlier. - -.. branch nditer-external_loop - -Implement `external_loop` arguement to numpy's nditer - -.. branch kill-rctime - -Rename pypy/module/rctime to pypy/module/time, since it contains the implementation of the 'time' module. - -.. branch: ssa-flow - -Use SSA form for flow graphs inside build_flow() and part of simplify_graph() - -.. branch: ufuncapi - -Implement most of the GenericUfunc api to support numpy linalg. The strategy is -to encourage use of pure python or cffi ufuncs by extending frompyfunc(). -See the docstring of frompyfunc for more details. This dovetails with a branch -of pypy/numpy - cffi-linalg which is a rewrite of the _umath_linalg module in -python, calling lapack from cffi. The branch also support traditional use of -cpyext GenericUfunc definitions in c. - -.. branch: all_ordered_dicts - -This makes ordered dicts the default dictionary implementation in -RPython and in PyPy. 
It polishes the basic idea of rordereddict.py -and then fixes various things, up to simplifying -collections.OrderedDict. - -Note: Python programs can rely on the guaranteed dict order in PyPy -now, but for compatibility with other Python implementations they -should still use collections.OrderedDict where that really matters. -Also, support for reversed() was *not* added to the 'dict' class; -use OrderedDict. - -Benchmark results: in the noise. A few benchmarks see good speed -improvements but the average is very close to parity. - -.. branch: berkerpeksag/fix-broken-link-in-readmerst-1415127402066 -.. branch: bigint-with-int-ops -.. branch: dstufft/update-pip-bootstrap-location-to-the-new-1420760611527 -.. branch: float-opt -.. branch: gc-incminimark-pinning - -This branch adds an interface rgc.pin which would (very temporarily) -make object non-movable. That's used by rffi.alloc_buffer and -rffi.get_nonmovable_buffer and improves performance considerably for -IO operations. - -.. branch: gc_no_cleanup_nursery - -A branch started by Wenzhu Man (SoC'14) and then done by fijal. It -removes the clearing of the nursery. The drawback is that new objects -are not automatically filled with zeros any longer, which needs some -care, mostly for GC references (which the GC tries to follow, so they -must not contain garbage). The benefit is a quite large speed-up. - -.. branch: improve-gc-tracing-hooks -.. branch: improve-ptr-conv-error -.. branch: intern-not-immortal - -Fix intern() to return mortal strings, like in CPython. - -.. branch: issue1922-take2 -.. branch: kill-exported-symbols-list -.. branch: kill-rctime -.. branch: kill_ll_termios -.. branch: look-into-all-modules -.. branch: nditer-external_loop -.. branch: numpy-generic-item -.. branch: osx-shared - -``--shared`` support on OS/X (thanks wouter) - -.. branch: portable-threadlocal -.. branch: pypy-dont-copy-ops -.. branch: recursion_and_inlining -.. branch: slim-down-resumedescr -.. 
branch: squeaky/use-cflags-for-compiling-asm -.. branch: unicode-fix -.. branch: zlib_zdict - -.. branch: errno-again - -Changes how errno, GetLastError, and WSAGetLastError are handled. -The idea is to tie reading the error status as close as possible to -the external function call. This fixes some bugs, both of the very -rare kind (e.g. errno on Linux might in theory be overwritten by -mmap(), called rarely during major GCs, if such a major GC occurs at -exactly the wrong time), and some of the less rare kind -(particularly on Windows tests). - -.. branch: osx-package.py -.. branch: package.py-helpful-error-message - -.. branch: typed-cells - -Improve performance of integer globals and class attributes. From noreply at buildbot.pypy.org Thu Jan 29 18:44:05 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Jan 2015 18:44:05 +0100 (CET) Subject: [pypy-commit] pypy release-2.5.x: tweak PyPy3 release name (Samureus) Message-ID: <20150129174405.746501C01CA@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.5.x Changeset: r75581:b3eacc6c2a5f Date: 2015-01-29 19:27 +0200 http://bitbucket.org/pypy/pypy/changeset/b3eacc6c2a5f/ Log: tweak PyPy3 release name (Samureus) diff --git a/pypy/doc/release-2.5.0.rst b/pypy/doc/release-2.5.0.rst --- a/pypy/doc/release-2.5.0.rst +++ b/pypy/doc/release-2.5.0.rst @@ -16,7 +16,7 @@ so we can finish those projects! The three sub-projects are: * `Py3k`_ (supporting Python 3.x): We have released a Python 3.2.5 compatible version - we call PyPy3 2.3.1, and are working toward a Python 3.3 compatible version + we call PyPy3 2.4.0, and are working toward a Python 3.3 compatible version * `STM`_ (software transactional memory): We have released a first working version, and continue to try out new promising paths of achieving a fast multithreaded Python @@ -28,8 +28,6 @@ .. _`STM`: http://pypy.org/tmdonate2.html .. _`NumPy`: http://pypy.org/numpydonate.html .. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy -.. 
_`the Python Software Foundation`: https://www.python.org/psf/ -.. _`match funds`: http://morepypy.blogspot.com/2014/09/python-software-foundation-matching.html What is PyPy? ============= @@ -52,22 +50,23 @@ Highlights ========== -The past six months have seen pypy mature and grow, as rpython becomes the goto -solution for writing dynamic language interpreters. Our separation of rpython -and the python interpreter PyPy is now much clearer in the `documentation`_ . +* The past months have seen pypy mature and grow, as rpython becomes the goto +solution for writing fast dynamic language interpreters. Our separation of +rpython and the python interpreter PyPy is now much clearer in the +`documentation`_ . -We have improved warmup time as well as jitted code performance more than 10% +* We have improved warmup time as well as jitted code performance: more than 10% compared to pypy-2.4.0, due to internal cleanup and gc nursery improvements. -Our integrated numpy support gained much of the GenericUfunc api in order to +* Our integrated numpy support gained much of the GenericUfunc api in order to support the lapack/blas linalg module of numpy. This dovetails with work in the pypy/numpy repository to support linalg both through the (slower) cpyext capi interface and also via (the faster) pure python cffi interface, using an extended frompyfunc() api. -Dictionaries are now ordered by default, see the `blog post`_ +* Dictionaries are now ordered by default, see the `blog post`_ -Issues reported with our previous release were fixed after reports from users on +* Issues reported with our previous release were fixed after reports from users on our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy. 
Here is a summary of some of the user-facing changes; for more information see `whats-new`_: From noreply at buildbot.pypy.org Thu Jan 29 18:44:06 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Jan 2015 18:44:06 +0100 (CET) Subject: [pypy-commit] pypy release-2.5.x: update contributor list, more than 10 new people joined! Message-ID: <20150129174406.AA85A1C01CA@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.5.x Changeset: r75582:87f177acec31 Date: 2015-01-29 19:44 +0200 http://bitbucket.org/pypy/pypy/changeset/87f177acec31/ Log: update contributor list, more than 10 new people joined! diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -42,19 +42,19 @@ Amaury Forgeot d'Arc Samuele Pedroni Alex Gaynor + Brian Kearns + Matti Picus + Philip Jenvey Michael Hudson David Schneider - Matti Picus - Brian Kearns - Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson Manuel Jacob + Ronan Lamy Anders Chrigstrom Eric van Riet Paap - Ronan Lamy Wim Lavrijsen Richard Emslie Alexander Schremmer @@ -68,9 +68,9 @@ Camillo Bruni Laura Creighton Toon Verwaest + Romain Guillebert Leonardo Santagada Seo Sanghyeon - Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -91,15 +91,16 @@ Michal Bendowski Jan de Mooij stian + Tyler Wade Michael Foord Stephan Diehl - Tyler Wade Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin Bob Ippolito Bruno Gola + David Malcolm Jean-Paul Calderone Timo Paulssen Squeaky @@ -108,18 +109,19 @@ Marius Gedminas Martin Matusiak Konstantin Lopuhin + Wenzhu Man John Witulski - Wenzhu Man + Laurence Tratt + Ivan Sichmann Freitas Greg Price Dario Bertini Mark Pearse Simon Cross - Ivan Sichmann Freitas Andreas Stührk + Stefano Rivera Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov - Stefano Rivera Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy @@ -129,7 +131,6 @@ tav Taavi Burns Georg Brandl - Laurence Tratt Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -141,13 +142,12 @@ Jeremy Thurgood Rami Chowdhury Tobias Pape - David Malcolm Eugene Oden Henry Mason Vasily Kuznetsov Preston Timmons + David Ripton Jeff Terrace - David Ripton Dusty Phillips Lukas Renggli Guenter Jantzen @@ -166,13 +166,16 @@ Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila + Yichao Yu Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel + Wouter van Heyst Brian Dorsey Victor Stinner Andrews Medina + anatoly techtonik Stuart Williams Jasper Schulz Christian Hudon @@ -182,12 +185,11 @@ Michael Cheng Justas Sadzevicius Gasper Zejn - anatoly techtonik Neil Shepperd + Stanislaw Halik Mikael Schönenberg Elmo M?ntynen Jonathan David Riehl - Stanislaw Halik Anders Qvist Corbin Simpson Chirag Jadwani @@ -196,10 +198,13 @@ Vincent Legoll Alan McIntyre Alexander Sedov + Attila Gobi Christopher Pope Christian Tismer Marc Abramowitz Dan Stromberg + Arjun Naik + Valentina Mukhamedzhanova Stefano Parmesan Alexis Daboville Jens-Uwe Mager @@ -213,8 +218,6 @@ Sylvain Thenault Nathan Taylor Vladimir Kryachko - Arjun Naik - Attila Gobi Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -222,22 +225,23 @@ Ryan Gonzalez Ian Foote Kristjan Valur Jonsson + David Lievens Neil Blakey-Milner Lutz Paelike Lucio Torre Lars Wassermann - Valentina Mukhamedzhanova Henrik Vendelbo Dan Buch Miguel de Val Borro Artur Lisiecki Sergey Kishchenko - Yichao Yu Ignas Mikalajunas Christoph Gerum Martin Blais Lene Wagner Tomo Cocoa + Toni Mattis + Lucas Stadler roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez @@ -265,23 +269,30 @@ Stephan Busemann Rafał Gałczyński Christian Muirhead + Berker Peksag James Lan shoma hosaka - Daniel Neuh?user - Matthew Miller + Daniel Neuhäuser + Ben Mather + halgari + Boglarka Vezer + Chris Pressey Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Jim Baker Rodrigo Araújo - Jim Baker + Nikolaos-Digenis Karagiannis James Robert Armin Ronacher Brett Cannon + Donald Stufft yrttyr aliceinwire OlivierBlanvillain + Dan Sanders Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf @@ -295,6 +306,7 @@ Markus Unterwaditzer Even Wiik Thomassen jbs + squeaky soareschen Kurt Griffiths Mike Bayer @@ -306,6 +318,7 @@ Anna Ravencroft Dan Crosta Julien Phalip + Roman Podoliaka Dan Loewenherz Heinrich-Heine University, Germany diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -12,19 +12,19 @@ Amaury Forgeot d'Arc Samuele Pedroni Alex Gaynor + Brian Kearns + Matti Picus + Philip Jenvey Michael Hudson David Schneider - Matti Picus - Brian Kearns - Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson Manuel Jacob + Ronan Lamy Anders Chrigstrom Eric van Riet Paap - Ronan Lamy Wim Lavrijsen Richard Emslie Alexander Schremmer @@ -38,9 +38,9 @@ Camillo Bruni Laura Creighton Toon Verwaest + Romain Guillebert Leonardo Santagada Seo Sanghyeon - Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -61,15 +61,16 @@ Michal Bendowski Jan de Mooij stian + Tyler Wade Michael Foord Stephan Diehl - Tyler Wade Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin Bob Ippolito Bruno Gola + David Malcolm Jean-Paul Calderone Timo Paulssen Squeaky @@ -78,18 +79,19 @@ Marius Gedminas Martin Matusiak Konstantin Lopuhin + Wenzhu Man John Witulski - Wenzhu Man + Laurence Tratt + Ivan Sichmann Freitas Greg Price Dario Bertini Mark Pearse Simon Cross - Ivan Sichmann Freitas Andreas Stührk + 
Stefano Rivera Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov - Stefano Rivera Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy @@ -99,7 +101,6 @@ tav Taavi Burns Georg Brandl - Laurence Tratt Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -111,13 +112,12 @@ Jeremy Thurgood Rami Chowdhury Tobias Pape - David Malcolm Eugene Oden Henry Mason Vasily Kuznetsov Preston Timmons + David Ripton Jeff Terrace - David Ripton Dusty Phillips Lukas Renggli Guenter Jantzen @@ -136,13 +136,16 @@ Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila + Yichao Yu Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel + Wouter van Heyst Brian Dorsey Victor Stinner Andrews Medina + anatoly techtonik Stuart Williams Jasper Schulz Christian Hudon @@ -152,12 +155,11 @@ Michael Cheng Justas Sadzevicius Gasper Zejn - anatoly techtonik Neil Shepperd + Stanislaw Halik Mikael Schönenberg Elmo M?ntynen Jonathan David Riehl - Stanislaw Halik Anders Qvist Corbin Simpson Chirag Jadwani @@ -166,10 +168,13 @@ Vincent Legoll Alan McIntyre Alexander Sedov + Attila Gobi Christopher Pope Christian Tismer Marc Abramowitz Dan Stromberg + Arjun Naik + Valentina Mukhamedzhanova Stefano Parmesan Alexis Daboville Jens-Uwe Mager @@ -183,8 +188,6 @@ Sylvain Thenault Nathan Taylor Vladimir Kryachko - Arjun Naik - Attila Gobi Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -192,22 +195,23 @@ Ryan Gonzalez Ian Foote Kristjan Valur Jonsson + David Lievens Neil Blakey-Milner Lutz Paelike Lucio Torre Lars Wassermann - Valentina Mukhamedzhanova Henrik Vendelbo Dan Buch Miguel de Val Borro Artur Lisiecki Sergey Kishchenko - Yichao Yu Ignas Mikalajunas Christoph Gerum Martin Blais Lene Wagner Tomo Cocoa + Toni Mattis + Lucas Stadler roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez @@ -235,23 +239,30 @@ Stephan Busemann Rafał Gałczyński Christian Muirhead + Berker Peksag James Lan shoma hosaka - Daniel Neuh?user - Matthew Miller + Daniel Neuhäuser + Ben Mather + halgari + Boglarka Vezer + Chris Pressey Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Jim Baker Rodrigo Araújo - Jim Baker + Nikolaos-Digenis Karagiannis James Robert Armin Ronacher Brett Cannon + Donald Stufft yrttyr aliceinwire OlivierBlanvillain + Dan Sanders Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf @@ -265,6 +276,7 @@ Markus Unterwaditzer Even Wiik Thomassen jbs + squeaky soareschen Kurt Griffiths Mike Bayer @@ -276,5 +288,6 @@ Anna Ravencroft Dan Crosta Julien Phalip + Roman Podoliaka Dan Loewenherz diff --git a/pypy/doc/release-2.5.0.rst b/pypy/doc/release-2.5.0.rst --- a/pypy/doc/release-2.5.0.rst +++ b/pypy/doc/release-2.5.0.rst @@ -10,7 +10,9 @@ http://pypy.org/download.html We would like to thank our donors for the continued support of the PyPy -project, and for those who donate to our three sub-projects. +project, and for those who donate to our three sub-projects, as well as our +volunteers and contributors (10 new commiters joined PyPy since the last +release). We've shown quite a bit of progress, but we're slowly running out of funds. Please consider donating more, or even better convince your employer to donate, so we can finish those projects! 
The three sub-projects are: From noreply at buildbot.pypy.org Thu Jan 29 22:03:14 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Jan 2015 22:03:14 +0100 (CET) Subject: [pypy-commit] pypy release-2.5.x: formatting, add a section to project-ideas Message-ID: <20150129210314.145221C0134@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.5.x Changeset: r75583:0cb9ead08202 Date: 2015-01-29 23:03 +0200 http://bitbucket.org/pypy/pypy/changeset/0cb9ead08202/ Log: formatting, add a section to project-ideas diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -191,3 +191,37 @@ to make them work at all if they currently don't. A part of this work would be to get cpyext into a shape where it supports running Cython generated extensions. + +====================================== +Make more python modules pypy-freindly +====================================== + +Work has been started on a few popular python packages. Here is a partial +list of good work that needs to be finished: + +**matplotlib** https://github.com/mattip/matplotlib + + Status: the repo is an older version of matplotlib adapted to pypy and cpyext + + TODO: A suggested first step would be to merge the differences into + matplotlib/HEAD. The major problem is the use of a generic view into a + numpy ndarray. The int* fields would need to be converted into int[MAX_DIMS] + c-arrays and filled in. 
+ +**wxPython** https://bitbucket.org/waedt/wxpython_cffi + + Status: A GSOC 2013 project to adapt the Phoenix sip build system to cffi + + TODO: Merge the latest version of the wrappers and finish the sip conversion + +**pygame** https://github.com/CTPUG/pygame_cffi + + Status: see blog post + + TODO: see the end of the blog post + +**pyopengl** https://bitbucket.org/duangle/pyopengl-cffi + + Status: unknown + + diff --git a/pypy/doc/release-2.5.0.rst b/pypy/doc/release-2.5.0.rst --- a/pypy/doc/release-2.5.0.rst +++ b/pypy/doc/release-2.5.0.rst @@ -53,31 +53,31 @@ ========== * The past months have seen pypy mature and grow, as rpython becomes the goto -solution for writing fast dynamic language interpreters. Our separation of -rpython and the python interpreter PyPy is now much clearer in the -`documentation`_ . + solution for writing fast dynamic language interpreters. Our separation of + rpython and the python interpreter PyPy is now much clearer in the + `documentation`_ . * We have improved warmup time as well as jitted code performance: more than 10% -compared to pypy-2.4.0, due to internal cleanup and gc nursery improvements. + compared to pypy-2.4.0, due to internal cleanup and gc nursery improvements. * Our integrated numpy support gained much of the GenericUfunc api in order to -support the lapack/blas linalg module of numpy. This dovetails with work in the -pypy/numpy repository to support linalg both through the (slower) cpyext capi -interface and also via (the faster) pure python cffi interface, using an -extended frompyfunc() api. + support the lapack/blas linalg module of numpy. This dovetails with work in the + pypy/numpy repository to support linalg both through the (slower) cpyext capi + interface and also via (the faster) pure python cffi interface, using an + extended frompyfunc() api. 
* Dictionaries are now ordered by default, see the `blog post`_ * Issues reported with our previous release were fixed after reports from users on -our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at -#pypy. Here is a summary of some of the user-facing changes; -for more information see `whats-new`_: + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy. Here is a summary of some of the user-facing changes; + for more information see `whats-new`_: * Our nightly translations use --shared by default, including on OS/X and linux * We now more carefully handle errno (and GetLastError, WSAGetLastError) tying -the handlers as close as possible to the external function call, in non-jitted -as well as jitted code. + the handlers as close as possible to the external function call, in non-jitted + as well as jitted code. * Many issues were resolved_ since the 2.4.0 release in September 2014 From noreply at buildbot.pypy.org Thu Jan 29 22:05:11 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 29 Jan 2015 22:05:11 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add my fosdem2015 talk (WIP) Message-ID: <20150129210511.ACFB51C0134@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5492:48a04232f5e5 Date: 2015-01-29 22:04 +0100 http://bitbucket.org/pypy/extradoc/changeset/48a04232f5e5/ Log: Add my fosdem2015 talk (WIP) diff --git a/talk/fosdem2015/Makefile b/talk/fosdem2015/Makefile new file mode 100644 --- /dev/null +++ b/talk/fosdem2015/Makefile @@ -0,0 +1,18 @@ +# you can find rst2beamer.py here: +# https://bitbucket.org/antocuni/env/raw/default/bin/rst2beamer.py + +# WARNING: to work, it needs this patch for docutils +# https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 + +talk.pdf: talk.rst author.latex stylesheet.latex + python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + 
#/home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +view: talk.pdf + evince talk.pdf & + +xpdf: talk.pdf + xpdf talk.pdf & diff --git a/talk/fosdem2015/author.latex b/talk/fosdem2015/author.latex new file mode 100644 --- /dev/null +++ b/talk/fosdem2015/author.latex @@ -0,0 +1,9 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[PyPy and the future of the Python ecosystem]{PyPy and the future of the Python ecosystem} +\author[rguillebert] +{Romain Guillebert\\ +\includegraphics[width=80px]{../img/py-web-new.png}} + +\institute{Fosdem 2015} +\date{January 31st, 2015} diff --git a/talk/fosdem2015/beamerdefs.txt b/talk/fosdem2015/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/fosdem2015/beamerdefs.txt @@ -0,0 +1,108 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. 
|column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + + +.. nested blocks +.. =========================== + +.. |nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. |end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/fosdem2015/stylesheet.latex b/talk/fosdem2015/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/fosdem2015/stylesheet.latex @@ -0,0 +1,9 @@ +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/fosdem2015/talk.pdf b/talk/fosdem2015/talk.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d82c099a9e525d280f1eb9a6182730411a0a206d GIT binary patch [cut] diff --git a/talk/fosdem2015/talk.rst b/talk/fosdem2015/talk.rst new file mode 100644 --- /dev/null +++ b/talk/fosdem2015/talk.rst @@ -0,0 +1,119 @@ +.. include:: beamerdefs.txt + +=========================================== +PyPy and the future of the Python ecosystem +=========================================== + +Intro +----- + +* @rguillebert + +* PyPy contributor for 4 years + +* Library compatibility is one of my main interests + + - Cython backend for PyPy + + - NumPyPy + + - PyMetabiosis + +* Hire me + +* How can we get better implementations ? 
+ +* Without throwing away our language features and libraries + +Current situation (1/3) +----------------------- + +* CPython is by far the most popular implementations + + - Poor performance + + - No way to use multiple cores in a single process + +* PyPy has a fairly small marketshare + + - Better performance + + - PyPy-STM is a work in progress + +* According to PyPI stats, other implementations are virtually unused + +Current situation (2/3) +----------------------- + +* Go is pretty fast and is great at concurrency + +* Javascript is pretty fast + +* PHP is fast... + +Current situation (3/3) +----------------------- + +* It's pretty hard to switch between implementations because of C extensions () + +* C extensions are very useful but CPython can't evolve because of them + +* PyPy can evolve but has partial support of C extensions + +* CPython keeps its users captive with C extensions + +|pause| + +* More competition between implementations would benefit us + +Why can't other implementations implement the C API +--------------------------------------------------- + +* Libraries use more than the official API (Cython) + +* The official API makes assumptions on how the virtual machine is written + + - For example, the C API assumes that the virtual machine uses naive reference counting as its garbage collector + + - Naive reference counting is known for being inefficient and makes removing the GIL really hard (Python 1.4) + +* The C API itself is against performance and concurrency + +C APIs in other languages +------------------------- + +Can we implement a similar API ? +-------------------------------- + +* Yes ! + +* Not that many changes to the C API are required + +* It's even possible to do it in pure Python with CFFI + +* Designing it to make everyone happy is harder than to actually implement it + +* Making people port their extensions is hard + +* CPython would need to keep both APIs implement, at least for a while + +Where does PyPy fit in this ? 
+----------------------------- + +* The most flexible implementation + +* Already fast + +* Can already interact with C code easily + +* PyPy-STM + +What about short term ? +----------------------- + +* PyMetabiosis + +Thank you +--------- + +Questions ? diff --git a/talk/scipyindia2014/talk.pdf b/talk/scipyindia2014/talk.pdf index 4be849fc047dc21919a478732a2521894f5b01fc..5ed74a46ecdbe8247243ec67721e8309bbcc8fcf GIT binary patch [cut] diff --git a/talk/scipyindia2014/talk.rst b/talk/scipyindia2014/talk.rst --- a/talk/scipyindia2014/talk.rst +++ b/talk/scipyindia2014/talk.rst @@ -49,6 +49,16 @@ * Tracing Just-In-Time compiler +* Optimizes loops + +* Traces one iteration of a loop + +* Produces a linear trace of execution + +* Inlines almost everything + +* The trace is then optimized and compiled + * Removes overhead Demo @@ -166,6 +176,8 @@ * Is used the same way as numba, but different performance characteristics +* Needs a very recent version of PyPy + JitPy ----- @@ -199,6 +211,15 @@ * No more Global Interpreter Lock +Takeaway +-------- + +* Get PyPy at pypy.org (or from your favorite distribution) + +* Try it + +* Give us feedback (good or bad) + Thank You --------- From noreply at buildbot.pypy.org Fri Jan 30 01:51:26 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 30 Jan 2015 01:51:26 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: More stuff Message-ID: <20150130005126.4FEAB1C0134@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5493:6304fdfd89fb Date: 2015-01-30 01:51 +0100 http://bitbucket.org/pypy/extradoc/changeset/6304fdfd89fb/ Log: More stuff diff --git a/talk/fosdem2015/talk.pdf b/talk/fosdem2015/talk.pdf index d82c099a9e525d280f1eb9a6182730411a0a206d..d6bd101dba4bda4ec0c1ac9ea2c519ecd0b8d916 GIT binary patch [cut] diff --git a/talk/fosdem2015/talk.rst b/talk/fosdem2015/talk.rst --- a/talk/fosdem2015/talk.rst +++ b/talk/fosdem2015/talk.rst @@ -49,7 +49,7 @@ * Javascript is pretty fast -* PHP 
is fast... +* Even PHP is fast these days... Current situation (3/3) ----------------------- @@ -82,6 +82,10 @@ C APIs in other languages ------------------------- +* JNI / V8 + +* Lua / Julia + Can we implement a similar API ? -------------------------------- @@ -89,29 +93,58 @@ * Not that many changes to the C API are required -* It's even possible to do it in pure Python with CFFI +* It's even possible to have a C API written in pure Python with CFFI * Designing it to make everyone happy is harder than to actually implement it * Making people port their extensions is hard -* CPython would need to keep both APIs implement, at least for a while +* CPython would need to keep both APIs, at least for a while Where does PyPy fit in this ? ----------------------------- * The most flexible implementation -* Already fast +* RPython -* Can already interact with C code easily +The Jit +------- -* PyPy-STM +* speed.pypy.org -What about short term ? ------------------------ +* 6.9 times faster than CPython on our benchmarks -* PyMetabiosis +* Competes with other fast dynamic languages + +CFFI +---- + +* Interacting with C code is very important to the Python community + +* CFFI allows you to call C code and expose Python functions to C + +* Very fast on PyPy + +* As powerful as the C API + +STM +--- + +* Removing the GIL + +* Without having to deal with threads and locks + +* Still allows you to share memory between threads + +Short term C extension support +------------------------------ + +* We can bridge PyPy and CPython and let CPython deal with C extensions + +* PyMetabiosis demo + +* This will allow us to bring the entire scientific stack in a very short amount of time Thank you --------- From noreply at buildbot.pypy.org Fri Jan 30 10:34:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Jan 2015 10:34:26 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Attempt to regroup the stm functions in a single built-in module Message-ID: 
<20150130093426.114F41C0DC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75584:708ba48c7890 Date: 2015-01-30 10:33 +0100 http://bitbucket.org/pypy/pypy/changeset/708ba48c7890/ Log: Attempt to regroup the stm functions in a single built-in module called 'pypystm'. Also kill 'lib_pypy/atomic.py'. diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py deleted file mode 100644 --- a/lib_pypy/atomic.py +++ /dev/null @@ -1,24 +0,0 @@ -""" -API for accessing the multithreading extensions of PyPy -""" -import thread - -try: - from __pypy__ import thread as _thread - from __pypy__.thread import (atomic, getsegmentlimit, - hint_commit_soon, is_atomic) -except ImportError: - # Not a STM-enabled PyPy. We can still provide a version of 'atomic' - # that is good enough for our purposes. With this limited version, - # an atomic block in thread X will not prevent running thread Y, if - # thread Y is not within an atomic block at all. - atomic = thread.allocate_lock() - - def getsegmentlimit(): - return 1 - - def hint_commit_soon(): - pass - - def is_atomic(): - return atomic.locked() diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -15,7 +15,7 @@ import sys, thread, collections, cStringIO, linecache try: - from __pypy__.thread import atomic, is_atomic + from pypystm import atomic, is_atomic except ImportError: # Not a STM-enabled PyPy. We can use a regular lock for 'atomic', # which is good enough for our purposes. With this limited version, @@ -37,7 +37,7 @@ signals_enabled = _SignalsEnabled() try: - from __pypy__.thread import hint_commit_soon + from pypystm import hint_commit_soon except ImportError: # Not a STM-enabled PyPy. 
def hint_commit_soon(): @@ -102,7 +102,7 @@ def __init__(self): try: - from __pypy__.thread import getsegmentlimit + from pypystm import getsegmentlimit self.num_threads = getsegmentlimit() except ImportError: self.num_threads = 4 diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -226,9 +226,10 @@ # expose the following variables to ease debugging global space, entry_point - if config.translation.stm: + if config.translation.stm or config.objspace.usemodules.pypystm: + config.translation.stm = True config.translation.thread = True - config.objspace.usemodules._stm = True + config.objspace.usemodules.pypystm = True if config.objspace.allworkingmodules: from pypy.config.pypyoption import enable_allworkingmodules diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -34,7 +34,7 @@ self.w_profilefuncarg = None # if self.space.config.translation.stm: - from pypy.module._stm.ec import initialize_execution_context + from pypy.module.pypystm.ec import initialize_execution_context initialize_execution_context(self) self.thread_disappeared = False # might be set to True after os.fork() diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -29,22 +29,11 @@ class ThreadModule(MixedModule): appleveldefs = { 'signals_enabled': 'app_signal.signals_enabled', - 'atomic': 'app_atomic.atomic', - 'exclusive_atomic': 'app_atomic.exclusive_atomic', } interpleveldefs = { '_signals_enter': 'interp_signal.signals_enter', '_signals_exit': 'interp_signal.signals_exit', - '_atomic_enter': 'interp_atomic.atomic_enter', - '_exclusive_atomic_enter': 'interp_atomic.exclusive_atomic_enter', - '_atomic_exit': 'interp_atomic.atomic_exit', - 
'getsegmentlimit': 'interp_atomic.getsegmentlimit', - 'hint_commit_soon': 'interp_atomic.hint_commit_soon', - 'is_atomic': 'interp_atomic.is_atomic', - 'error': 'space.fromcache(pypy.module.thread.error.Cache).w_error', } - def activate(self, space): - return self.space.config.objspace.usemodules.thread class IntOpModule(MixedModule): diff --git a/pypy/module/_stm/__init__.py b/pypy/module/pypystm/__init__.py rename from pypy/module/_stm/__init__.py rename to pypy/module/pypystm/__init__.py --- a/pypy/module/_stm/__init__.py +++ b/pypy/module/pypystm/__init__.py @@ -4,9 +4,19 @@ class Module(MixedModule): appleveldefs = { + 'atomic': 'app_atomic.atomic', + 'exclusive_atomic': 'app_atomic.exclusive_atomic', } interpleveldefs = { + '_atomic_enter': 'interp_atomic.atomic_enter', + '_exclusive_atomic_enter': 'interp_atomic.exclusive_atomic_enter', + '_atomic_exit': 'interp_atomic.atomic_exit', + 'getsegmentlimit': 'interp_atomic.getsegmentlimit', + 'hint_commit_soon': 'interp_atomic.hint_commit_soon', + 'is_atomic': 'interp_atomic.is_atomic', + 'error': 'space.fromcache(pypy.module.thread.error.Cache).w_error', + 'local': 'local.STMLocal', 'count': 'count.count', 'hashtable': 'hashtable.W_Hashtable', diff --git a/pypy/module/__pypy__/app_atomic.py b/pypy/module/pypystm/app_atomic.py rename from pypy/module/__pypy__/app_atomic.py rename to pypy/module/pypystm/app_atomic.py --- a/pypy/module/__pypy__/app_atomic.py +++ b/pypy/module/pypystm/app_atomic.py @@ -1,12 +1,12 @@ -from __pypy__ import thread +import pypystm class Atomic(object): - __enter__ = thread._atomic_enter - __exit__ = thread._atomic_exit + __enter__ = pypystm._atomic_enter + __exit__ = pypystm._atomic_exit class ExclusiveAtomic(object): - __enter__ = thread._exclusive_atomic_enter - __exit__ = thread._atomic_exit + __enter__ = pypystm._exclusive_atomic_enter + __exit__ = pypystm._atomic_exit atomic = Atomic() exclusive_atomic = ExclusiveAtomic() diff --git a/pypy/module/_stm/count.py 
b/pypy/module/pypystm/count.py rename from pypy/module/_stm/count.py rename to pypy/module/pypystm/count.py --- a/pypy/module/_stm/count.py +++ b/pypy/module/pypystm/count.py @@ -1,5 +1,5 @@ """ -_stm.count() +pypystm.count() """ from rpython.rlib import rstm diff --git a/pypy/module/_stm/ec.py b/pypy/module/pypystm/ec.py rename from pypy/module/_stm/ec.py rename to pypy/module/pypystm/ec.py --- a/pypy/module/_stm/ec.py +++ b/pypy/module/pypystm/ec.py @@ -20,7 +20,7 @@ """Called from ExecutionContext.__init__().""" if ec.space.config.translation.rweakref: from rpython.rlib import rweakref - from pypy.module._stm.local import STMLocal + from pypy.module.pypystm.local import STMLocal ec._thread_local_dicts = rweakref.RWeakKeyDictionary(STMLocal, W_Root) else: ec._thread_local_dicts = FakeWeakKeyDictionary() diff --git a/pypy/module/_stm/hashtable.py b/pypy/module/pypystm/hashtable.py rename from pypy/module/_stm/hashtable.py rename to pypy/module/pypystm/hashtable.py --- a/pypy/module/_stm/hashtable.py +++ b/pypy/module/pypystm/hashtable.py @@ -1,5 +1,5 @@ """ -The class _stm.hashtable, mapping integers to objects. +The class pypystm.hashtable, mapping integers to objects. """ from pypy.interpreter.baseobjspace import W_Root @@ -63,7 +63,7 @@ return space.wrap(r) W_Hashtable.typedef = TypeDef( - '_stm.hashtable', + 'pypystm.hashtable', __new__ = interp2app(W_Hashtable___new__), __getitem__ = interp2app(W_Hashtable.getitem_w), __setitem__ = interp2app(W_Hashtable.setitem_w), diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/pypystm/interp_atomic.py rename from pypy/module/__pypy__/interp_atomic.py rename to pypy/module/pypystm/interp_atomic.py diff --git a/pypy/module/_stm/local.py b/pypy/module/pypystm/local.py rename from pypy/module/_stm/local.py rename to pypy/module/pypystm/local.py --- a/pypy/module/_stm/local.py +++ b/pypy/module/pypystm/local.py @@ -1,5 +1,5 @@ """ -The '_stm.local' class, used for 'thread._local' with STM. 
+The 'pypystm.local' class, used for 'thread._local' with STM. """ from pypy.interpreter.gateway import W_Root, interp2app @@ -10,7 +10,7 @@ def _fill_untranslated(ec): if not we_are_translated() and not hasattr(ec, '_thread_local_dicts'): - from pypy.module._stm.ec import initialize_execution_context + from pypy.module.pypystm.ec import initialize_execution_context initialize_execution_context(ec) @@ -67,7 +67,7 @@ # No arguments allowed pass -STMLocal.typedef = TypeDef("_stm.local", +STMLocal.typedef = TypeDef("pypystm.local", __doc__ = "Thread-local data", __new__ = interp2app(STMLocal.descr_local__new__.im_func), __init__ = interp2app(STMLocal.descr_local__init__), diff --git a/pypy/module/_stm/test/__init__.py b/pypy/module/pypystm/test/__init__.py rename from pypy/module/_stm/test/__init__.py rename to pypy/module/pypystm/test/__init__.py diff --git a/pypy/module/__pypy__/test/test_atomic.py b/pypy/module/pypystm/test/test_atomic.py rename from pypy/module/__pypy__/test/test_atomic.py rename to pypy/module/pypystm/test/test_atomic.py --- a/pypy/module/__pypy__/test/test_atomic.py +++ b/pypy/module/pypystm/test/test_atomic.py @@ -1,15 +1,13 @@ -from __future__ import with_statement -from pypy.module.thread.test.support import GenericTestThread -from rpython.rtyper.lltypesystem import rffi -class AppTestAtomic(GenericTestThread): +class AppTestAtomic: + spaceconfig = dict(usemodules=['pypystm', 'thread']) def test_simple(self): - from __pypy__ import thread - for atomic in thread.atomic, thread.exclusive_atomic: + import pypystm + for atomic in pypystm.atomic, pypystm.exclusive_atomic: with atomic: - assert thread.is_atomic() + assert pypystm.is_atomic() try: with atomic: raise ValueError @@ -17,40 +15,40 @@ pass def test_nest_composable_atomic(self): - from __pypy__ import thread - with thread.atomic: - with thread.atomic: - assert thread.is_atomic() - assert thread.is_atomic() - assert not thread.is_atomic() + import pypystm + with pypystm.atomic: + with 
pypystm.atomic: + assert pypystm.is_atomic() + assert pypystm.is_atomic() + assert not pypystm.is_atomic() def test_nest_composable_below_exclusive(self): - from __pypy__ import thread - with thread.exclusive_atomic: - with thread.atomic: - with thread.atomic: - assert thread.is_atomic() - assert thread.is_atomic() - assert thread.is_atomic() - assert not thread.is_atomic() + import pypystm + with pypystm.exclusive_atomic: + with pypystm.atomic: + with pypystm.atomic: + assert pypystm.is_atomic() + assert pypystm.is_atomic() + assert pypystm.is_atomic() + assert not pypystm.is_atomic() def test_nest_exclusive_fails(self): - from __pypy__ import thread + import pypystm try: - with thread.exclusive_atomic: - with thread.exclusive_atomic: - assert thread.is_atomic() - except thread.error, e: - assert not thread.is_atomic() + with pypystm.exclusive_atomic: + with pypystm.exclusive_atomic: + assert pypystm.is_atomic() + except pypystm.error, e: + assert not pypystm.is_atomic() assert e.message == "exclusive_atomic block can't be entered inside another atomic block" def test_nest_exclusive_fails2(self): - from __pypy__ import thread + import pypystm try: - with thread.atomic: - with thread.exclusive_atomic: - assert thread.is_atomic() - assert thread.is_atomic() - except thread.error, e: - assert not thread.is_atomic() + with pypystm.atomic: + with pypystm.exclusive_atomic: + assert pypystm.is_atomic() + assert pypystm.is_atomic() + except pypystm.error, e: + assert not pypystm.is_atomic() assert e.message == "exclusive_atomic block can't be entered inside another atomic block" diff --git a/pypy/module/_stm/test/test_count.py b/pypy/module/pypystm/test/test_count.py rename from pypy/module/_stm/test/test_count.py rename to pypy/module/pypystm/test/test_count.py --- a/pypy/module/_stm/test/test_count.py +++ b/pypy/module/pypystm/test/test_count.py @@ -1,10 +1,10 @@ class AppTestCount: - spaceconfig = dict(usemodules=['_stm']) + spaceconfig = dict(usemodules=['pypystm']) 
def test_count(self): - import _stm - x = _stm.count() - y = _stm.count() + import pypystm + x = pypystm.count() + y = pypystm.count() assert y == x + 1 diff --git a/pypy/module/_stm/test/test_hashtable.py b/pypy/module/pypystm/test/test_hashtable.py rename from pypy/module/_stm/test/test_hashtable.py rename to pypy/module/pypystm/test/test_hashtable.py --- a/pypy/module/_stm/test/test_hashtable.py +++ b/pypy/module/pypystm/test/test_hashtable.py @@ -1,11 +1,11 @@ class AppTestHashtable: - spaceconfig = dict(usemodules=['_stm']) + spaceconfig = dict(usemodules=['pypystm']) def test_simple(self): - import _stm - h = _stm.hashtable() + import pypystm + h = pypystm.hashtable() h[42+65536] = "bar" raises(KeyError, "h[42]") h[42] = "foo" @@ -18,8 +18,8 @@ raises(KeyError, "del h[42]") def test_get_setdefault(self): - import _stm - h = _stm.hashtable() + import pypystm + h = pypystm.hashtable() assert h.get(42) is None assert h.get(-43, None) is None assert h.get(44, 81) == 81 diff --git a/pypy/module/_stm/test/test_local.py b/pypy/module/pypystm/test/test_local.py rename from pypy/module/_stm/test/test_local.py rename to pypy/module/pypystm/test/test_local.py --- a/pypy/module/_stm/test/test_local.py +++ b/pypy/module/pypystm/test/test_local.py @@ -3,11 +3,11 @@ class AppTestSTMLocal(test_local.AppTestLocal): spaceconfig = test_local.AppTestLocal.spaceconfig.copy() - spaceconfig['usemodules'] += ('_stm',) + spaceconfig['usemodules'] += ('pypystm',) def setup_class(cls): test_local.AppTestLocal.setup_class.im_func(cls) cls.w__local = cls.space.appexec([], """(): - import _stm - return _stm.local + import pypystm + return pypystm.local """) diff --git a/pypy/module/_stm/test/test_time.py b/pypy/module/pypystm/test/test_time.py rename from pypy/module/_stm/test/test_time.py rename to pypy/module/pypystm/test/test_time.py --- a/pypy/module/_stm/test/test_time.py +++ b/pypy/module/pypystm/test/test_time.py @@ -1,13 +1,13 @@ class AppTestHashtable: - spaceconfig = 
dict(usemodules=['_stm']) + spaceconfig = dict(usemodules=['pypystm']) def test_simple(self): - import _stm - t1 = _stm.time() - t2 = _stm.time() + import pypystm + t1 = pypystm.time() + t2 = pypystm.time() assert t1 < t2 < t1 + 1 - t1 = _stm.clock() - t2 = _stm.clock() + t1 = pypystm.clock() + t2 = pypystm.clock() assert t1 < t2 < t1 + 1 diff --git a/pypy/module/_stm/threadlocals.py b/pypy/module/pypystm/threadlocals.py rename from pypy/module/_stm/threadlocals.py rename to pypy/module/pypystm/threadlocals.py diff --git a/pypy/module/_stm/time.py b/pypy/module/pypystm/time.py rename from pypy/module/_stm/time.py rename to pypy/module/pypystm/time.py --- a/pypy/module/_stm/time.py +++ b/pypy/module/pypystm/time.py @@ -1,5 +1,5 @@ """ -_stm.time(), _stm.clock() +pypystm.time(), pypystm.clock() """ from rpython.rtyper.lltypesystem import lltype, rffi @@ -43,7 +43,7 @@ """Similar to time.time(), but works without conflict. The drawback is that the returned times may appear out of order: this thread's transaction may commit before or after another thread's, -while _stm.time() called by both may return results in the opposite +while pypystm.time() called by both may return results in the opposite order (or even exactly equal results if you are unlucky).""" return space.wrap(pypy_clock_get_time()) @@ -51,6 +51,6 @@ """Similar to time.clock(), but works without conflict. The drawback is that the returned times may appear out of order: this thread's transaction may commit before or after another thread's, -while _stm.time() called by both may return results in the opposite +while pypystm.time() called by both may return results in the opposite order (or even exactly equal results if you are unlucky).""" return space.wrap(pypy_clock_get_clock()) diff --git a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -1,8 +1,8 @@ """ -Redirect some classes from pypy.module._stm. 
+Redirect some classes from pypy.module.pypystm. """ -from pypy.module._stm import threadlocals, local +from pypy.module.pypystm import threadlocals, local STMThreadLocals = threadlocals.STMThreadLocals STMLocal = local.STMLocal From noreply at buildbot.pypy.org Fri Jan 30 11:15:38 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Jan 2015 11:15:38 +0100 (CET) Subject: [pypy-commit] pypy default: test the same way we build after translate Message-ID: <20150130101538.5FF2E1C0134@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r75585:fa382e9b1c95 Date: 2015-01-30 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/fa382e9b1c95/ Log: test the same way we build after translate diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -808,12 +808,7 @@ t, cbuilder = self.compile(entry_point, shared=True) assert cbuilder.shared_library_name is not None assert cbuilder.shared_library_name != cbuilder.executable_name - if os.name == 'posix': - library_path = cbuilder.shared_library_name.dirpath() - if sys.platform == 'darwin': - monkeypatch.setenv('DYLD_LIBRARY_PATH', library_path) - else: - monkeypatch.setenv('LD_LIBRARY_PATH', library_path) + #Do not set LD_LIBRARY_PATH, make sure $ORIGIN flag is working out, err = cbuilder.cmdexec("a b") assert out == "3" From noreply at buildbot.pypy.org Fri Jan 30 12:43:07 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 30 Jan 2015 12:43:07 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Almost finished Message-ID: <20150130114307.362381C0FCB@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5494:aa114ba3adbb Date: 2015-01-30 12:43 +0100 http://bitbucket.org/pypy/extradoc/changeset/aa114ba3adbb/ Log: Almost finished diff --git a/talk/fosdem2015/talk.pdf b/talk/fosdem2015/talk.pdf index 
d6bd101dba4bda4ec0c1ac9ea2c519ecd0b8d916..9910de377ab5fdf9a9af5f634ed4b9cbd35c0954 GIT binary patch [cut] diff --git a/talk/fosdem2015/talk.rst b/talk/fosdem2015/talk.rst --- a/talk/fosdem2015/talk.rst +++ b/talk/fosdem2015/talk.rst @@ -117,6 +117,8 @@ * Competes with other fast dynamic languages +* Pay the cost of what you use + CFFI ---- From noreply at buildbot.pypy.org Fri Jan 30 13:42:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Jan 2015 13:42:50 +0100 (CET) Subject: [pypy-commit] pypy release-2.5.x: typo Message-ID: <20150130124250.1772C1C08E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.5.x Changeset: r75586:52462cc62df5 Date: 2015-01-30 13:42 +0100 http://bitbucket.org/pypy/pypy/changeset/52462cc62df5/ Log: typo diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -193,7 +193,7 @@ extensions. ====================================== -Make more python modules pypy-freindly +Make more python modules pypy-friendly ====================================== Work has been started on a few popular python packages. 
Here is a partial From noreply at buildbot.pypy.org Fri Jan 30 16:48:54 2015 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 30 Jan 2015 16:48:54 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add a conclusion Message-ID: <20150130154854.CEED41C0FD2@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5495:40c8ae88cd7e Date: 2015-01-30 16:49 +0100 http://bitbucket.org/pypy/extradoc/changeset/40c8ae88cd7e/ Log: Add a conclusion diff --git a/talk/fosdem2015/talk.pdf b/talk/fosdem2015/talk.pdf index 9910de377ab5fdf9a9af5f634ed4b9cbd35c0954..e1d849071a9ecfed8044921f817cc8476db0d0a6 GIT binary patch [cut] diff --git a/talk/fosdem2015/talk.rst b/talk/fosdem2015/talk.rst --- a/talk/fosdem2015/talk.rst +++ b/talk/fosdem2015/talk.rst @@ -64,7 +64,7 @@ |pause| -* More competition between implementations would benefit us +* More competition between implementations would benefit everybody Why can't other implementations implement the C API --------------------------------------------------- @@ -146,7 +146,18 @@ * PyMetabiosis demo -* This will allow us to bring the entire scientific stack in a very short amount of time +* This should give PyPy another way to interact with CPython C extensions, better suited for bringing e.g. 
the entire scientific stack in + +Summary +------- + +* We can do better + +* PyPy is working on getting even better + +* Making an alternative implementation friendly ecosystem is quite hard + +* But rewarding Thank you --------- From noreply at buildbot.pypy.org Fri Jan 30 17:51:09 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Jan 2015 17:51:09 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150130165109.6DC371C0FCB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r563:895c01646e17 Date: 2015-01-30 17:51 +0100 http://bitbucket.org/pypy/pypy.org/changeset/895c01646e17/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $58609 of $105000 (55.8%) + $58667 of $105000 (55.9%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $21731 of $80000 (27.2%) + $21741 of $80000 (27.2%)
From noreply at buildbot.pypy.org Fri Jan 30 18:08:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Jan 2015 18:08:05 +0100 (CET) Subject: [pypy-commit] pypy default: (based on a suggestion by mjacob) Message-ID: <20150130170805.361031C0FDA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75587:e207c2d0f728 Date: 2015-01-30 18:07 +0100 http://bitbucket.org/pypy/pypy/changeset/e207c2d0f728/ Log: (based on a suggestion by mjacob) If an RPython class says "foo = None" at class-level, and "foo" is typed as something that turns into a GC Ptr, then don't put the "x.foo = NULL" after the malloc. Whatever the GC we use, the malloc must initialize such fields to NULL already. diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -711,8 +711,15 @@ continue value = self.classdef.classdesc.read_attribute(fldname, None) if value is not None: - cvalue = inputconst(r.lowleveltype, - r.convert_desc_or_const(value)) + ll_value = r.convert_desc_or_const(value) + # don't write NULL GC pointers: we know that the malloc + # done above initialized at least the GC Ptr fields to + # NULL already, and that's true for all our GCs + if (isinstance(r.lowleveltype, Ptr) and + r.lowleveltype.TO._gckind == 'gc' and + not ll_value): + continue + cvalue = inputconst(r.lowleveltype, ll_value) self.setfield(vptr, fldname, cvalue, llops, flags={'access_directly': True}) return vptr From noreply at buildbot.pypy.org Fri Jan 30 19:10:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Jan 2015 19:10:45 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Check that we don't try to compile with '--stm' on a 32-bit platform Message-ID: <20150130181045.918BD1C00BF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75588:a839ba4ff6e2 Date: 2015-01-30 19:10 +0100 http://bitbucket.org/pypy/pypy/changeset/a839ba4ff6e2/ Log: Check that we don't try to compile with 
'--stm' on a 32-bit platform or on non-linux diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -26,6 +26,7 @@ ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 +SUPPORT_STM = IS_64_BITS and sys.platform.startswith("linux") SUPPORT__THREAD = ( # whether the particular C compiler supports __thread sys.platform.startswith("linux")) # Linux works @@ -118,7 +119,8 @@ suggests=[("translation.gc", "stmgc")], # Boehm works too requires=[("translation.thread", True), ("translation.continuation", False), # XXX for now - ]), + ] + ([("'--stm requires 64-bit Linux!'", None)] + if not SUPPORT_STM else [])), BoolOption("sandbox", "Produce a fully-sandboxed executable", default=False, cmdline="--sandbox", requires=[("translation.thread", False)], From noreply at buildbot.pypy.org Fri Jan 30 20:29:52 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 30 Jan 2015 20:29:52 +0100 (CET) Subject: [pypy-commit] pypy default: typo Message-ID: <20150130192952.6CBF31C00BF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75589:dfd917c9f15c Date: 2015-01-30 20:29 +0100 http://bitbucket.org/pypy/pypy/changeset/dfd917c9f15c/ Log: typo diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -173,7 +173,7 @@ exiting (blackhole) steps, but just not from the final assembler. Note that the return value of the callable is ignored, because -there is no reasonable way to guess what it sound be in case the +there is no reasonable way to guess what it should be in case the function is not called. 
This is meant to be used notably in sys.settrace() for coverage- From noreply at buildbot.pypy.org Fri Jan 30 21:49:56 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 30 Jan 2015 21:49:56 +0100 (CET) Subject: [pypy-commit] pypy default: Fix FreeBSD compile flags issues Message-ID: <20150130204956.891DB1C00BF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r75590:9e7b2bbd471c Date: 2015-01-30 20:49 +0000 http://bitbucket.org/pypy/pypy/changeset/9e7b2bbd471c/ Log: Fix FreeBSD compile flags issues diff --git a/rpython/translator/platform/freebsd.py b/rpython/translator/platform/freebsd.py --- a/rpython/translator/platform/freebsd.py +++ b/rpython/translator/platform/freebsd.py @@ -12,6 +12,7 @@ cflags = tuple( ['-O3', '-pthread', '-fomit-frame-pointer'] + os.environ.get('CFLAGS', '').split()) + rpath_flags = ['-Wl,-rpath=\'$$ORIGIN/\'', '-Wl,-z,origin'] class Freebsd_64(Freebsd): shared_only = ('-fPIC',) diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -112,9 +112,9 @@ target_name = exe_name.basename if shared: - cflags = self.cflags + self.get_shared_only_compile_flags() + cflags = tuple(self.cflags) + self.get_shared_only_compile_flags() else: - cflags = self.cflags + self.standalone_only + cflags = tuple(self.cflags) + tuple(self.standalone_only) m = GnuMakefile(path) m.exe_name = path.join(exe_name.basename) From noreply at buildbot.pypy.org Sat Jan 31 00:45:10 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 31 Jan 2015 00:45:10 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: add SSLSocket.context property Message-ID: <20150130234510.DC6601C0FAB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75591:2a8d2a10cc88 Date: 2015-01-26 09:03 +0100 http://bitbucket.org/pypy/pypy/changeset/2a8d2a10cc88/ Log: add SSLSocket.context property diff --git 
a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -9,7 +9,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty_w from pypy.module._ssl.ssl_data import ( LIBRARY_CODES_TO_NAMES, ERROR_CODES_TO_NAMES) from pypy.module._socket import interp_socket @@ -211,17 +211,17 @@ class _SSLSocket(W_Root): @staticmethod - def descr_new(space, sslctx, w_sock, socket_type, hostname, w_ssl_sock): + def descr_new(space, w_ctx, w_sock, socket_type, hostname, w_ssl_sock): self = _SSLSocket() self.space = space - self.ctx = sslctx + self.w_ctx = w_ctx self.peer_cert = lltype.nullptr(X509.TO) self.shutdown_seen_zero = False self.handshake_done = False sock_fd = space.int_w(space.call_method(w_sock, "fileno")) - self.ssl = libssl_SSL_new(sslctx.ctx) # new ssl struct + self.ssl = libssl_SSL_new(w_ctx.ctx) # new ssl struct libssl_SSL_set_fd(self.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. 
@@ -606,6 +606,7 @@ compression = interp2app(_SSLSocket.compression_w), version = interp2app(_SSLSocket.version_w), tls_unique_cb = interp2app(_SSLSocket.tls_unique_cb_w), + context=interp_attrproperty_w("w_ctx", _SSLSocket), ) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -73,7 +73,9 @@ if sys.version_info < (2, 7, 9): ss = _ssl.sslwrap(s, 0) else: - ss = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1)._wrap_socket(s, 0) + ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) + ss = ctx._wrap_socket(s, 0) + assert ss.context is ctx exc = raises(_socket.error, ss.do_handshake) if sys.platform == 'win32': assert exc.value.errno == 10057 # WSAENOTCONN From noreply at buildbot.pypy.org Sat Jan 31 00:45:12 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 31 Jan 2015 00:45:12 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Fix ssl.context setter Message-ID: <20150130234512.13F9E1C0FAB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75592:1e72ec33c3c4 Date: 2015-01-26 22:45 +0100 http://bitbucket.org/pypy/pypy/changeset/1e72ec33c3c4/ Log: Fix ssl.context setter diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -9,7 +9,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty_w +from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module._ssl.ssl_data import ( LIBRARY_CODES_TO_NAMES, ERROR_CODES_TO_NAMES) from pypy.module._socket import interp_socket @@ -591,6 +591,18 @@ if length > 0: return space.wrap(rffi.charpsize2str(buf, intmask(length))) + def descr_get_context(self, space): + 
return self.w_ctx + + def descr_set_context(self, space, w_ctx): + ctx = space.interp_w(_SSLContext, w_ctx) + if not HAS_SNI: + raise oefmt(space.w_NotImplementedError, + "setting a socket's context " + "is not supported by your OpenSSL library") + self.w_ctx = w_ctx + libssl_SSL_set_SSL_CTX(self.ssl, ctx.ctx) + _SSLSocket.typedef = TypeDef( "_ssl._SSLSocket", @@ -606,7 +618,8 @@ compression = interp2app(_SSLSocket.compression_w), version = interp2app(_SSLSocket.version_w), tls_unique_cb = interp2app(_SSLSocket.tls_unique_cb_w), - context=interp_attrproperty_w("w_ctx", _SSLSocket), + context=GetSetProperty(_SSLSocket.descr_get_context, + _SSLSocket.descr_set_context), ) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -240,6 +240,7 @@ ssl_external('RAND_egd', [rffi.CCHARP], rffi.INT) ssl_external('SSL_CTX_new', [SSL_METHOD], SSL_CTX) ssl_external('SSL_get_SSL_CTX', [SSL], SSL_CTX) +ssl_external('SSL_set_SSL_CTX', [SSL, SSL_CTX], SSL_CTX) ssl_external('TLSv1_method', [], SSL_METHOD) ssl_external('SSLv2_method', [], SSL_METHOD) ssl_external('SSLv3_method', [], SSL_METHOD) From noreply at buildbot.pypy.org Sat Jan 31 00:45:13 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 31 Jan 2015 00:45:13 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Add password parameter to ctx.load_cert_chain() Message-ID: <20150130234513.3D99C1C0FAB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75593:ee52b4559ba7 Date: 2015-01-31 00:08 +0100 http://bitbucket.org/pypy/pypy/changeset/ee52b4559ba7/ Log: Add password parameter to ctx.load_cert_chain() diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,4 +1,4 @@ -from rpython.rlib import rpoll, rsocket +from rpython.rlib import rpoll, rsocket, rthread from rpython.rlib.rarithmetic import intmask, 
widen, r_uint from rpython.rlib.ropenssl import * from rpython.rlib.rposix import get_errno, set_errno @@ -999,6 +999,39 @@ libssl_BIO_free(cert) +# Data structure for the password callbacks +class PasswordInfo(object): + w_callable = None + password = None + operationerror = None +PWINFO_STORAGE = {} + +def _password_callback(buf, size, rwflag, userdata): + index = rffi.cast(lltype.Signed, userdata) + pw_info = PWINFO_STORAGE.get(index, None) + if not pw_info: + return rffi.cast(rffi.INT, -1) + space = pw_info.space + password = "" + if pw_info.w_callable: + try: + password = pw_info.space.str_w( + space.call_function(pw_info.w_callable)) + except OperationError as e: + pw_info.operationerror = e + return rffi.cast(rffi.INT, -1) + else: + password = pw_info.password + size = widen(size) + if len(password) > size: + pw_info.operationerror = oefmt( + space.w_ValueError, + "password cannot be longer than %d bytes", size) + return rffi.cast(rffi.INT, -1) + for i, c in enumerate(password): + buf[i] = c + return rffi.cast(rffi.INT, len(password)) + class _SSLContext(W_Root): @staticmethod @unwrap_spec(protocol=int) @@ -1101,7 +1134,8 @@ "CERT_OPTIONAL or CERT_REQUIRED") self.check_hostname = check_hostname - def load_cert_chain_w(self, space, w_certfile, w_keyfile=None): + def load_cert_chain_w(self, space, w_certfile, w_keyfile=None, + w_password=None): if space.is_none(w_certfile): certfile = None else: @@ -1110,33 +1144,63 @@ keyfile = certfile else: keyfile = space.str_w(w_keyfile) + pw_info = PasswordInfo() + pw_info.space = space + index = -1 + if not space.is_none(w_password): + index = rthread.get_ident() + PWINFO_STORAGE[index] = pw_info - set_errno(0) + if space.is_true(space.callable(w_password)): + pw_info.w_callable = w_password + else: + pw_info.password = space.str_w(w_password) - ret = libssl_SSL_CTX_use_certificate_chain_file(self.ctx, certfile) - if ret != 1: - errno = get_errno() - if errno: - libssl_ERR_clear_error() - raise wrap_oserror(space, 
OSError(errno, ''), - exception_name = 'w_IOError') - else: + libssl_SSL_CTX_set_default_passwd_cb( + self.ctx, _password_callback) + libssl_SSL_CTX_set_default_passwd_cb_userdata( + self.ctx, rffi.cast(rffi.VOIDP, index)) + + + try: + set_errno(0) + ret = libssl_SSL_CTX_use_certificate_chain_file(self.ctx, certfile) + if ret != 1: + if pw_info.operationerror: + libssl_ERR_clear_error() + raise pw_info.operationerror + errno = get_errno() + if errno: + libssl_ERR_clear_error() + raise wrap_oserror(space, OSError(errno, ''), + exception_name = 'w_IOError') + else: + raise _ssl_seterror(space, None, -1) + + ret = libssl_SSL_CTX_use_PrivateKey_file(self.ctx, keyfile, + SSL_FILETYPE_PEM) + if ret != 1: + if pw_info.operationerror: + libssl_ERR_clear_error() + raise pw_info.operationerror + errno = get_errno() + if errno: + libssl_ERR_clear_error() + raise wrap_oserror(space, OSError(errno, ''), + exception_name = 'w_IOError') + else: + raise _ssl_seterror(space, None, -1) + + ret = libssl_SSL_CTX_check_private_key(self.ctx) + if ret != 1: raise _ssl_seterror(space, None, -1) - - ret = libssl_SSL_CTX_use_PrivateKey_file(self.ctx, keyfile, - SSL_FILETYPE_PEM) - if ret != 1: - errno = get_errno() - if errno: - libssl_ERR_clear_error() - raise wrap_oserror(space, OSError(errno, ''), - exception_name = 'w_IOError') - else: - raise _ssl_seterror(space, None, -1) - - ret = libssl_SSL_CTX_check_private_key(self.ctx) - if ret != 1: - raise _ssl_seterror(space, None, -1) + finally: + if index >= 0: + del PWINFO_STORAGE[index] + libssl_SSL_CTX_set_default_passwd_cb( + self.ctx, lltype.nullptr(pem_password_cb.TO)) + libssl_SSL_CTX_set_default_passwd_cb_userdata( + self.ctx, None) @unwrap_spec(filepath=str) def load_dh_params_w(self, space, filepath): diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -290,6 +290,9 @@ tmpfile = udir / "emptycert.pem" 
tmpfile.write(SSL_EMPTYCERT) cls.w_emptycert = cls.space.wrap(str(tmpfile)) + tmpfile = udir / "cert.passwd.pem" + tmpfile.write(SSL_CERTIFICATE_PROTECTED) + cls.w_cert_protected = cls.space.wrap(str(tmpfile)) cls.w_dh512 = cls.space.wrap(os.path.join( os.path.dirname(__file__), 'dh512.pem')) @@ -301,6 +304,15 @@ raises(IOError, ctx.load_cert_chain, "inexistent.pem") raises(_ssl.SSLError, ctx.load_cert_chain, self.badcert) raises(_ssl.SSLError, ctx.load_cert_chain, self.emptycert) + # Password protected key and cert + raises(_ssl.SSLError, ctx.load_cert_chain, self.cert_protected, + password="badpass") + ctx.load_cert_chain(self.cert_protected, password="somepass") + ctx.load_cert_chain(self.cert_protected, password=lambda: "somepass") + raises(_ssl.SSLError, ctx.load_cert_chain, self.cert_protected, + password=lambda: "badpass") + raises(TypeError, ctx.load_cert_chain, self.cert_protected, + password=lambda: 3) def test_load_verify_locations(self): import _ssl @@ -452,3 +464,38 @@ -----END CERTIFICATE----- """ SSL_EMPTYCERT = "" +SSL_CERTIFICATE_PROTECTED = """ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,1A8D9D2A02EC698A + +kJYbfZ8L0sfe9Oty3gw0aloNnY5E8fegRfQLZlNoxTl6jNt0nIwI8kDJ36CZgR9c +u3FDJm/KqrfUoz8vW+qEnWhSG7QPX2wWGPHd4K94Yz/FgrRzZ0DoK7XxXq9gOtVA +AVGQhnz32p+6WhfGsCr9ArXEwRZrTk/FvzEPaU5fHcoSkrNVAGX8IpSVkSDwEDQr +Gv17+cfk99UV1OCza6yKHoFkTtrC+PZU71LomBabivS2Oc4B9hYuSR2hF01wTHP+ +YlWNagZOOVtNz4oKK9x9eNQpmfQXQvPPTfusexKIbKfZrMvJoxcm1gfcZ0H/wK6P +6wmXSG35qMOOztCZNtperjs1wzEBXznyK8QmLcAJBjkfarABJX9vBEzZV0OUKhy+ +noORFwHTllphbmydLhu6ehLUZMHPhzAS5UN7srtpSN81eerDMy0RMUAwA7/PofX1 +94Me85Q8jP0PC9ETdsJcPqLzAPETEYu0ELewKRcrdyWi+tlLFrpE5KT/s5ecbl9l +7B61U4Kfd1PIXc/siINhU3A3bYK+845YyUArUOnKf1kEox7p1RpD7yFqVT04lRTo +cibNKATBusXSuBrp2G6GNuhWEOSafWCKJQAzgCYIp6ZTV2khhMUGppc/2H3CF6cO +zX0KtlPVZC7hLkB6HT8SxYUwF1zqWY7+/XPPdc37MeEZ87Q3UuZwqORLY+Z0hpgt +L5JXBCoklZhCAaN2GqwFLXtGiRSRFGY7xXIhbDTlE65Wv1WGGgDLMKGE1gOz3yAo 
+2jjG1+yAHJUdE69XTFHSqSkvaloA1W03LdMXZ9VuQJ/ySXCie6ABAQ== +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX +-----END CERTIFICATE----- +""" diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -261,6 +261,9 @@ ssl_external('SSL_CTX_load_verify_locations', [SSL_CTX, rffi.CCHARP, rffi.CCHARP], rffi.INT) ssl_external('SSL_CTX_check_private_key', [SSL_CTX], rffi.INT) ssl_external('SSL_CTX_set_session_id_context', [SSL_CTX, rffi.CCHARP, rffi.UINT], rffi.INT) +pem_password_cb = lltype.Ptr(lltype.FuncType([rffi.CCHARP, rffi.INT, rffi.INT, rffi.VOIDP], rffi.INT)) +ssl_external('SSL_CTX_set_default_passwd_cb', [SSL_CTX, pem_password_cb], lltype.Void) +ssl_external('SSL_CTX_set_default_passwd_cb_userdata', [SSL_CTX, rffi.VOIDP], lltype.Void) SSL_CTX_STATS_NAMES = """ number connect connect_good connect_renegotiate accept accept_good accept_renegotiate hits misses timeouts cache_full""".split() From noreply at buildbot.pypy.org Sat Jan 31 00:45:14 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 31 Jan 2015 00:45:14 +0100 (CET) Subject: [pypy-commit] pypy 
stdlib-2.7.9: A bad tls version now raises ValueError Message-ID: <20150130234514.6BFCA1C0FAB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75594:07341897da21 Date: 2015-01-31 00:13 +0100 http://bitbucket.org/pypy/pypy/changeset/07341897da21/ Log: A bad tls version now raises ValueError diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1045,7 +1045,7 @@ elif protocol == PY_SSL_VERSION_SSL23: method = libssl_SSLv23_method() else: - raise ssl_error(space, "invalid protocol version") + raise oefmt(space.w_ValueError, "invalid protocol version") ctx = libssl_SSL_CTX_new(method) if not ctx: raise ssl_error(space, "failed to allocate SSL context") diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -120,6 +120,7 @@ def test_context(self): import _ssl s = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) + raises(ValueError, _ssl._SSLContext, -1) assert type(s.options) is long assert s.options & _ssl.OP_NO_SSLv2 From noreply at buildbot.pypy.org Sat Jan 31 00:45:15 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 31 Jan 2015 00:45:15 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.9: Implement SSLContext.get_ca_certs() Message-ID: <20150130234515.A2C511C0FAB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-2.7.9 Changeset: r75595:d5649f3f7a3b Date: 2015-01-31 00:43 +0100 http://bitbucket.org/pypy/pypy/changeset/d5649f3f7a3b/ Log: Implement SSLContext.get_ca_certs() diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -525,16 +525,7 @@ if der: # return cert in DER-encoded format - with lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as buf_ptr: - buf_ptr[0] = lltype.nullptr(rffi.CCHARP.TO) - 
length = libssl_i2d_X509(self.peer_cert, buf_ptr) - if length < 0: - raise _ssl_seterror(space, self, length) - try: - # this is actually an immutable bytes sequence - return space.wrap(rffi.charpsize2str(buf_ptr[0], length)) - finally: - libssl_OPENSSL_free(buf_ptr[0]) + return _certificate_to_der(space, self.peer_cert) else: verification = libssl_SSL_CTX_get_verify_mode( libssl_SSL_get_SSL_CTX(self.ssl)) @@ -622,37 +613,45 @@ _SSLSocket.descr_set_context), ) +def _certificate_to_der(space, certificate): + with lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as buf_ptr: + buf_ptr[0] = lltype.nullptr(rffi.CCHARP.TO) + length = libssl_i2d_X509(certificate, buf_ptr) + if length < 0: + raise _ssl_seterror(space, None, 0) + try: + return space.wrap(rffi.charpsize2str(buf_ptr[0], length)) + finally: + libssl_OPENSSL_free(buf_ptr[0]) -def _decode_certificate(space, certificate, verbose=False): +def _decode_certificate(space, certificate): w_retval = space.newdict() w_peer = _create_tuple_for_X509_NAME( space, libssl_X509_get_subject_name(certificate)) space.setitem(w_retval, space.wrap("subject"), w_peer) - if verbose: - w_issuer = _create_tuple_for_X509_NAME( - space, libssl_X509_get_issuer_name(certificate)) - space.setitem(w_retval, space.wrap("issuer"), w_issuer) + w_issuer = _create_tuple_for_X509_NAME( + space, libssl_X509_get_issuer_name(certificate)) + space.setitem(w_retval, space.wrap("issuer"), w_issuer) - space.setitem(w_retval, space.wrap("version"), - space.wrap(libssl_X509_get_version(certificate))) + space.setitem(w_retval, space.wrap("version"), + space.wrap(libssl_X509_get_version(certificate))) biobuf = libssl_BIO_new(libssl_BIO_s_mem()) try: - if verbose: - libssl_BIO_reset(biobuf) - serialNumber = libssl_X509_get_serialNumber(certificate) - libssl_i2a_ASN1_INTEGER(biobuf, serialNumber) - # should not exceed 20 octets, 160 bits, so buf is big enough - with lltype.scoped_alloc(rffi.CCHARP.TO, 100) as buf: - length = libssl_BIO_gets(biobuf, buf, 99) - if length 
< 0: - raise _ssl_seterror(space, None, length) + libssl_BIO_reset(biobuf) + serialNumber = libssl_X509_get_serialNumber(certificate) + libssl_i2a_ASN1_INTEGER(biobuf, serialNumber) + # should not exceed 20 octets, 160 bits, so buf is big enough + with lltype.scoped_alloc(rffi.CCHARP.TO, 100) as buf: + length = libssl_BIO_gets(biobuf, buf, 99) + if length < 0: + raise _ssl_seterror(space, None, length) - w_serial = space.wrap(rffi.charpsize2str(buf, length)) - space.setitem(w_retval, space.wrap("serialNumber"), w_serial) + w_serial = space.wrap(rffi.charpsize2str(buf, length)) + space.setitem(w_retval, space.wrap("serialNumber"), w_serial) libssl_BIO_reset(biobuf) notBefore = libssl_X509_get_notBefore(certificate) @@ -977,8 +976,8 @@ return getattr(space.fromcache(Cache), name) - at unwrap_spec(filename=str, verbose=bool) -def _test_decode_cert(space, filename, verbose=True): + at unwrap_spec(filename=str) +def _test_decode_cert(space, filename): cert = libssl_BIO_new(libssl_BIO_s_file()) if not cert: raise ssl_error(space, "Can't malloc memory to read file") @@ -992,7 +991,7 @@ raise ssl_error(space, "Error decoding PEM-encoded file") try: - return _decode_certificate(space, x, verbose) + return _decode_certificate(space, x) finally: libssl_X509_free(x) finally: @@ -1352,6 +1351,27 @@ self.npn_protocols = SSLNpnProtocols(self.ctx, protos) + def get_ca_certs_w(self, space, w_binary_form=None): + if w_binary_form and space.is_true(w_binary_form): + binary_mode = True + else: + binary_mode = False + rlist = [] + store = libssl_SSL_CTX_get_cert_store(self.ctx) + for i in range(libssl_sk_X509_OBJECT_num(store[0].c_objs)): + obj = libssl_sk_X509_OBJECT_value(store[0].c_objs, i) + if intmask(obj.c_type) != X509_LU_X509: + # not a x509 cert + continue + # CA for any purpose + cert = libssl_pypy_X509_OBJECT_data_x509(obj) + if not libssl_X509_check_ca(cert): + continue + if binary_mode: + rlist.append(_certificate_to_der(space, cert)) + else: + 
rlist.append(_decode_certificate(space, cert)) + return space.newlist(rlist) _SSLContext.typedef = TypeDef( "_ssl._SSLContext", @@ -1364,6 +1384,7 @@ load_verify_locations=interp2app(_SSLContext.load_verify_locations_w), set_default_verify_paths=interp2app(_SSLContext.descr_set_default_verify_paths), _set_npn_protocols=interp2app(_SSLContext.set_npn_protocols_w), + get_ca_certs=interp2app(_SSLContext.get_ca_certs_w), options=GetSetProperty(_SSLContext.descr_get_options, _SSLContext.descr_set_options), diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -294,6 +294,9 @@ tmpfile = udir / "cert.passwd.pem" tmpfile.write(SSL_CERTIFICATE_PROTECTED) cls.w_cert_protected = cls.space.wrap(str(tmpfile)) + tmpfile = udir / "python.org.pem" + tmpfile.write(SVN_PYTHON_ORG_ROOT_CERT) + cls.w_python_org_cert = cls.space.wrap(str(tmpfile)) cls.w_dh512 = cls.space.wrap(os.path.join( os.path.dirname(__file__), 'dh512.pem')) @@ -327,6 +330,17 @@ ctx.load_verify_locations(cadata=cacert_pem) assert ctx.cert_store_stats()["x509_ca"] + def test_get_ca_certs(self): + import _ssl + ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) + ctx.load_verify_locations(self.keycert) + assert ctx.get_ca_certs() == [] + ctx.load_verify_locations(self.python_org_cert) + certs = ctx.get_ca_certs() + assert len(certs) == 1 + print(certs) + assert len(certs[0]['issuer']) == 4 + def test_load_dh_params(self): import _ssl ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) @@ -500,3 +514,46 @@ CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- """ +SVN_PYTHON_ORG_ROOT_CERT = """ +-----BEGIN CERTIFICATE----- +MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 +IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB +IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA +Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO 
+BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi +MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ +ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ +8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 +zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y +fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 +w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc +G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k +epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q +laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ +QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU +fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 +YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w +ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY +gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe +MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 +IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy +dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw +czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 +dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl +aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC +AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg +b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB +ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc +nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg +18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c +gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl +Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY +sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T 
+SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF +CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum +GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk +zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW +omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD +-----END CERTIFICATE----- +""" From noreply at buildbot.pypy.org Sat Jan 31 01:16:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 01:16:11 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Some lightweight redesign of the API, and more heavyweight redesign of the implementation Message-ID: <20150131001611.545641C0035@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75596:8286e713d46c Date: 2015-01-31 01:15 +0100 http://bitbucket.org/pypy/pypy/changeset/8286e713d46c/ Log: Some lightweight redesign of the API, and more heavyweight redesign of the implementation into a version that should be more conflict- free diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -43,57 +43,58 @@ def hint_commit_soon(): return None - -def set_num_threads(num): - """Set the number of threads to use.""" - if num < 1: - raise ValueError("'num' must be at least 1, got %r" % (num,)) - if _thread_pool.in_transaction: - raise TransactionError("cannot change the number of threads " - "while running transactions") - _thread_pool.num_threads = num +try: + from pypystm import getsegmentlimit +except ImportError: + # Not a STM-enabled PyPy. + def getsegmentlimit(): + return 1 class TransactionError(Exception): pass -# XXX right now uses the same API as the old pypy-stm. This will -# be redesigned later. - def add(f, *args, **kwds): - """Register the call 'f(*args, **kwds)' as running a new - transaction. If we are currently running in a transaction too, the - new transaction will only start after the end of the current - transaction. 
Note that if the current transaction or another running - in the meantime raises an exception, all pending transactions are - cancelled. + """Register a new transaction that will be done by 'f(*args, **kwds)'. + Must be called within the transaction in the "with TransactionQueue()" + block, or within a transaction started by this one, directly or + indirectly. """ _thread_local.pending.append((f, args, kwds)) -def run(): - """Run the pending transactions, as well as all transactions started - by them, and so on. The order is random and undeterministic. Must - be called from the main program, i.e. not from within another - transaction. If at some point all transactions are done, returns. - If a transaction raises an exception, it propagates here; in this - case all pending transactions are cancelled. +class TransactionQueue(object): + """Use in 'with TransactionQueue():'. Creates a queue of + transactions. The first transaction in the queue is the content of + the 'with:' block, which is immediately started. + + Any transaction can register new transactions that will be run + after the current one is finished, using the global function add(). 
""" - tpool = _thread_pool - if tpool.in_transaction: - raise TransactionError("recursive invocation of transaction.run()") - if not _thread_local.pending: - return # nothing to do - try: - tpool.setup() - tpool.run() - finally: - tpool.teardown() - tpool.reraise() -def number_of_transactions_in_last_run(): - return _thread_pool.transactions_run + def __init__(self, nb_segments=0): + if nb_segments <= 0: + nb_segments = getsegmentlimit() + _thread_pool.ensure_threads(nb_segments) + + def __enter__(self): + if hasattr(_thread_local, "pending"): + raise TransactionError( + "recursive invocation of TransactionQueue()") + if is_atomic(): + raise TransactionError( + "invocation of TransactionQueue() from an atomic context") + _thread_local.pending = [] + atomic.__enter__() + + def __exit__(self, exc_type, exc_value, traceback): + atomic.__exit__(exc_type, exc_value, traceback) + pending = _thread_local.pending + del _thread_local.pending + if exc_type is None and len(pending) > 0: + _thread_pool.run(pending) + # ____________________________________________________________ @@ -101,152 +102,115 @@ class _ThreadPool(object): def __init__(self): - try: - from pypystm import getsegmentlimit - self.num_threads = getsegmentlimit() - except ImportError: - self.num_threads = 4 - self.in_transaction = False - self.transactions_run = None + self.lock_running = thread.allocate_lock() + self.lock_done_running = thread.allocate_lock() + self.lock_done_running.acquire() + self.nb_threads = 0 + self.deque = collections.deque() + self.locks = [] + self.lock_deque = thread.allocate_lock() + self.exception = [] - def setup(self): - # a mutex to protect parts of _grab_next_thing_to_do() - self.lock_mutex = thread.allocate_lock() - # this lock is released if and only if there are things to do in - # 'self.pending'; both are modified together, with the lock_mutex. 
- self.lock_pending = thread.allocate_lock() - # this lock is released when we are finished at the end - self.lock_if_released_then_finished = thread.allocate_lock() - self.lock_if_released_then_finished.acquire() + def ensure_threads(self, n): + if n > self.nb_threads: + with self.lock_running: + for i in range(self.nb_threads, n): + assert len(self.locks) == self.nb_threads + self.nb_threads += 1 + thread.start_new_thread(self.thread_runner, ()) + # The newly started thread should run immediately into + # the case 'if len(self.locks) == self.nb_threads:' + # and release this lock. Wait until it does. + self.lock_done_running.acquire() + + def run(self, pending): + # For now, can't run multiple threads with each an independent + # TransactionQueue(): they are serialized. + with self.lock_running: + assert self.exception == [] + assert len(self.deque) == 0 + deque = self.deque + with self.lock_deque: + deque.extend(pending) + try: + for i in range(len(pending)): + self.locks.pop().release() + except IndexError: # pop from empty list + pass + # + self.lock_done_running.acquire() + # + if self.exception: + exc_type, exc_value, exc_traceback = self.exception + del self.exception[:] + raise exc_type, exc_value, exc_traceback + + def thread_runner(self): + deque = self.deque + lock = thread.allocate_lock() + lock.acquire() + pending = [] + _thread_local.pending = pending + lock_deque = self.lock_deque + exception = self.exception # - self.pending = _thread_local.pending - # there must be pending items at the beginning, which means that - # 'lock_pending' can indeed be released - assert self.pending - _thread_local.pending = None - # - self.num_waiting_threads = 0 - self.transactions_run = 0 - self.finished = False - self.got_exception = [] - self.in_transaction = True + while True: + # + # Look at the deque and try to fetch the next item on the left. + # If empty, we add our lock to the 'locks' list. 
+ lock_deque.acquire() + if deque: + next_transaction = deque.popleft() + lock_deque.release() + else: + self.locks.append(lock) + if len(self.locks) == self.nb_threads: + self.lock_done_running.release() + lock_deque.release() + # + # Now wait until our lock is released. + lock.acquire() + continue + # + # Now we have a next_transaction. Run it. + assert len(pending) == 0 + while True: + f, args, kwds = next_transaction + with atomic: + if len(exception) == 0: + try: + f(*args, **kwds) + except: + exception.extend(sys.exc_info()) + del next_transaction + # + # If no new 'pending' transactions have been added, exit + # this loop and go back to fetch more from the deque. + if len(pending) == 0: + break + # + # If we have some new 'pending' transactions, add them + # to the right of the deque and pop the next one from + # the left. As we do this atomically with the + # 'lock_deque', we are sure that the deque cannot be + # empty before the popleft(). (We do that even when + # 'len(pending) == 1' instead of simply assigning the + # single item to 'next_transaction', because it looks + # like a good idea to preserve some first-in-first-out + # approximation.) + with self.lock_deque: + deque.extend(pending) + next_transaction = deque.popleft() + try: + for i in range(1, len(pending)): + self.locks.pop().release() + except IndexError: # pop from empty list + pass + del pending[:] - def run(self): - # start the N threads - task_counters = [[0] for i in range(self.num_threads)] - for counter in task_counters: - thread.start_new_thread(self._run_thread, (counter,)) - # now wait. When we manage to acquire the following lock, then - # we are finished. 
- self.lock_if_released_then_finished.acquire() - self.transactions_run = sum(x[0] for x in task_counters) - - def teardown(self): - self.in_transaction = False - self.pending = None - self.lock_if_released_then_finished = None - self.lock_pending = None - self.lock_mutex = None - _thread_local.pending = collections.deque() - - def reraise(self): - exc = self.got_exception - self.got_exception = None - if exc: - raise exc[0], exc[1], exc[2] # exception, value, traceback - - def _run_thread(self, counter): - tloc_pending = _thread_local.pending - got_exception = self.got_exception - try: - while True: - self._do_it(self._grab_next_thing_to_do(tloc_pending), - got_exception) - counter[0] += 1 - except _Done: - pass - - def _grab_next_thing_to_do(self, tloc_pending): - if tloc_pending: - # grab the next thing to do from the thread-local deque - next = tloc_pending.popleft() - # add the rest, if any, to the global 'pending' - if tloc_pending: - # - self.lock_mutex.acquire() - if not self.pending: - # self.pending is empty so far, but we are adding stuff. - # we have to release the following lock. - self.lock_pending.release() - self.pending.extend(tloc_pending) - self.lock_mutex.release() - # - tloc_pending.clear() - return next - # - self.lock_mutex.acquire() - while True: - try: - next = self.pending.popleft() - except IndexError: - # self.pending is empty: wait until it no longer is. - pass - else: - # self.pending was not empty. If now it is empty, then - # fix the status of 'lock_pending'. - if not self.pending: - self.lock_pending.acquire() - self.lock_mutex.release() - return next - # - # first check if all N threads are waiting here. - assert not self.finished - self.num_waiting_threads += 1 - if self.num_waiting_threads == self.num_threads: - # yes, so finished! unlock this to wake up the other - # threads, which are all waiting on the following acquire(). 
- self.finished = True - self.lock_pending.release() - # - self.lock_mutex.release() - self.lock_pending.acquire() - self.lock_pending.release() - self.lock_mutex.acquire() - # - self.num_waiting_threads -= 1 - if self.finished: - last_one_to_leave = self.num_waiting_threads == 0 - self.lock_mutex.release() - if last_one_to_leave: - self.lock_if_released_then_finished.release() - raise _Done - - @staticmethod - def _do_it((f, args, kwds), got_exception): - # this is a staticmethod in order to make sure that we don't - # accidentally use 'self' in the atomic block. - try: - hint_commit_soon() - with signals_enabled: - with atomic: - if not got_exception: - f(*args, **kwds) - hint_commit_soon() - except: - got_exception[:] = sys.exc_info() _thread_pool = _ThreadPool() - - -class _Done(Exception): - pass - - -class _ThreadLocal(thread._local): - def __init__(self): - self.pending = collections.deque() - -_thread_local = _ThreadLocal() +_thread_local = thread._local() def XXXreport_abort_info(info): From noreply at buildbot.pypy.org Sat Jan 31 02:30:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 02:30:41 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: In-progress: don't store the stm_location in the gcmap. It's nice to avoid Message-ID: <20150131013041.E30AC1C0FE5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75598:3b8f6c3b4fb3 Date: 2015-01-31 02:30 +0100 http://bitbucket.org/pypy/pypy/changeset/3b8f6c3b4fb3/ Log: In-progress: don't store the stm_location in the gcmap. It's nice to avoid more run-time writes of constants, but the logic to fetch it from there is completely missing and would be hard... 
diff --git a/rpython/jit/backend/llsupport/gcmap.py b/rpython/jit/backend/llsupport/gcmap.py --- a/rpython/jit/backend/llsupport/gcmap.py +++ b/rpython/jit/backend/llsupport/gcmap.py @@ -4,29 +4,15 @@ from rpython.rlib.rarithmetic import r_uint from rpython.jit.backend.llsupport.symbolic import WORD -GCMAP_STM_LOCATION = 2 # xxx add this only if stm - -def allocate_gcmap(assembler, frame_depth, fixed_size, stm_location=None): +def allocate_gcmap(assembler, frame_depth, fixed_size): size = frame_depth + fixed_size - malloc_size = (size // WORD // 8 + 1) + GCMAP_STM_LOCATION + 1 + malloc_size = (size // WORD // 8 + 1) + 1 rawgcmap = assembler.datablockwrapper.malloc_aligned(WORD * malloc_size, WORD) # set the length field rffi.cast(rffi.CArrayPtr(lltype.Signed), rawgcmap)[0] = malloc_size - 1 gcmap = rffi.cast(lltype.Ptr(jitframe.GCMAP), rawgcmap) # zero the area - for i in range(malloc_size - 3): + for i in range(malloc_size - 1): gcmap[i] = r_uint(0) - # write the stm_location in the last two words - raw_stm_location = extract_raw_stm_location(stm_location) - gcmap[malloc_size - 3], gcmap[malloc_size - 2] = raw_stm_location return gcmap - -def extract_raw_stm_location(stm_location): - if stm_location is not None: - num = rffi.cast(lltype.Unsigned, stm_location.num) - ref = rffi.cast(lltype.Unsigned, stm_location.ref) - else: - num = r_uint(0) - ref = r_uint(0) - return (num, ref) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -5,7 +5,6 @@ from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, DEBUG_COUNTER, debug_bridge) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from rpython.jit.backend.llsupport.gcmap import extract_raw_stm_location from rpython.jit.metainterp.history import Const, Box, VOID from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT from 
rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory @@ -401,19 +400,23 @@ # current 'stm_location' so that it is found. The easiest # is to simply push it on the shadowstack, from its source # location as two extra arguments on the machine stack - # (at this point containing: [retaddr][ref][num][obj]...) + # (at this point containing: [usual STM_FRAME_FIXED_SIZE] + # [obj] + # [num] + # [ref] + # [retaddr]) # XXX this should also be done if 'for_frame' is true... - mc.MOV(esi, self.heap_shadowstack_top()) + mc.MOV_rs(esi.value, STM_SHADOWSTACK_BASE_OFS + 4 * WORD) + # esi = base address in the shadowstack + 1 + # write the marker to [esi - 1] and [esi + 7] mc.MOV_rs(edi.value, 2 * WORD) # [num] # do here the 'num = (num<<1) + 1' rather than at the caller # site, to increase the chances that it can use PUSH_i8 mc.LEA_ra(edi.value, (self.SEGMENT_NO, rx86.NO_BASE_REGISTER, edi.value, 1, +1)) - mc.MOV_mr((self.SEGMENT_NO, esi.value, 0), edi.value) + mc.MOV_mr((self.SEGMENT_NO, esi.value, -1), edi.value) mc.MOV_rs(edi.value, 1 * WORD) # [ref] - mc.MOV_mr((self.SEGMENT_NO, esi.value, WORD), edi.value) - mc.LEA_rm(esi.value, (self.SEGMENT_NO, esi.value, 2 * WORD)) - mc.MOV(self.heap_shadowstack_top(), esi) + mc.MOV_mr((self.SEGMENT_NO, esi.value, +7), edi.value) mc.MOV_rs(edi.value, 3 * WORD) # [obj] elif IS_X86_32: # we have 2 extra words on stack for retval and we pass 1 extra @@ -463,11 +466,6 @@ # if not for_frame: - if self.cpu.gc_ll_descr.stm: - # SUB touches CPU flags - mc.MOV(esi, self.heap_shadowstack_top()) - mc.LEA_rm(esi.value, (self.SEGMENT_NO, esi.value, -2 * WORD)) - mc.MOV(self.heap_shadowstack_top(), esi) if IS_X86_32: # ADD touches CPU flags mc.LEA_rs(esp.value, 2 * WORD) @@ -870,9 +868,14 @@ # again (ensured by the code calling the loop)) mc = self.mc mc.MOV(ebx, self.heap_shadowstack_top()) - mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value) - # MOV [ebx], ebp if self.cpu.gc_ll_descr.stm: + # the first two words are usually the 
stm_location marker, + # but for now it can be invalid (as long as it's not fully + # random) + mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0 * WORD), ebp.value) + mc.MOV_mr((self.SEGMENT_NO, ebx.value, 1 * WORD), ebp.value) + mc.MOV_mr((self.SEGMENT_NO, ebx.value, 2 * WORD), ebp.value) + # inlining stm_rewind_jmp_enterframe() r11 = X86_64_SCRATCH_REG rjh = self.heap_rjthread_head() @@ -880,13 +883,15 @@ mc.MOV(r11, rjh) # MOV r11, [rjthread.head] mc.MOV_sr(STM_SHADOWSTACK_BASE_OFS, ebx.value) # MOV [esp+ssbase], ebx - mc.ADD_ri(ebx.value, WORD-1) # ADD ebx, 7 + mc.ADD_ri(ebx.value, 3*WORD-1) # ADD ebx, 23 mc.MOV_sr(STM_PREV_OFS, r11.value) # MOV [esp+prev], r11 mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx mc.LEA_rs(r11.value, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs] mc.MOV(rjh, r11) # MOV [rjthread.head], r11 # else: + mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value) + # MOV [ebx], ebp mc.ADD_ri(ebx.value, WORD) # ADD ebx, WORD mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx @@ -1934,6 +1939,22 @@ self.mc.JMP(imm(target)) return startpos + def update_stm_location(self, extra_stack=0): + if self.cpu.gc_ll_descr.stm: + num, ref = self._regalloc.extract_raw_stm_location() + mc.MOV_rs(r11.value, STM_SHADOWSTACK_BASE_OFS + extra_stack) + # r11 = base address in the shadowstack + 1 + # write the marker to [esi - 1] and [esi + 7] + for (targetofs, number) in [(-1, num), (+7, ref)]: + if rx86.fits_in_32bits(number): + mc.MOV_mi((self.SEGMENT_NO, r11.value, targetofs), number) + else: + mc.MOV32_mi((self.SEGMENT_NO, r11.value, targetofs), + rffi.cast(lltype.Signed, + rffi.cast(rffi.INT, number))) + mc.MOV32_mi((self.SEGMENT_NO, r11.value, targetofs + 4), + number >> 32) + def push_gcmap(self, mc, gcmap, push=False, mov=False, store=False): if push: mc.PUSH(imm(rffi.cast(lltype.Signed, gcmap))) @@ -2267,10 +2288,9 @@ # still ok. The one or three words pushed here are removed # by the callee. 
assert IS_X86_64 - num, ref = extract_raw_stm_location( - self._regalloc.stm_location) - mc.PUSH(imm(rffi.cast(lltype.Signed, num))) - mc.PUSH(imm(rffi.cast(lltype.Signed, ref))) + num, ref = self._regalloc.extract_raw_stm_location() + mc.PUSH(imm(num)) + mc.PUSH(imm(ref)) if is_frame and align_stack: mc.SUB_ri(esp.value, 16 - WORD) # erase the return address mc.CALL(imm(self.wb_slowpath[helper_num])) @@ -2433,6 +2453,7 @@ self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() # + self.update_stm_location() self.push_gcmap(self.mc, gcmap, store=True) # # first save away the 4 registers from 'cond_call_register_arguments' diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -84,6 +84,7 @@ self.asm.set_extra_stack_depth(self.mc, -self.current_esp) noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() gcmap = self.asm._regalloc.get_gcmap([eax], noregs=noregs) + self.asm.update_stm_location(-self.current_esp) self.asm.push_gcmap(self.mc, gcmap, store=True) def pop_gcmap(self): @@ -204,7 +205,7 @@ # in 'ebx'), and if not, we fall back to 'reacqgil_addr'. mc.J_il8(rx86.Conditions['NE'], 0) jne_location = mc.get_relative_pos() - # here, ecx is zero (so rpy_fastgil was not acquired) + # here, ecx (=old_value) is zero (so rpy_fastgil was not acquired) rst = gcrootmap.get_root_stack_top_addr() mc = self.mc mc.CMP(ebx, self.asm.heap_tl(rst)) @@ -212,6 +213,7 @@ je_location = mc.get_relative_pos() # revert the rpy_fastgil acquired above, so that the # general 'reacqgil_addr' below can acquire it again... 
+ assert ecx is old_value mc.MOV(heap(self.asm.SEGMENT_NO, fastgil), ecx) # patch the JNE above offset = mc.get_relative_pos() - jne_location diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -325,10 +325,7 @@ self.xrm.position = i # if op.stm_location is not None: - if (self.stm_location is None or - self.stm_location.num != op.stm_location.num or - self.stm_location.ref != op.stm_location.ref): - self.stm_location = op.stm_location + self.stm_location = op.stm_location # if op.has_no_side_effect() and op.result not in self.longevity: i += 1 @@ -945,9 +942,18 @@ gc_ll_descr.get_nursery_top_addr(), lengthloc, itemsize, maxlength, gcmap, arraydescr) + def extract_raw_stm_location(self): + if self.stm_location is not None: + num = rffi.cast(lltype.Signed, self.stm_location.num) + ref = rffi.cast(lltype.Signed, self.stm_location.ref) + else: + num = 0 + ref = 0 + return (num, ref) + def get_empty_gcmap(self, frame_depth): return allocate_gcmap(self.assembler, frame_depth, - JITFRAME_FIXED_SIZE, self.stm_location) + JITFRAME_FIXED_SIZE) def get_gcmap(self, forbidden_regs=[], noregs=False): frame_depth = self.fm.get_frame_depth() From noreply at buildbot.pypy.org Sat Jan 31 05:02:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 05:02:54 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: translation fixes Message-ID: <20150131040254.A2CFF1C0FD4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75599:c331dad2c6ab Date: 2015-01-31 05:01 +0100 http://bitbucket.org/pypy/pypy/changeset/c331dad2c6ab/ Log: translation fixes diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -138,10 +138,6 @@ if not gcmap: return # done gcmap_lgt = (gcmap + 
GCMAPLENGTHOFS).signed[0] - # - from rpython.jit.backend.llsupport.gcmap import GCMAP_STM_LOCATION - gcmap_lgt -= GCMAP_STM_LOCATION - # no = 0 while no < gcmap_lgt: cur = (gcmap + GCMAPBASEOFS + UNSIGN_SIZE * no).unsigned[0] diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1939,7 +1939,7 @@ self.mc.JMP(imm(target)) return startpos - def update_stm_location(self, extra_stack=0): + def update_stm_location(self, mc, extra_stack=0): if self.cpu.gc_ll_descr.stm: num, ref = self._regalloc.extract_raw_stm_location() mc.MOV_rs(r11.value, STM_SHADOWSTACK_BASE_OFS + extra_stack) @@ -2453,7 +2453,7 @@ self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() # - self.update_stm_location() + self.update_stm_location(self.mc) self.push_gcmap(self.mc, gcmap, store=True) # # first save away the 4 registers from 'cond_call_register_arguments' diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -84,7 +84,7 @@ self.asm.set_extra_stack_depth(self.mc, -self.current_esp) noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() gcmap = self.asm._regalloc.get_gcmap([eax], noregs=noregs) - self.asm.update_stm_location(-self.current_esp) + self.asm.update_stm_location(self.mc, -self.current_esp) self.asm.push_gcmap(self.mc, gcmap, store=True) def pop_gcmap(self): From noreply at buildbot.pypy.org Sat Jan 31 10:37:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 10:37:50 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20150131093750.371731C1033@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75600:3790409ce24c Date: 2015-01-31 10:37 +0100 http://bitbucket.org/pypy/pypy/changeset/3790409ce24c/ Log: Fix diff --git 
a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1944,8 +1944,8 @@ num, ref = self._regalloc.extract_raw_stm_location() mc.MOV_rs(r11.value, STM_SHADOWSTACK_BASE_OFS + extra_stack) # r11 = base address in the shadowstack + 1 - # write the marker to [esi - 1] and [esi + 7] - for (targetofs, number) in [(-1, num), (+7, ref)]: + # write the marker to [r11 - 1] and [r11 + 7] + for (targetofs, number) in [(-1, 2 * num + 1), (+7, ref)]: if rx86.fits_in_32bits(number): mc.MOV_mi((self.SEGMENT_NO, r11.value, targetofs), number) else: From noreply at buildbot.pypy.org Sat Jan 31 10:55:03 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 10:55:03 +0100 (CET) Subject: [pypy-commit] pypy default: A skipped test about a case that is missing in malloc-removal Message-ID: <20150131095503.C51941C1065@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75601:e3635a30ce4d Date: 2015-01-31 10:55 +0100 http://bitbucket.org/pypy/pypy/changeset/e3635a30ce4d/ Log: A skipped test about a case that is missing in malloc-removal diff --git a/rpython/translator/backendopt/test/test_malloc.py b/rpython/translator/backendopt/test/test_malloc.py --- a/rpython/translator/backendopt/test/test_malloc.py +++ b/rpython/translator/backendopt/test/test_malloc.py @@ -340,3 +340,15 @@ u[0].s.x = x return u[0].s.x graph = self.check(f, [int], [42], 42) + + def test_two_paths_one_with_constant(self): + py.test.skip("XXX implement me?") + def fn(n): + if n > 100: + tup = (0,) + else: + tup = (n,) + (n,) # <- flowspace + return tup[0] + + self.check(fn, [int], [42], 42) From noreply at buildbot.pypy.org Sat Jan 31 14:43:32 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 14:43:32 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Use pypystm.stmset() instead of stmidset (which should probably be Message-ID: 
<20150131134332.D750C1C0035@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75608:b4619d9ff598 Date: 2015-01-31 14:43 +0100 http://bitbucket.org/pypy/pypy/changeset/b4619d9ff598/ Log: Use pypystm.stmset() instead of stmidset (which should probably be removed soon). Also, "try:" instead of crash. diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -17,7 +17,10 @@ log = py.log.Producer("annrpython") py.log.setconsumer("annrpython", ansi_log) -from transaction import stmidset +try: + from pypystm import stmset +except ImportError: + stmset = set class RPythonAnnotator(object): @@ -38,7 +41,7 @@ self.added_blocks = None # see processblock() below self.links_followed = {} # set of links that have ever been followed self.notify = {} # {block: {positions-to-reflow-from-when-done}} - self.fixed_graphs = stmidset() # set of graphs not to annotate again + self.fixed_graphs = stmset() # set of graphs not to annotate again self.blocked_blocks = {} # set of {blocked_block: (graph, index)} # --- the following information is recorded for debugging --- self.blocked_graphs = {} # set of graphs that have blocked blocks From noreply at buildbot.pypy.org Sat Jan 31 15:40:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 15:40:20 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: tweak tweak tweak to make this list thread-local Message-ID: <20150131144020.9CD121C0035@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75609:e764f42b5dcb Date: 2015-01-31 15:12 +0100 http://bitbucket.org/pypy/pypy/changeset/e764f42b5dcb/ Log: tweak tweak tweak to make this list thread-local diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -252,7 +252,7 @@ original_graph_count = len(translator.graphs) 
perform_normalizations(rtyper.annotator) for r in self.delayedreprs: - r.set_setup_delayed(False) + r.set_setup_delayed(False, rtyper) rtyper.call_all_setups() for p, repr, obj in self.delayedconsts: p._become(repr.convert_const(obj)) diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -79,12 +79,14 @@ def is_setup_delayed(self): return self._initialized == setupstate.DELAYED - def set_setup_delayed(self, flag): + def set_setup_delayed(self, flag, rtyper=None): assert self._initialized in (setupstate.NOTINITIALIZED, setupstate.DELAYED) if flag: self._initialized = setupstate.DELAYED else: + if self._initialized == setupstate.DELAYED: + rtyper._list_must_call_setup().append(self) self._initialized = setupstate.NOTINITIALIZED def set_setup_maybe_delayed(self): diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -32,6 +32,11 @@ from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block +try: + from pypystm import stmset, stmdict +except ImportError: + stmset, stmdict = set, dict + class RPythonTyper(object): from rpython.rtyper.rmodel import log @@ -40,9 +45,9 @@ self.annotator = annotator self.lowlevel_ann_policy = LowLevelAnnotatorPolicy(self) self.type_system = LowLevelTypeSystem() - self.reprs = {} - self._reprs_must_call_setup = [] - self._seen_reprs_must_call_setup = {} + self.reprs = stmdict() + self._seen_reprs_must_call_setup = stmset() + self._all_lists_must_call_setup = [] self._dict_traits = {} self.rootclass_repr = RootClassRepr(self) self.rootclass_repr.setup() @@ -98,8 +103,16 @@ if repr in self._seen_reprs_must_call_setup: #warning("ignoring already seen repr for setup: %r" %(repr,)) return - self._reprs_must_call_setup.append(repr) - self._seen_reprs_must_call_setup[repr] = True + self._list_must_call_setup().append(repr) + 
self._seen_reprs_must_call_setup.add(repr) + + def _list_must_call_setup(self): + try: + lst = annmodel.TLS._reprs_must_call_setup + except AttributeError: + lst = annmodel.TLS._reprs_must_call_setup = [] + self._all_lists_must_call_setup.append(lst) + return lst def lltype_to_classdef_mapping(self): result = {} @@ -216,6 +229,8 @@ else: tracking = lambda block: None + self.call_all_setups(all_threads=True) + try: import transaction except ImportError: @@ -251,7 +266,7 @@ self.already_seen.update(dict.fromkeys(pending, True)) # make sure all reprs so far have had their setup() called - self.call_all_setups() + self.call_all_setups(all_threads=True) if self.typererrors: self.dump_typererrors(to_log=True) @@ -286,20 +301,23 @@ else: print minmsg - def call_all_setups(self): + def call_all_setups(self, all_threads=False): # make sure all reprs so far have had their setup() called must_setup_more = [] - delayed = [] - while self._reprs_must_call_setup: - r = self._reprs_must_call_setup.pop() - if r.is_setup_delayed(): - delayed.append(r) - else: - r.setup() - must_setup_more.append(r) + if all_threads: + lsts = self._all_lists_must_call_setup + else: + lsts = [self._list_must_call_setup()] + for lst in lsts: + while lst: + r = lst.pop() + if r.is_setup_delayed(): + pass # will be re-added in set_setup_delayed(False) + else: + r.setup() + must_setup_more.append(r) for r in must_setup_more: r.setup_final() - self._reprs_must_call_setup.extend(delayed) def setconcretetype(self, v): assert isinstance(v, Variable) From noreply at buildbot.pypy.org Sat Jan 31 16:03:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 16:03:51 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix this broken logic (duh!). 
If TLS.hash_level is missing it is assumed to be zero, but Message-ID: <20150131150351.947351C0141@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75610:aa776127acf6 Date: 2015-01-31 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/aa776127acf6/ Log: Fix this broken logic (duh!). If TLS.hash_level is missing it is assumed to be zero, but then we must also check self.__cached_hash. diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -141,13 +141,15 @@ # NB. the __cached_hash should neither be used nor updated # if we enter with hash_level > 0, because the computed # __hash__ can be different in this situation. - hash_level = 0 try: hash_level = TLS.nested_hash_level - if hash_level == 0: + except AttributeError: + hash_level = 0 + if hash_level == 0: + try: return self.__cached_hash - except AttributeError: - pass + except AttributeError: + pass if hash_level >= 3: return 0 items = self.__dict__.items() From noreply at buildbot.pypy.org Sat Jan 31 16:26:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 16:26:19 +0100 (CET) Subject: [pypy-commit] cffi default: Simplify the structure of this function, and fix NativeIO Message-ID: <20150131152619.79B701C024F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1646:51caa9770248 Date: 2015-01-31 16:26 +0100 http://bitbucket.org/cffi/cffi/changeset/51caa9770248/ Log: Simplify the structure of this function, and fix NativeIO to encode to 'ascii' by default. 
diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -16,7 +16,11 @@ if sys.version_info >= (3,): NativeIO = io.StringIO else: - NativeIO = io.BytesIO + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) class Verifier(object): @@ -150,35 +154,36 @@ self._vengine.collect_types() self._has_module = True - def _write_source(self, file=None): - # Write our source file to an in memory file. - self._vengine._f = NativeIO() + def _write_source_to(self, file): + self._vengine._f = file try: self._vengine.write_source_to_f() finally: - source_data = self._vengine._f.getvalue() del self._vengine._f - # Determine if this matches the current file - if file is None and os.path.exists(self.sourcefilename): - with open(self.sourcefilename, "r") as fp: - needs_written = not (fp.read() == source_data) + def _write_source(self, file=None): + if file is not None: + self._write_source_to(file) else: - needs_written = True + # Write our source file to an in memory file. 
+ f = NativeIO() + self._write_source_to(f) + source_data = f.getvalue() - # Actually write the file out if it doesn't match - must_close = (file is None) - if needs_written: - if must_close: + # Determine if this matches the current file + if os.path.exists(self.sourcefilename): + with open(self.sourcefilename, "r") as fp: + needs_written = not (fp.read() == source_data) + else: + needs_written = True + + # Actually write the file out if it doesn't match + if needs_written: _ensure_dir(self.sourcefilename) - file = open(self.sourcefilename, "w") - try: - file.write(source_data) - finally: - if must_close: - file.close() + with open(self.sourcefilename, "w") as fp: + fp.write(source_data) - if must_close: + # Set this flag self._has_source = True def _compile_module(self): From noreply at buildbot.pypy.org Sat Jan 31 17:51:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 17:51:26 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Make 'policy' and 'position_key' thread-local Message-ID: <20150131165126.5ADC51C0141@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75611:472276917c27 Date: 2015-01-31 17:51 +0100 http://bitbucket.org/pypy/pypy/changeset/472276917c27/ Log: Make 'policy' and 'position_key' thread-local diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -49,9 +49,9 @@ self.frozen = False if policy is None: from rpython.annotator.policy import AnnotatorPolicy - self.policy = AnnotatorPolicy() + self.default_policy = AnnotatorPolicy() else: - self.policy = policy + self.default_policy = policy if bookkeeper is None: bookkeeper = Bookkeeper(self) self.bookkeeper = bookkeeper @@ -101,14 +101,13 @@ result.append((graph, inputcells)) return annmodel.s_ImpossibleValue - prevpolicy = self.policy - self.policy = policy + prevpolicy = self.bookkeeper.change_policy(policy) self.bookkeeper.enter(None) try: 
desc.pycall(schedule, args, annmodel.s_ImpossibleValue) finally: self.bookkeeper.leave() - self.policy = prevpolicy + self.bookkeeper.change_policy(prevpolicy) [(graph, inputcells)] = result return graph, inputcells @@ -125,15 +124,16 @@ return graph def complete_helpers(self, policy): - saved = self.policy, self.added_blocks - self.policy = policy + saved = self.added_blocks + prevpolicy = self.bookkeeper.change_policy(policy) try: self.added_blocks = {} self.complete() # invoke annotation simplifications for the new blocks self.simplify(block_subset=self.added_blocks) finally: - self.policy, self.added_blocks = saved + self.bookkeeper.change_policy(prevpolicy) + self.added_blocks = saved def build_graph_types(self, flowgraph, inputcells, complete_now=True): checkgraph(flowgraph) @@ -200,7 +200,7 @@ """Process pending blocks until none is left.""" while True: self.complete_pending_blocks() - self.policy.no_more_blocks_to_annotate(self) + self.bookkeeper.get_policy().no_more_blocks_to_annotate(self) if not self.pendingblocks: break # finished # make sure that the return variables of all graphs is annotated diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -9,7 +9,7 @@ from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, + SomeBuiltin, SomePBC, SomeInteger, TLS, TlsClass, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, SomeWeakRef, SomeByteArray, SomeConstantType) @@ -53,7 +53,7 @@ def __init__(self, annotator): self.annotator = annotator - self.policy = annotator.policy + self.bkTLS = TlsClass() self.descs = {} # map Python objects to their XxxDesc wrappers self.methoddescs = {} # map (funcdesc, 
classdef) to the MethodDesc self.classdefs = [] # list of all ClassDefs @@ -75,17 +75,31 @@ delayed_imports() + def get_policy(self): + return getattr(self.bkTLS, 'policy', self.annotator.default_policy) + + def change_policy(self, new_policy): + if new_policy is None: + return None + old_policy = self.get_policy() + self.bkTLS.policy = new_policy + return old_policy + + @property + def position_key(self): + return self.bkTLS.position_key + def enter(self, position_key): """Start of an operation. The operation is uniquely identified by the given key.""" assert not hasattr(self, 'position_key'), "don't call enter() nestedly" - self.position_key = position_key + self.bkTLS.position_key = position_key TLS.bookkeeper = self def leave(self): """End of an operation.""" del TLS.bookkeeper - del self.position_key + del self.bkTLS.position_key def compute_at_fixpoint(self): # getbookkeeper() needs to work during this function, so provide @@ -570,7 +584,7 @@ return self.annotator.whereami(self.position_key) def event(self, what, x): - return self.annotator.policy.event(self, what, x) + return self.get_policy().event(self, what, x) def warning(self, msg): return self.annotator.warning(msg) diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -276,7 +276,7 @@ # get the specializer based on the tag of the 'pyobj' # (if any), according to the current policy tag = getattr(self.pyobj, '_annspecialcase_', None) - policy = self.bookkeeper.annotator.policy + policy = self.bookkeeper.get_policy() self.specializer = policy.get_specializer(tag) enforceargs = getattr(self.pyobj, '_annenforceargs_', None) signature = getattr(self.pyobj, '_signature_', None) diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -46,12 +46,11 @@ STATE = State() try: - import thread - TLS = thread._local() + from 
thread import _local as TlsClass except ImportError: - class Tls(object): + class TlsClass(object): pass - TLS = Tls() +TLS = TlsClass() class SomeObject(object): From noreply at buildbot.pypy.org Sat Jan 31 21:03:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 21:03:08 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Make 'added_blocks' a thread-local too Message-ID: <20150131200308.792021C028E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75613:3da78a2719fc Date: 2015-01-31 17:57 +0100 http://bitbucket.org/pypy/pypy/changeset/3da78a2719fc/ Log: Make 'added_blocks' a thread-local too diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -38,7 +38,6 @@ self.translator = translator self.pendingblocks = {} # map {block: graph-containing-it} self.annotated = {} # set of blocks already seen - self.added_blocks = None # see processblock() below self.links_followed = {} # set of links that have ever been followed self.notify = {} # {block: {positions-to-reflow-from-when-done}} self.fixed_graphs = stmset() # set of graphs not to annotate again @@ -56,18 +55,6 @@ bookkeeper = Bookkeeper(self) self.bookkeeper = bookkeeper - def __getstate__(self): - attrs = """translator pendingblocks annotated links_followed - notify bookkeeper frozen policy added_blocks""".split() - ret = self.__dict__.copy() - for key, value in ret.items(): - if key not in attrs: - assert type(value) is dict, ( - "%r is not dict. 
please update %s.__getstate__" % - (key, self.__class__.__name__)) - ret[key] = {} - return ret - #___ convenience high-level interface __________________ def build_types(self, function, input_arg_types, complete_now=True, @@ -124,16 +111,15 @@ return graph def complete_helpers(self, policy): - saved = self.added_blocks + prevaddedblocks = self.bookkeeper.change_added_blocks({}) prevpolicy = self.bookkeeper.change_policy(policy) try: - self.added_blocks = {} self.complete() # invoke annotation simplifications for the new blocks - self.simplify(block_subset=self.added_blocks) + self.simplify(block_subset=self.bookkeeper.get_added_blocks()) finally: self.bookkeeper.change_policy(prevpolicy) - self.added_blocks = saved + self.bookkeeper.change_added_blocks(prevaddedblocks) def build_graph_types(self, flowgraph, inputcells, complete_now=True): checkgraph(flowgraph) @@ -204,8 +190,9 @@ if not self.pendingblocks: break # finished # make sure that the return variables of all graphs is annotated - if self.added_blocks is not None: - newgraphs = [self.annotated[block] for block in self.added_blocks] + added_blocks = self.bookkeeper.get_added_blocks() + if added_blocks is not None: + newgraphs = [self.annotated[block] for block in added_blocks] newgraphs = dict.fromkeys(newgraphs) got_blocked_blocks = False in newgraphs else: @@ -357,8 +344,9 @@ # The dict 'added_blocks' is used by rpython.annlowlevel to # detect which are the new blocks that annotating an additional # small helper creates. 
- if self.added_blocks is not None: - self.added_blocks[block] = True + added_blocks = self.bookkeeper.get_added_blocks() + if added_blocks is not None: + added_blocks[block] = True def reflowpendingblock(self, graph, block): assert not self.frozen diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -85,6 +85,14 @@ self.bkTLS.policy = new_policy return old_policy + def get_added_blocks(self): + return getattr(self.bkTLS, 'added_blocks', None) + + def change_added_blocks(self, new_added_blocks): + old_added_blocks = self.get_added_blocks() + self.bkTLS.added_blocks = new_added_blocks + return old_added_blocks + @property def position_key(self): return self.bkTLS.position_key @@ -107,7 +115,7 @@ self.enter(None) try: def call_sites(): - newblocks = self.annotator.added_blocks + newblocks = self.get_added_blocks() if newblocks is None: newblocks = self.annotator.annotated # all of them annotation = self.annotator.annotation From noreply at buildbot.pypy.org Sat Jan 31 21:03:09 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 21:03:09 +0100 (CET) Subject: [pypy-commit] pypy default: "Would type annotations help PyPy's performance?" Message-ID: <20150131200309.9F9061C028E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75614:0f851cef8d2f Date: 2015-01-31 21:03 +0100 http://bitbucket.org/pypy/pypy/changeset/0f851cef8d2f/ Log: "Would type annotations help PyPy's performance?" diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -197,6 +197,55 @@ (now-dead) object are still true about the new object. + +Would type annotations help PyPy's performance? +----------------------------------------------- + +Two examples of type annotations that are being proposed for improved +performance are `Cython types`__ and `PEP 484 - Type Hints`__. + +.. 
__: http://docs.cython.org/src/reference/language_basics.html#declaring-data-types +.. __: https://www.python.org/dev/peps/pep-0484/ + +**Cython types** are, by construction, similar to C declarations. For +example, a local variable or an instance attribute can be declared +``"cdef int"`` to force a machine word to be used. This changes the +usual Python semantics (e.g. no overflow checks, and errors when +trying to write other types of objects there). It gives some extra +performance, but the exact benefits are unclear: right now +(January 2015) for example we are investigating a technique that would +store machine-word integers directly on instances, giving part of the +benefits without the user-supplied ``"cdef int"``. + +**PEP 484 - Type Hints,** on the other hand, is almost entirely +useless if you're looking at performance. First, as the name implies, +they are *hints:* they must still be checked at runtime, like PEP 484 +says. Or maybe you're fine with a mode in which you get very obscure +crashes when the type annotations are wrong; but even in that case the +speed benefits would be extremely minor. + +There are several reasons for why. One of them is that annotations +are at the wrong level (e.g. a PEP 484 "int" corresponds to Python 3's +int type, which does not necessarily fits inside one machine word; +even worse, an "int" annotation allows arbitrary int subclasses). +Another is that a lot more information is needed to produce good code +(e.g. "this ``f()`` called here really means this function there, and +will never be monkey-patched" -- same with ``len()`` or ``list()``, +btw). The third reason is that some "guards" in PyPy's JIT traces +don't really have an obvious corresponding type (e.g. "this dict is so +far using keys which don't override ``__hash__`` so a more efficient +implementation was used"). 
Many guards don't even any correspondence +with types at all ("this class attribute was not modified"; "the loop +counter did not reach zero so we don't need to release the GIL"; and +so on). + +As PyPy works right now, it is able to derive far more useful +information than can ever be given by PEP 484, and it works +automatically. As far as we know, this is true even if we would add +other techniques to PyPy, like a fast first-pass JIT. + + + .. _`prolog and javascript`: Can I use PyPy's translation toolchain for other languages besides Python? From noreply at buildbot.pypy.org Sat Jan 31 21:06:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 21:06:30 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: next improvement Message-ID: <20150131200630.6C0B61C028E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r75615:e0d26d687dd1 Date: 2015-01-31 21:06 +0100 http://bitbucket.org/pypy/pypy/changeset/e0d26d687dd1/ Log: next improvement diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -131,11 +131,12 @@ for call_op in call_sites(): self.consider_call_site(call_op) - for pbc, args_s in self.emulated_pbc_calls.itervalues(): - args = simple_args(args_s) - self.consider_call_site_for_pbc(pbc, args, - s_ImpossibleValue, None) - self.emulated_pbc_calls = {} + if self.emulated_pbc_calls: + for pbc, args_s in self.emulated_pbc_calls.itervalues(): + args = simple_args(args_s) + self.consider_call_site_for_pbc(pbc, args, + s_ImpossibleValue, None) + self.emulated_pbc_calls.clear() finally: self.leave() From noreply at buildbot.pypy.org Sat Jan 31 22:15:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 22:15:15 +0100 (CET) Subject: [pypy-commit] stmgc bag: intermediate check-in. 
the branch may be dropped after all, as Message-ID: <20150131211515.80DA31C02B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: bag Changeset: r1595:c30130b82726 Date: 2015-01-31 22:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/c30130b82726/ Log: intermediate check-in. the branch may be dropped after all, as the need for "bags" recedes. diff --git a/c7/stm/bag.c b/c7/stm/bag.c --- a/c7/stm/bag.c +++ b/c7/stm/bag.c @@ -2,15 +2,16 @@ Design of stmgc's "bag" objects =============================== -A "bag" is an unordered list of objects. You can only add objects and -pop a random object. +A "bag" is an unordered list of objects. *This is not a STM-aware +object at all!* You can add objects and pop a random object. -Conflicts never occur, but popping may return "the bag looks empty", -which can be wrong in the serialized order. The caller should be -ready to handle this case. The guarantee is that if you get the -result "the bag looks empty" in all threads that may add objects to -it, and afterwards none of the threads adds any object, then at this -point the bag is really empty. +A typical use case is to collect the objects we want to add in some +regular STM list, and then when we commit, we copy the objects into +the bag. Note that we *copy*, not *move*: as usual, we must not +change the STM list outside a transaction. + +When we pop an object, we should arrange for it to be put back into +the bag if the transaction aborts. Implementation @@ -43,178 +44,79 @@ */ +typedef struct bag_node_s { + struct bag_node_s *next; + object_t *value; +} bag_node_t; + + typedef union { - /* Data describing the deque and abort_list belonging to the segment i. */ + /* Data describing the bag from the point of view of segment 'i'. */ + struct { - /* Left deque position: read/write by whoever has got the 'lock'. - Don't access at all without holding the lock. */ - uintptr_t *deque_left; - - /* Middle deque position: written only by segment i when it holds - the 'lock'. 
Can be read freely by segment i. Can be - read by the other segments when they hold the 'lock'. */ - uintptr_t *deque_middle; - - /* Right deque position: only accessed by the segment i. No - locking needed. */ - uintptr_t *deque_right; - - /* Abort list. Only accessed by the segment i. */ - struct list_s *abort_list; + bag_node_t *added; /* added in current transaction */ + bag_node_t *removed; /* removed in current transaction */ /* The segment i's transaction's unique_start_time, as it was the last time we did a change to this stm_bag_seg_t. Used to detect lazily when a commit occurred in-between. */ uint64_t start_time; - - /* This flag is set to arm the bag-specific "write barrier". - When adding new items to the bag, when this flag is set we - must record the bag into the 'modified_bags' list, used for - minor collections, so that we can trace the newly added - items. */ - bool must_add_to_modified_bags; - - /* The lock, to access deque_left and deque_middle as - explained above. */ - uint8_t lock; }; char alignment[64]; /* 64-bytes alignment, to prevent false sharing */ -} stm_bag_seg_t; + +} bag_seg_t; + struct stm_bag_s { - stm_bag_seg_t by_segment[STM_NB_SEGMENTS]; + ,,,,,,,,,,,,,,,,,,,, + bag_node_t *tail; /* the newest committed element in the bag */ + + struct { + } by_segment[NB_SEGMENTS]; }; stm_bag_t *stm_bag_create(void) { - int i; - stm_bag_t *bag; - void *mem; + stm_bag_t *bag = malloc(sizeof(stm_bag_t)); + assert(bag); /* XXX out of memory in stm_bag_create */ + memset(bag, 0, sizeof(stm_bag_t)); + return bag; +} - assert(sizeof(stm_bag_seg_t) == 64); - if (posix_memalign(&mem, sizeof(stm_bag_seg_t), sizeof(stm_bag_t)) != 0) - stm_fatalerror("out of memory in stm_bag_create"); /* XXX */ - - bag = (stm_bag_t *)mem; - for (i = 0; i < STM_NB_SEGMENTS; i++) { - stm_bag_seg_t *bs = &bag->by_segment[i]; - struct deque_block_s *block = deque_new_block(); - bs->deque_left = &block->items[0]; - bs->deque_middle = &block->items[0]; - bs->deque_right = 
&block->items[0]; - LIST_CREATE(bs->abort_list); - bs->start_time = 0; - bs->must_add_to_modified_bags = false; /* currently young */ - bs->lock = 0; +static void bag_node_free_rec(bag_node_t *p) +{ + while (p != NULL) { + bag_node_t *q = p->next; + free(p); + p = q; } - return bag; } void stm_bag_free(stm_bag_t *bag) { int i; - - s_mutex_lock(); - for (i = 0; i < STM_NB_SEGMENTS; i++) { - stm_bag_seg_t *bs = &bag->by_segment[i]; - struct stm_segment_info_s *pub = get_segment(i + 1); - stm_thread_local_t *tl = pub->running_thread; - if (tl != NULL && tl->associated_segment_num == i + 1) { - stm_call_on_abort(tl, bs, NULL); - } + bag_node_free_rec(bag->tail); + for (i = 0; i < NB_SEGMENTS; i++) { + bag_node_free_rec(bag->by_segment[i].added); + bag_node_free_rec(bag->by_segment[i].removed); } - s_mutex_unlock(); - - for (i = 0; i < STM_NB_SEGMENTS; i++) { - stm_bag_seg_t *bs = &bag->by_segment[i]; - struct deque_block_s *block = deque_block(bs->deque_left); - while (block != NULL) { - struct deque_block_s *nextblock = block->next; - deque_free_block(block); - block = nextblock; - } - LIST_FREE(bs->abort_list); - } - + bag_node_free_rec(bag->head); free(bag); } -static void bag_add(stm_bag_seg_t *bs, object_t *newobj) -{ - struct deque_block_s *block = deque_block(bs->deque_right); - *bs->deque_right++ = (uintptr_t)newobj; - - if (bs->deque_right == &block->items[DEQUE_BLOCK_SIZE]) { - if (block->next == NULL) - block->next = deque_new_block(); - bs->deque_right = &block->next->items[0]; - } -} - -static void bag_abort_callback(void *key) -{ - stm_bag_seg_t *bs = (stm_bag_seg_t *)key; - - /* remove the "added in this transaction" items */ - bs->deque_right = bs->deque_middle; - - /* reinstall the items from the "abort_list" */ - if (!list_is_empty(bs->abort_list)) { - LIST_FOREACH_F(bs->abort_list, object_t *, bag_add(bs, item)); - list_clear(bs->abort_list); - - /* these items are not "added in this transaction" */ - spinlock_acquire(bs->lock); - bs->deque_middle = 
bs->deque_right; - spinlock_release(bs->lock); - } -} - -static stm_bag_seg_t *bag_check_start_time(stm_bag_t *bag) -{ - int i = STM_SEGMENT->segment_num - 1; - stm_bag_seg_t *bs = &bag->by_segment[i]; - - if (bs->start_time != STM_PSEGMENT->unique_start_time) { - /* There was a commit or an abort since the last operation - on the same bag in the same segment. If there was an - abort, bag_abort_callback() should have been called to - reset the state. Assume that any non-reset state is - there because of a commit. - - The middle pointer moves to the right: there are no - more "added in this transaction" entries. And the - "already popped items" list is forgotten. - */ - if (bs->deque_middle != bs->deque_right) { - spinlock_acquire(bs->lock); - bs->deque_middle = bs->deque_right; - spinlock_release(bs->lock); - } - list_clear(bs->abort_list); - bs->start_time = STM_PSEGMENT->unique_start_time; - bs->must_add_to_modified_bags = true; - - /* We're about to modify the bag, so register an abort - callback now. 
*/ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - assert(tl->associated_segment_num == STM_SEGMENT->segment_num); - stm_call_on_abort(tl, bs, &bag_abort_callback); - } - - return bs; -} - void stm_bag_add(stm_bag_t *bag, object_t *newobj) { - stm_bag_seg_t *bs = bag_check_start_time(bag); - bag_add(bs, newobj); + uint32_t i = STM_SEGMENT->segment_num - 1; + bag_node_t **p_added = &bag->by_segment[i].added; + bag_node_t *p = malloc(sizeof(bag_node_t)); + assert(p); /* XXX */ - if (bs->must_add_to_modified_bags) { - bs->must_add_to_modified_bags = false; - if (STM_PSEGMENT->modified_bags == NULL) - LIST_CREATE(STM_PSEGMENT->modified_bags); - LIST_APPEND(STM_PSEGMENT->modified_bags, bag); + p->value = newobj; + while (1) { + bag_node_t *old = *p_added; + p->next = old; + if (__sync_bool_compare_and_swap(p_added, old, p)) + break; } } @@ -225,6 +127,23 @@ spinlock_acquire(bs->lock); if (bs->deque_left == bs->deque_right) { + /* look up inside other segments without locks; this might get + occasional nonsense, but it should not matter here */ + int i; + stm_bag_seg_t *src = NULL; + for (i = 0; i < STM_NB_SEGMENTS; i++) { + stm_bag_seg_t *other = &bag->by_segment[i]; + uintptr_t *left = other->deque_left; + uintptr_t *middle = other->deque_left; + ...; + + if (other->deque_left != other->deque_right) { + src = other; + if (other->deque_ + } + } + + spinlock_release(bs->lock); return NULL; } From noreply at buildbot.pypy.org Sat Jan 31 22:16:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 22:16:37 +0100 (CET) Subject: [pypy-commit] stmgc hashtable-iter: Trying out ideas for how to do iteration over hashtables Message-ID: <20150131211637.C65AE1C02B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: hashtable-iter Changeset: r1596:0d06d3eacd37 Date: 2015-01-31 22:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/0d06d3eacd37/ Log: Trying out ideas for how to do iteration over hashtables From noreply at buildbot.pypy.org Sat Jan 31 
22:47:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 22:47:08 +0100 (CET) Subject: [pypy-commit] stmgc hashtable-iter: document the plan Message-ID: <20150131214708.DBF1C1C028E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: hashtable-iter Changeset: r1597:8da7f5322135 Date: 2015-01-31 22:47 +0100 http://bitbucket.org/pypy/stmgc/changeset/8da7f5322135/ Log: document the plan diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c --- a/c7/stm/hashtable.c +++ b/c7/stm/hashtable.c @@ -12,22 +12,30 @@ collision). The main operations on a hashtable are reading or writing an object at a -given index. It might support in the future enumerating the indexes of -non-NULL objects. +given index. It also supports iterating its non-NULL entries. There are two markers for every index (a read and a write marker). This is unlike regular arrays, which have only two markers in total. +Additionally, we use the read marker for the hashtable object itself +to mean "we are iterating". When a transaction has got this "global" +read marker and another transaction attempts to create a new key/value +pair via stm_hashtable_{lookup,read,write}, the call immediately fails +with a read/write conflict. This gives priority to the iterating +transaction rather than the modifying transaction, which is probably +what we want. + Implementation -------------- First idea: have the hashtable in raw memory, pointing to "entry" -objects. The entry objects themselves point to the user-specified -objects. The entry objects have the read/write markers. Every entry -object, once created, stays around. It is only removed by the next -major GC if it points to NULL and its read/write markers are not set -in any currently-running transaction. +objects (which are regular, GC- and STM-managed objects). The entry +objects themselves point to the user-specified objects. The entry +objects hold the read/write markers. Every entry object, once +created, stays around. 
It is only removed by the next major GC if it +points to NULL and its read/write markers are not set in any +currently-running transaction. References ---------- @@ -54,8 +62,12 @@ The field 'resize_counter' also works as a write lock: changes go via the intermediate value RESIZING_LOCK (0). + + In addition, 'resize_counter' can be the negative of the odd + number that it would normally be, as a hint to force the check + of the global read marker, as set by iteration. */ - uintptr_t resize_counter; + intptr_t resize_counter; stm_hashtable_entry_t *items[INITIAL_HASHTABLE_SIZE]; } stm_hashtable_table_t; @@ -88,7 +100,7 @@ void stm_hashtable_free(stm_hashtable_t *hashtable) { - uintptr_t rc = hashtable->initial_table.resize_counter; + intptr_t rc = hashtable->initial_table.resize_counter; free(hashtable); while (IS_EVEN(rc)) { assert(rc != RESIZING_LOCK); @@ -150,15 +162,16 @@ assert(biggertable); // XXX stm_hashtable_table_t *table = hashtable->table; - table->resize_counter = (uintptr_t)biggertable; + table->resize_counter = (intptr_t)biggertable; /* ^^^ this unlocks the table by writing a non-zero value to table->resize_counter, but the new value is a pointer to the new bigger table, so IS_EVEN() is still true */ + assert(IS_EVEN(table->resize_counter)); init_table(biggertable, biggercount); uintptr_t j, mask = table->mask; - uintptr_t rc = biggertable->resize_counter; + intptr_t rc = biggertable->resize_counter; char *segment_base = get_segment_base(remove_unread_from_seg); for (j = 0; j <= mask; j++) { stm_hashtable_entry_t *entry = table->items[j]; @@ -175,6 +188,7 @@ _insert_clean(biggertable, entry); rc -= 6; } + assert(rc > 0); biggertable->resize_counter = rc; write_fence(); /* make sure that 'biggertable' is valid here, @@ -218,7 +232,7 @@ } /* here, we didn't find the 'entry' with the correct index. 
*/ - uintptr_t rc = VOLATILE_TABLE(table)->resize_counter; + intptr_t rc = VOLATILE_TABLE(table)->resize_counter; /* if rc is RESIZING_LOCK (which is 0, so even), a concurrent thread is writing to the hashtable. Or, if rc is another even number, it is @@ -307,6 +321,7 @@ return entry; } else { + //xxxxxxxxxxxxxxxxxxxxxxx; /* if rc is smaller than 6, we must allocate a new bigger table. */ uintptr_t biggercount = table->mask + 1; @@ -364,7 +379,7 @@ assert(!IS_EVEN(table->resize_counter)); if (table != &hashtable->initial_table) { - uintptr_t rc = hashtable->initial_table.resize_counter; + intptr_t rc = hashtable->initial_table.resize_counter; while (1) { assert(IS_EVEN(rc)); assert(rc != RESIZING_LOCK); @@ -375,7 +390,8 @@ rc = old_table->resize_counter; free(old_table); } - hashtable->initial_table.resize_counter = (uintptr_t)table; + hashtable->initial_table.resize_counter = (intptr_t)table; + assert(IS_EVEN(hashtable->initial_table.resize_counter)); } } From noreply at buildbot.pypy.org Sat Jan 31 23:53:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 Jan 2015 23:53:45 +0100 (CET) Subject: [pypy-commit] stmgc hashtable-iter: in-progress Message-ID: <20150131225345.A430B1C028E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: hashtable-iter Changeset: r1598:6fd4e2f21b1e Date: 2015-01-31 23:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/6fd4e2f21b1e/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -431,13 +431,12 @@ continue; /* no need to check: is pending immediate abort */ char *remote_base = get_segment_base(i); - uint8_t remote_version = get_segment(i)->transaction_read_version; LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ - if (was_read_remote(remote_base, item, remote_version)) { + if (was_read_remote(remote_base, item)) { /* A write-read conflict! 
*/ dprintf(("write-read conflict on %p, our seg: %d, other: %ld\n", item, STM_SEGMENT->segment_num, i)); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -281,9 +281,17 @@ static stm_thread_local_t *abort_with_mutex_no_longjmp(void); static void abort_data_structures_from_segment_num(int segment_num); -static inline bool was_read_remote(char *base, object_t *obj, - uint8_t other_transaction_read_version) +static inline bool was_read_local(object_t *obj) { + return ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm == + STM_SEGMENT->transaction_read_version; +} + +static inline bool was_read_remote(char *base, object_t *obj) +{ + uint8_t other_transaction_read_version = + ((struct stm_segment_info_s *)REAL_ADDRESS(base, STM_PSEGMENT)) + ->transaction_read_version; uint8_t rm = ((struct stm_read_marker_s *) (base + (((uintptr_t)obj) >> 4)))->rm; assert(rm <= other_transaction_read_version); diff --git a/c7/stm/hashtable.c b/c7/stm/hashtable.c --- a/c7/stm/hashtable.c +++ b/c7/stm/hashtable.c @@ -63,9 +63,9 @@ The field 'resize_counter' also works as a write lock: changes go via the intermediate value RESIZING_LOCK (0). - In addition, 'resize_counter' can be the negative of the odd - number that it would normally be, as a hint to force the check - of the global read marker, as set by iteration. + In addition, 'resize_counter' can be the negation of the odd + number that it would normally be: in this case it is "probably + write-protected" (see stm_hashtable_next()). */ intptr_t resize_counter; @@ -151,7 +151,8 @@ static void _stm_rehash_hashtable(stm_hashtable_t *hashtable, uintptr_t biggercount, - int remove_unread_from_seg) + int remove_unread_from_seg, + bool rc_must_be_negative) { dprintf(("rehash %p to %ld, remove_unread_from_seg=%d\n", hashtable, biggercount, remove_unread_from_seg)); @@ -189,7 +190,7 @@ rc -= 6; } assert(rc > 0); - biggertable->resize_counter = rc; + biggertable->resize_counter = rc_must_be_negative ? 
-rc : rc; write_fence(); /* make sure that 'biggertable' is valid here, and make sure 'table->resize_counter' is updated @@ -233,6 +234,7 @@ /* here, we didn't find the 'entry' with the correct index. */ intptr_t rc = VOLATILE_TABLE(table)->resize_counter; + bool rc_must_be_negative = false; /* if rc is RESIZING_LOCK (which is 0, so even), a concurrent thread is writing to the hashtable. Or, if rc is another even number, it is @@ -264,6 +266,7 @@ /* if rc is greater than 6, there is enough room for a new item in the current table. */ + retry_adding: if (rc > 6) { /* we can only enter here once! If we allocate stuff, we may run the GC, and so 'hashtableobj' might move afterwards. */ @@ -317,11 +320,12 @@ write_fence(); /* make sure 'entry' is fully initialized here */ table->items[i] = entry; write_fence(); /* make sure 'table->items' is written here */ - VOLATILE_TABLE(table)->resize_counter = rc - 6; /* unlock */ + rc -= 6; + VOLATILE_TABLE(table)->resize_counter = ( + rc_must_be_negative ? -rc : rc); /* unlock */ return entry; } - else { - //xxxxxxxxxxxxxxxxxxxxxxx; + else if (rc > 0) { /* if rc is smaller than 6, we must allocate a new bigger table. */ uintptr_t biggercount = table->mask + 1; @@ -329,9 +333,36 @@ biggercount *= 4; else biggercount *= 2; - _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/0); + _stm_rehash_hashtable(hashtable, biggercount, /*remove_unread=*/0, + rc_must_be_negative); goto restart; } + else { + assert(rc < 0); + assert(!_is_in_nursery(hashtableobj)); + + /* if rc is negative, the hashtable is probably write-protected. + Check if the read marker of the 'hashtableobj' is set in + another segment. 
+ */ + int j, my_segment = STM_SEGMENT->segment_num; + for (j = 1; j <= NB_SEGMENTS; j++) { + if (j != my_segment) { + if (was_read_remote(get_segment_base(j), hashtableobj)) { + xxxxxxxxxxxx conflict xxxxxxxxxxx; + } + } + } + + /* if even in this thread's segment it was not read, then there + is no point in keeping it write-protected. So we set + 'rc_must_be_negative', i.e. keep it write-protected, iff + it was read locally. + */ + rc_must_be_negative = was_read_local(hashtableobj); + rc = -rc; + goto retry_adding; + } } object_t *stm_hashtable_read(object_t *hobj, stm_hashtable_t *hashtable, @@ -353,6 +384,77 @@ e->object = nvalue; } +struct stm_hashtable_entry_s * +stm_hashtable_next(object_t *hobj, stm_hashtable_t *hashtable, + uintptr_t *pposition, stm_thread_local_t *tl) +{ + /* this assumes the simple c7 model whereby commits only occur with + all other transaction paused at a known point. */ + stm_hashtable_table_t *table; + intptr_t rc; + + /* First set the read marker. It will be left as long as we're running + the same transaction. Note that this code assumes that nothing else + can set the read marker! Also, if 'hobj' is still in the nursery, + it was created by this transaction and there is nothing to do. + */ + if (!_is_in_nursery(hobj) && !was_read_local(hobj)) { + + stm_read(hobj); + + /* Change the 'resize_counter' field to its negative value. This + must be done after we've set the read marker. */ + restart: + table = VOLATILE_HASHTABLE(hashtable)->table; + rc = VOLATILE_TABLE(table)->resize_counter; + if (IS_EVEN(rc)) { + spin_loop(); + goto restart; + } + if (!__sync_bool_compare_and_swap(&table->resize_counter, rc, + rc > 0 ? -rc : rc)) + goto restart; + /* Note that we did a compare-and-swap even if rc was already + negative. This is needed for its memory-ordering effect, + to ensure that from now on the other threads do see our + read marker set. */ + } + else { + /* Read marker already set. 
Assume (and assert) that we + already set a negative value into 'resize_counter'. + Changes of 'table' or 'resize_counter' under our feet + should not be possible here. + */ + table = hashtable->table; + + if (!_is_in_nursery(hobj)) { + assert(!IS_EVEN(table->resize_counter) && + table->resize_counter < 0); + } + } + + /* At this point, the hashtable is write-protected: no other + thread may add new key/value objects nor grow/replace the + 'table'. The hashtable will remain write-protected as long as + this transaction is running. Note that *this* thread is + allowed to continue modifying the hashtable (unless another + thread did also set a write protection). + */ + uintptr_t position = *pposition; + uintptr_t mask = table->mask; + stm_hashtable_entry_t *entry; + + while (position <= mask) { + entry = table->items[position++]; + if (entry != NULL) { + *pposition = position; + return entry; + } + } + *pposition = (uintptr_t)-1; + return NULL; +} + static void _stm_compact_hashtable(stm_hashtable_t *hashtable) { stm_hashtable_table_t *table = hashtable->table; @@ -372,7 +474,8 @@ assert(count <= table->mask + 1); dprintf(("compact with %ld items:\n", num_entries_times_6 / 6)); - _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/segment_num); + _stm_rehash_hashtable(hashtable, count, /*remove_unread=*/segment_num, + /*rc_must_be_negative=*/false); } table = hashtable->table; diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -31,8 +31,7 @@ bool _stm_was_read(object_t *obj) { - return was_read_remote(STM_SEGMENT->segment_base, obj, - STM_SEGMENT->transaction_read_version); + return was_read_local(obj); } bool _stm_was_written(object_t *obj) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -545,6 +545,8 @@ object_t *stm_hashtable_read(object_t *, stm_hashtable_t *, uintptr_t key); void stm_hashtable_write(object_t *, stm_hashtable_t *, uintptr_t key, object_t *nvalue, stm_thread_local_t *); +struct 
stm_hashtable_entry_s *stm_hashtable_next( + object_t *, stm_hashtable_t *, uintptr_t *pposition, stm_thread_local_t *); extern uint32_t stm_hashtable_entry_userdata; void stm_hashtable_tracefn(stm_hashtable_t *, void (object_t **));