From noreply at buildbot.pypy.org Thu Aug 1 10:42:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 10:42:17 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: Kill support for implicitly checking for IndexError if and only if the Message-ID: <20130801084217.C2F351C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r65867:8b92e00fe8ad Date: 2013-07-31 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/8b92e00fe8ad/ Log: Kill support for implicitly checking for IndexError if and only if the getitem/setitem is present in a "try: except IndexError" in the same graph. The idea is to make "except IndexError" invalid in RPython and be explicit. From noreply at buildbot.pypy.org Thu Aug 1 10:42:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 10:42:19 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: Fix the flow space and the annotator Message-ID: <20130801084219.66A331C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r65868:82f7913e20b5 Date: 2013-07-31 12:26 +0200 http://bitbucket.org/pypy/pypy/changeset/82f7913e20b5/ Log: Fix the flow space and the annotator diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -32,7 +32,6 @@ 'and_', 'or_', 'xor', 'lshift', 'rshift', 'getitem', 'setitem', 'delitem', - 'getitem_idx', 'getitem_key', 'getitem_idx_key', 'inplace_add', 'inplace_sub', 'inplace_mul', 'inplace_truediv', 'inplace_floordiv', 'inplace_div', 'inplace_mod', @@ -181,20 +180,6 @@ else: return obj - # checked getitems - - def _getitem_can_only_throw(s_c1, s_o2): - impl = pair(s_c1, s_o2).getitem - return read_can_only_throw(impl, s_c1, s_o2) - - def getitem_idx_key((s_c1, s_o2)): - impl = pair(s_c1, s_o2).getitem - return impl() - getitem_idx_key.can_only_throw = _getitem_can_only_throw - - getitem_idx = getitem_idx_key - getitem_key = getitem_idx_key - class __extend__(pairtype(SomeType, SomeType)): @@ -419,9 +404,11 @@ class __extend__(pairtype(SomeByteArray, SomeInteger)): def getitem((s_b, s_i)): return SomeInteger() + getitem.can_only_throw = [] def setitem((s_b, s_i), s_i2): assert isinstance(s_i2, SomeInteger) + setitem.can_only_throw = [] class __extend__(pairtype(SomeString, SomeByteArray), pairtype(SomeByteArray, SomeString), @@ -614,11 +601,12 @@ try: return tup1.items[int2.const] except IndexError: - return s_ImpossibleValue + raise Exception("tuple of %d elements indexed with [%s]" % ( + len(tup1.items), int2.const)) else: getbookkeeper().count("tuple_random_getitem", tup1) return unionof(*tup1.items) - getitem.can_only_throw = [IndexError] + getitem.can_only_throw = [] class __extend__(pairtype(SomeList, SomeInteger)): @@ -631,25 +619,16 @@ return lst1.listdef.read_item() getitem.can_only_throw = [] - getitem_key = getitem - - def getitem_idx((lst1, int2)): - getbookkeeper().count("list_getitem", int2) - return lst1.listdef.read_item() - getitem_idx.can_only_throw = [IndexError] - - getitem_idx_key = getitem_idx - def setitem((lst1, int2), s_value): getbookkeeper().count("list_setitem", int2) lst1.listdef.mutate() lst1.listdef.generalize(s_value) - setitem.can_only_throw = [IndexError] + setitem.can_only_throw = [] def delitem((lst1, int2)): getbookkeeper().count("list_delitem", int2) lst1.listdef.resize() - delitem.can_only_throw = [IndexError] + delitem.can_only_throw = [] class __extend__(pairtype(SomeString, 
SomeInteger)): @@ -658,15 +637,6 @@ return SomeChar(no_nul=str1.no_nul) getitem.can_only_throw = [] - getitem_key = getitem - - def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) - return SomeChar(no_nul=str1.no_nul) - getitem_idx.can_only_throw = [IndexError] - - getitem_idx_key = getitem_idx - def mul((str1, int2)): # xxx do we want to support this getbookkeeper().count("str_mul", str1, int2) return SomeString(no_nul=str1.no_nul) @@ -677,15 +647,6 @@ return SomeUnicodeCodePoint() getitem.can_only_throw = [] - getitem_key = getitem - - def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) - return SomeUnicodeCodePoint() - getitem_idx.can_only_throw = [IndexError] - - getitem_idx_key = getitem_idx - def mul((str1, int2)): # xxx do we want to support this getbookkeeper().count("str_mul", str1, int2) return SomeUnicodeString() diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -634,10 +634,10 @@ def test_operation_always_raising(self): def operation_always_raising(n): - lst = [] + dct = {} try: - return lst[n] - except IndexError: + return dct[n] + except KeyError: return 24 a = self.RPythonAnnotator() s = a.build_types(operation_always_raising, [int]) @@ -799,13 +799,13 @@ def f(l): try: l[0] - except (KeyError, IndexError),e: + except KeyError, e: # ignored because 'l' is a list return e return None a = self.RPythonAnnotator() s = a.build_types(f, [somelist(annmodel.s_Int)]) - assert s.classdef is a.bookkeeper.getuniqueclassdef(IndexError) # KeyError ignored because l is a list + assert s == annmodel.s_None def test_freeze_protocol(self): class Stuff: diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -186,6 +186,11 @@ if check_class in (NotImplementedError, AssertionError): raise FlowingError(self.frame, "Catching %s is not valid in RPython" % check_class.__name__) + if check_class == IndexError: + raise FlowingError(self.frame, + "Catching IndexError is not valid any more in RPython. 
" + "You should check explicitly that the index is valid " + "before you use it") if not isinstance(check_class, tuple): # the simple case return self.exception_issubclass_w(w_exc_type, w_check_class) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -247,18 +247,17 @@ op_appendices = { OverflowError: 'ovf', - IndexError: 'idx', KeyError: 'key', ZeroDivisionError: 'zer', ValueError: 'val', } -# specifying IndexError, and KeyError beyond Exception, +# specifying KeyError beyond Exception, # allows the annotator to be more precise, see test_reraiseAnything/KeyError in # the annotator tests -op.getitem.canraise = [IndexError, KeyError, Exception] -op.setitem.canraise = [IndexError, KeyError, Exception] -op.delitem.canraise = [IndexError, KeyError, Exception] +op.getitem.canraise = [KeyError, Exception] +op.setitem.canraise = [KeyError, Exception] +op.delitem.canraise = [KeyError, Exception] op.contains.canraise = [Exception] # from an r_dict def _add_exceptions(names, exc): diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -329,7 +329,7 @@ found[link.args[0].value] = True else: found[link.exitcase] = None - assert found == {IndexError: True, KeyError: True, Exception: None} + assert found == {KeyError: True, Exception: None} def reraiseAnything(x): try: @@ -372,7 +372,7 @@ #__________________________________________________________ def raise1(msg): - raise IndexError + raise ValueError def test_raise1(self): x = self.codetest(self.raise1) @@ -381,7 +381,7 @@ ops = x.startblock.operations assert len(ops) == 2 assert ops[0].opname == 'simple_call' - assert ops[0].args == [Constant(IndexError)] + assert ops[0].args == [Constant(ValueError)] assert ops[1].opname == 'type' assert ops[1].args == [ops[0].result] assert x.startblock.exits[0].args == [ops[1].result, ops[0].result] @@ -389,7 +389,7 @@ #__________________________________________________________ def raise2(msg): - raise IndexError, msg + raise ValueError, msg def test_raise2(self): x = self.codetest(self.raise2) @@ -397,7 +397,7 @@ #__________________________________________________________ def raise3(msg): - raise IndexError(msg) + raise ValueError(msg) def test_raise3(self): x = self.codetest(self.raise3) @@ -421,7 +421,7 @@ def raise_and_catch_1(exception_instance): try: raise exception_instance - except IndexError: + except ValueError: return -1 return 0 @@ -432,7 +432,7 @@ def catch_simple_call(): try: user_defined_function() - except IndexError: + except ValueError: return -1 return 0 @@ -443,7 +443,7 @@ def multiple_catch_simple_call(): try: user_defined_function() - except (IndexError, OSError): + except (ValueError, OSError): return -1 return 0 @@ -455,7 +455,7 @@ links = entrymap[graph.returnblock] assert len(links) == 3 assert (dict.fromkeys([link.exitcase for link in links]) == - dict.fromkeys([None, IndexError, OSError])) + dict.fromkeys([None, ValueError, OSError])) links = entrymap[graph.exceptblock] assert len(links) == 1 assert links[0].exitcase is Exception @@ -815,7 +815,7 @@ raise graph = self.codetest(f) simplify_graph(graph) - assert self.all_operations(graph) == {'getitem_idx_key': 1} + assert self.all_operations(graph) == {'getitem': 1} g = lambda: None def f(c, x): @@ -825,7 +825,7 @@ g() graph = self.codetest(f) simplify_graph(graph) - assert self.all_operations(graph) == 
{'getitem_idx_key': 1, + assert self.all_operations(graph) == {'getitem': 1, 'simple_call': 2} def f(c, x): @@ -833,9 +833,8 @@ return c[x] except IndexError: raise - graph = self.codetest(f) - simplify_graph(graph) - assert self.all_operations(graph) == {'getitem_idx': 1} + py.test.raises(FlowingError, self.codetest, f) + # 'except IndexError' is not RPython any more def f(c, x): try: @@ -844,7 +843,7 @@ raise graph = self.codetest(f) simplify_graph(graph) - assert self.all_operations(graph) == {'getitem_key': 1} + assert self.all_operations(graph) == {'getitem': 1} def f(c, x): try: @@ -863,16 +862,7 @@ graph = self.codetest(f) simplify_graph(graph) self.show(graph) - assert self.all_operations(graph) == {'getitem_idx_key': 1} - - def f(c, x): - try: - return c[x] - except IndexError: - return -1 - graph = self.codetest(f) - simplify_graph(graph) - assert self.all_operations(graph) == {'getitem_idx': 1} + assert self.all_operations(graph) == {'getitem': 1} def f(c, x): try: @@ -881,7 +871,7 @@ return -1 graph = self.codetest(f) simplify_graph(graph) - assert self.all_operations(graph) == {'getitem_key': 1} + assert self.all_operations(graph) == {'getitem': 1} def f(c, x): try: diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -206,21 +206,6 @@ exits.append(link) block.recloseblock(*(preserve + exits)) -def transform_xxxitem(graph): - # xxx setitem too - for block in graph.iterblocks(): - if block.operations and block.exitswitch == c_last_exception: - last_op = block.operations[-1] - if last_op.opname == 'getitem': - postfx = [] - for exit in block.exits: - if exit.exitcase is IndexError: - postfx.append('idx') - elif exit.exitcase is KeyError: - postfx.append('key') - if postfx: - last_op.opname = last_op.opname + '_' + '_'.join(postfx) - def remove_dead_exceptions(graph): """Exceptions can be removed if they are unreachable""" @@ -984,7 +969,6 @@ remove_identical_vars, transform_ovfcheck, simplify_exceptions, - transform_xxxitem, remove_dead_exceptions, ] From noreply at buildbot.pypy.org Thu Aug 1 10:42:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 10:42:20 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: RPython test fixes Message-ID: <20130801084220.B32481C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r65869:52641ef0a6dd Date: 2013-07-31 12:34 +0200 http://bitbucket.org/pypy/pypy/changeset/52641ef0a6dd/ Log: RPython test fixes diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -162,22 +162,22 @@ def test_raise(): res = interpret(raise_exception, [41]) assert res == 41 - interpret_raises(IndexError, raise_exception, [42]) + interpret_raises(KeyError, raise_exception, [42]) interpret_raises(ValueError, raise_exception, [43]) def test_call_raise(): res = interpret(call_raise, [41]) assert res == 41 - interpret_raises(IndexError, call_raise, [42]) + interpret_raises(KeyError, call_raise, [42]) interpret_raises(ValueError, call_raise, [43]) def test_call_raise_twice(): res = interpret(call_raise_twice, [6, 7]) assert res == 13 - interpret_raises(IndexError, call_raise_twice, [6, 42]) + interpret_raises(KeyError, call_raise_twice, [6, 42]) res = interpret(call_raise_twice, [6, 43]) assert res == 1006 - interpret_raises(IndexError, call_raise_twice, [42, 7]) + 
interpret_raises(KeyError, call_raise_twice, [42, 7]) interpret_raises(ValueError, call_raise_twice, [43, 7]) def test_call_raise_intercept(): @@ -459,7 +459,7 @@ def raise_exception(i): if i == 42: - raise IndexError + raise KeyError elif i == 43: raise ValueError return i @@ -478,7 +478,7 @@ def call_raise_intercept(i): try: return raise_exception(i) - except IndexError: + except KeyError: return i except ValueError: raise TypeError diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -29,26 +29,6 @@ assert res == expected assert res.__class__ is expected.__class__ - def test_implicit_index_error(self): - const = self.const - def fn(i): - s = const('hello') - try: - return s[i] - except IndexError: - return const('*') - for i in range(-5, 5): - res = self.interpret(fn, [i]) - expected = fn(i) - assert res == expected - assert res.__class__ is expected.__class__ - res = self.interpret(fn, [5]) - assert res == '*' - res = self.interpret(fn, [6]) - assert res == '*' - res = self.interpret(fn, [-42]) - assert res == '*' - def test_nonzero(self): const = self.const def fn(i, j): @@ -903,63 +883,6 @@ s.count(s, -10) py.test.raises(TyperError, self.interpret, f, ()) - def test_getitem_exc(self): - const = self.const - def f(x): - s = const("z") - return s[x] - - res = self.interpret(f, [0]) - assert res == 'z' - try: - self.interpret_raises(IndexError, f, [1]) - except (AssertionError,), e: - pass - else: - assert False - - def f(x): - s = const("z") - try: - return s[x] - except IndexError: - return const('X') - except Exception: - return const(' ') - - res = self.interpret(f, [0]) - assert res == 'z' - res = self.interpret(f, [1]) - assert res == 'X' - - def f(x): - s = const("z") - try: - return s[x] - except Exception: - return const(' ') - - res = self.interpret(f, [0]) - assert res == 'z' - res = self.interpret(f, [1]) - assert res == ' ' - - def f(x): - s = const("z") - try: - return s[x] - except ValueError: - return const(' ') - - res = self.interpret(f, [0]) - assert res == 'z' - try: - self.interpret_raises(IndexError, f, [1]) - except (AssertionError,), e: - pass - else: - assert False - def test_fold_concat(self): const = self.const def g(tail): @@ -1134,4 +1057,4 @@ array = lltype.malloc(TP, 12, flavor='raw') self.interpret(f, [array, 4]) assert list(array) == list('abc'*4) - lltype.free(array, flavor='raw') \ No newline at end of file + lltype.free(array, flavor='raw') From noreply at buildbot.pypy.org Thu Aug 1 10:42:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 10:42:22 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: Adapt lists Message-ID: <20130801084222.02FCF1C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r65870:608476b35886 Date: 2013-07-31 12:53 +0200 http://bitbucket.org/pypy/pypy/changeset/608476b35886/ Log: Adapt lists diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -81,7 +81,7 @@ """Sequence iterator specialized for lists.""" def descr_next(self, space): - from pypy.objspace.std.listobject import W_ListObject + from pypy.objspace.std.listobject import W_ListObject, ListIndexError w_seq = self.w_seq if w_seq is None: raise OperationError(space.w_StopIteration, space.w_None) @@ -89,7 +89,7 @@ index = self.index try: w_item = w_seq.getitem(index) - except 
IndexError: + except ListIndexError: self.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) self.index = index + 1 @@ -108,9 +108,10 @@ if self.tupleitems is None: raise OperationError(space.w_StopIteration, space.w_None) index = self.index - try: + assert index >= 0 + if index < len(self.tupleitems): w_item = self.tupleitems[index] - except IndexError: + else: self.tupleitems = None self.w_seq = None raise OperationError(space.w_StopIteration, space.w_None) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -29,6 +29,7 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index, negate from rpython.rlib import debug, jit, rerased +from rpython.rlib.rarithmetic import r_uint from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import ( instantiate, newlist_hint, resizelist_hint, specialize) @@ -37,6 +38,10 @@ __all__ = ['W_ListObject', 'make_range_list', 'make_empty_list_with_size'] +class ListIndexError(Exception): + """A custom RPython class, raised by getitem() and similar methods.""" + + UNROLL_CUTOFF = 5 @@ -245,13 +250,13 @@ def getitem(self, index): """Returns the wrapped object that is found in the list at the given index. The index must be unwrapped. - May raise IndexError.""" + May raise ListIndexError.""" return self.strategy.getitem(self, index) def getslice(self, start, stop, step, length): """Returns a slice of the list defined by the arguments. Arguments must be normalized (i.e. using normalize_simple_slice or W_Slice.indices4). - May raise IndexError.""" + May raise ListIndexError.""" return self.strategy.getslice(self, start, stop, step, length) def getitems(self): @@ -309,7 +314,7 @@ def pop(self, index): """Pops an item from the list. Index must be normalized. - May raise IndexError.""" + May raise ListIndexError.""" return self.strategy.pop(self, index) def pop_end(self): @@ -318,7 +323,7 @@ def setitem(self, index, w_item): """Inserts a wrapped item at the given (unwrapped) index. 
- May raise IndexError.""" + May raise ListIndexError.""" self.strategy.setitem(self, index, w_item) def setslice(self, start, step, slicelength, sequence_w): @@ -491,7 +496,7 @@ try: index = space.getindex_w(w_index, space.w_IndexError, "list index") return self.getitem(index) - except IndexError: + except ListIndexError: raise OperationError(space.w_IndexError, space.wrap("list index out of range")) @@ -519,7 +524,7 @@ idx = space.getindex_w(w_index, space.w_IndexError, "list index") try: self.setitem(idx, w_any) - except IndexError: + except ListIndexError: raise OperationError(space.w_IndexError, space.wrap("list index out of range")) @@ -546,7 +551,7 @@ idx += self.length() try: self.pop(idx) - except IndexError: + except ListIndexError: raise OperationError(space.w_IndexError, space.wrap("list index out of range")) @@ -597,7 +602,7 @@ index += length try: return self.pop(index) - except IndexError: + except ListIndexError: raise OperationError(space.w_IndexError, space.wrap("pop index out of range")) @@ -865,7 +870,7 @@ return 0 def getitem(self, w_list, index): - raise IndexError + raise ListIndexError def getslice(self, w_list, start, stop, step, length): # will never be called because the empty list case is already caught in @@ -913,10 +918,10 @@ def pop(self, w_list, index): # will not be called because IndexError was already raised in # list_pop__List_ANY - raise IndexError + raise ListIndexError def setitem(self, w_list, index, w_item): - raise IndexError + raise ListIndexError def setslice(self, w_list, start, step, slicelength, w_other): strategy = w_other.strategy @@ -1052,9 +1057,9 @@ if i < 0: i += length if i < 0: - raise IndexError + raise ListIndexError elif i >= length: - raise IndexError + raise ListIndexError return start + i * step def getitems_int(self, w_list): @@ -1235,13 +1240,21 @@ def length(self, w_list): return len(self.unerase(w_list.lstorage)) + @staticmethod + def _getidx(l, index): + ulength = r_uint(len(l)) + uindex = r_uint(index) + if uindex >= ulength: + # out of bounds -or- negative index + uindex += ulength + if uindex >= ulength: + raise ListIndexError + return uindex + def getitem(self, w_list, index): l = self.unerase(w_list.lstorage) - try: - r = l[index] - except IndexError: # make RPython raise the exception - raise - return self.wrap(r) + uindex = self._getidx(l, index) + return self.wrap(l[uindex]) @jit.look_inside_iff(lambda self, w_list: jit.loop_unrolling_heuristic(w_list, w_list.length(), @@ -1276,11 +1289,10 @@ subitems_w = [self._none_value] * length l = self.unerase(w_list.lstorage) for i in range(length): - try: - subitems_w[i] = l[start] - start += step - except IndexError: - raise + # I believe that the following 'l[start]' cannot raise + # an IndexError + subitems_w[i] = l[start] + start += step storage = self.erase(subitems_w) return W_ListObject.from_storage_and_strategy( self.space, storage, self) @@ -1319,10 +1331,8 @@ l = self.unerase(w_list.lstorage) if self.is_correct_type(w_item): - try: - l[index] = self.unwrap(w_item) - except IndexError: - raise + uindex = self._getidx(l, index) + l[uindex] = self.unwrap(w_item) return w_list.switch_to_object_strategy() @@ -1432,15 +1442,8 @@ def pop(self, w_list, index): l = self.unerase(w_list.lstorage) - # not sure if RPython raises IndexError on pop - # so check again here - if index < 0: - raise IndexError - try: - item = l.pop(index) - except IndexError: - raise - + uindex = self._getidx(l, index) + item = l.pop(uindex) w_item = self.wrap(item) return w_item From noreply at 
buildbot.pypy.org Thu Aug 1 10:42:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 10:42:23 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: Kill code. Message-ID: <20130801084223.86D0E1C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r65871:31b884f1dbac Date: 2013-07-31 13:20 +0200 http://bitbucket.org/pypy/pypy/changeset/31b884f1dbac/ Log: Kill code. diff --git a/rpython/rtyper/lltypesystem/rlist.py b/rpython/rtyper/lltypesystem/rlist.py --- a/rpython/rtyper/lltypesystem/rlist.py +++ b/rpython/rtyper/lltypesystem/rlist.py @@ -6,7 +6,7 @@ GcStruct, Void, Signed, malloc, typeOf, nullptr, typeMethod) from rpython.rtyper.rlist import (AbstractBaseListRepr, AbstractListRepr, AbstractFixedSizeListRepr, AbstractListIteratorRepr, ll_setitem_nonneg, - ADTIList, ADTIFixedList, dum_nocheck) + ADTIList, ADTIFixedList) from rpython.rtyper.rmodel import Repr, inputconst, externalvsinternal from rpython.tool.pairtype import pairtype, pair @@ -394,10 +394,9 @@ assert v_sizehint is None cno = inputconst(Signed, len(items_v)) v_result = llops.gendirectcall(LIST.ll_newlist, cno) - v_func = inputconst(Void, dum_nocheck) for i, v_item in enumerate(items_v): ci = inputconst(Signed, i) - llops.gendirectcall(ll_setitem_nonneg, v_func, v_result, ci, v_item) + llops.gendirectcall(ll_setitem_nonneg, v_result, ci, v_item) return v_result # special operations for list comprehension optimization diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -32,10 +32,6 @@ }) -def dum_checkidx(): pass -def dum_nocheck(): pass - - class __extend__(annmodel.SomeList): def rtyper_makerepr(self, rtyper): listitem = self.listdef.listitem @@ -206,11 +202,6 @@ hop.gendirectcall(ll_extend, v_lst1, v_lst2) def rtype_method_pop(self, hop): - if hop.has_implicit_exception(IndexError): - spec = dum_checkidx - else: - spec = dum_nocheck - v_func = hop.inputconst(Void, spec) if hop.nb_args == 2: args = hop.inputargs(self, Signed) assert hasattr(args[1], 'concretetype') @@ -226,7 +217,7 @@ args = hop.inputargs(self) llfn = ll_pop_default hop.exception_is_here() - v_res = hop.gendirectcall(llfn, v_func, *args) + v_res = hop.gendirectcall(llfn, *args) return self.recast(hop.llops, v_res) @@ -243,53 +234,31 @@ class __extend__(pairtype(AbstractBaseListRepr, IntegerRepr)): - def rtype_getitem((r_lst, r_int), hop, checkidx=False): + def rtype_getitem((r_lst, r_int), hop): v_lst, v_index = hop.inputargs(r_lst, Signed) - if checkidx: - hop.exception_is_here() - else: - hop.exception_cannot_occur() - if hop.args_s[0].listdef.listitem.mutated or checkidx: + hop.exception_cannot_occur() + if hop.args_s[0].listdef.listitem.mutated: if hop.args_s[1].nonneg: llfn = ll_getitem_nonneg else: llfn = ll_getitem - if checkidx: - spec = dum_checkidx - else: - spec = dum_nocheck - c_func_marker = hop.inputconst(Void, spec) - v_res = hop.gendirectcall(llfn, c_func_marker, v_lst, v_index) else: - # this is the 'foldable' version, which is not used when - # we check for IndexError + # this is the 'foldable' version if hop.args_s[1].nonneg: llfn = ll_getitem_foldable_nonneg else: llfn = ll_getitem_foldable - v_res = hop.gendirectcall(llfn, v_lst, v_index) + v_res = hop.gendirectcall(llfn, v_lst, v_index) return r_lst.recast(hop.llops, v_res) - rtype_getitem_key = rtype_getitem - - def rtype_getitem_idx((r_lst, r_int), hop): - return pair(r_lst, r_int).rtype_getitem(hop, checkidx=True) - - 
rtype_getitem_idx_key = rtype_getitem_idx - def rtype_setitem((r_lst, r_int), hop): - if hop.has_implicit_exception(IndexError): - spec = dum_checkidx - else: - spec = dum_nocheck - v_func = hop.inputconst(Void, spec) v_lst, v_index, v_item = hop.inputargs(r_lst, Signed, r_lst.item_repr) if hop.args_s[1].nonneg: llfn = ll_setitem_nonneg else: llfn = ll_setitem hop.exception_is_here() - return hop.gendirectcall(llfn, v_func, v_lst, v_index, v_item) + return hop.gendirectcall(llfn, v_lst, v_index, v_item) def rtype_mul((r_lst, r_int), hop): cRESLIST = hop.inputconst(Void, hop.r_result.LIST) @@ -300,18 +269,13 @@ class __extend__(pairtype(AbstractListRepr, IntegerRepr)): def rtype_delitem((r_lst, r_int), hop): - if hop.has_implicit_exception(IndexError): - spec = dum_checkidx - else: - spec = dum_nocheck - v_func = hop.inputconst(Void, spec) v_lst, v_index = hop.inputargs(r_lst, Signed) if hop.args_s[1].nonneg: llfn = ll_delitem_nonneg else: llfn = ll_delitem hop.exception_is_here() - return hop.gendirectcall(llfn, v_func, v_lst, v_index) + return hop.gendirectcall(llfn, v_lst, v_index) def rtype_inplace_mul((r_lst, r_int), hop): v_lst, v_factor = hop.inputargs(r_lst, Signed) @@ -582,22 +546,16 @@ l.ll_setitem_fast(index, newitem) ll_insert_nonneg.oopspec = 'list.insert(l, index, newitem)' -def ll_pop_nonneg(func, l, index): +def ll_pop_nonneg(l, index): ll_assert(index >= 0, "unexpectedly negative list pop index") - if func is dum_checkidx: - if index >= l.ll_length(): - raise IndexError - else: - ll_assert(index < l.ll_length(), "list pop index out of bound") + ll_assert(index < l.ll_length(), "list pop index out of bound") res = l.ll_getitem_fast(index) - ll_delitem_nonneg(dum_nocheck, l, index) + ll_delitem_nonneg(l, index) return res ll_pop_nonneg.oopspec = 'list.pop(l, index)' -def ll_pop_default(func, l): +def ll_pop_default(l): length = l.ll_length() - if func is dum_checkidx and (length == 0): - raise IndexError ll_assert(length > 0, "pop from empty list") index = length - 1 newlength = index @@ -608,10 +566,8 @@ l._ll_resize_le(newlength) return res -def ll_pop_zero(func, l): +def ll_pop_zero(l): length = l.ll_length() - if func is dum_checkidx and (length == 0): - raise IndexError ll_assert(length > 0, "pop(0) from empty list") newlength = length - 1 res = l.ll_getitem_fast(0) @@ -628,18 +584,14 @@ return res ll_pop_zero.oopspec = 'list.pop(l, 0)' -def ll_pop(func, l, index): +def ll_pop(l, index): length = l.ll_length() if index < 0: index += length - if func is dum_checkidx: - if index < 0 or index >= length: - raise IndexError - else: - ll_assert(index >= 0, "negative list pop index out of bound") - ll_assert(index < length, "list pop index out of bound") + ll_assert(index >= 0, "negative list pop index out of bound") + ll_assert(index < length, "list pop index out of bound") res = l.ll_getitem_fast(index) - ll_delitem_nonneg(dum_nocheck, l, index) + ll_delitem_nonneg(l, index) return res @jit.look_inside_iff(lambda l: jit.isvirtual(l)) @@ -654,32 +606,18 @@ i += 1 length_1_i -= 1 -def ll_getitem_nonneg(func, l, index): +def ll_getitem_nonneg(l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") - if func is dum_checkidx: - if index >= l.ll_length(): - raise IndexError return l.ll_getitem_fast(index) ll_getitem_nonneg._always_inline_ = True # no oopspec -- the function is inlined by the JIT -def ll_getitem(func, l, index): - if func is dum_checkidx: - length = l.ll_length() # common case: 0 <= index < length - if r_uint(index) >= r_uint(length): - # 
Failed, so either (-length <= index < 0), or we have to raise - # IndexError. First add 'length' to get the final index, then - # check that we now have (0 <= index < length). - index = r_uint(index) + r_uint(length) - if index >= r_uint(length): - raise IndexError - index = intmask(index) - else: - # We don't want checking, but still want to support index < 0. - # Only call ll_length() if needed. - if index < 0: - index += l.ll_length() - ll_assert(index >= 0, "negative list getitem index out of bound") +def ll_getitem(l, index): + # We don't want checking, but still want to support index < 0. + # Only call ll_length() if needed. + if index < 0: + index += l.ll_length() + ll_assert(index >= 0, "negative list getitem index out of bound") return l.ll_getitem_fast(index) # no oopspec -- the function is inlined by the JIT @@ -695,38 +633,23 @@ ll_getitem_foldable._always_inline_ = True # no oopspec -- the function is inlined by the JIT -def ll_setitem_nonneg(func, l, index, newitem): +def ll_setitem_nonneg(l, index, newitem): ll_assert(index >= 0, "unexpectedly negative list setitem index") - if func is dum_checkidx: - if index >= l.ll_length(): - raise IndexError l.ll_setitem_fast(index, newitem) ll_setitem_nonneg._always_inline_ = True # no oopspec -- the function is inlined by the JIT -def ll_setitem(func, l, index, newitem): - if func is dum_checkidx: - length = l.ll_length() - if r_uint(index) >= r_uint(length): # see comments in ll_getitem(). - index = r_uint(index) + r_uint(length) - if index >= r_uint(length): - raise IndexError - index = intmask(index) - else: - if index < 0: - index += l.ll_length() - ll_assert(index >= 0, "negative list setitem index out of bound") +def ll_setitem(l, index, newitem): + if index < 0: + index += l.ll_length() + ll_assert(index >= 0, "negative list setitem index out of bound") l.ll_setitem_fast(index, newitem) # no oopspec -- the function is inlined by the JIT -def ll_delitem_nonneg(func, l, index): +def ll_delitem_nonneg(l, index): ll_assert(index >= 0, "unexpectedly negative list delitem index") length = l.ll_length() - if func is dum_checkidx: - if index >= length: - raise IndexError - else: - ll_assert(index < length, "list delitem index out of bound") + ll_assert(index < length, "list delitem index out of bound") newlength = length - 1 j = index j1 = j+1 @@ -741,19 +664,11 @@ l._ll_resize_le(newlength) ll_delitem_nonneg.oopspec = 'list.delitem(l, index)' -def ll_delitem(func, l, index): - if func is dum_checkidx: - length = l.ll_length() - if r_uint(index) >= r_uint(length): # see comments in ll_getitem(). 
- index = r_uint(index) + r_uint(length) - if index >= r_uint(length): - raise IndexError - index = intmask(index) - else: - if index < 0: - index += l.ll_length() - ll_assert(index >= 0, "negative list delitem index out of bound") - ll_delitem_nonneg(dum_nocheck, l, index) +def ll_delitem(l, index): + if index < 0: + index += l.ll_length() + ll_assert(index >= 0, "negative list delitem index out of bound") + ll_delitem_nonneg(l, index) # no oopspec -- the function is inlined by the JIT def ll_extend(l1, l2): @@ -989,7 +904,7 @@ def ll_listremove(lst, obj, eqfn): index = ll_listindex(lst, obj, eqfn) # raises ValueError if obj not in lst - ll_delitem_nonneg(dum_nocheck, lst, index) + ll_delitem_nonneg(lst, index) def ll_inplace_mul(l, factor): if factor == 1: diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -292,14 +292,6 @@ return inputconst(Bool, hop.s_result.const) return hop.rtyper.type_system.generic_is(robj1, robj2, hop) - # default implementation for checked getitems - - def rtype_getitem_idx_key((r_c1, r_o1), hop): - return pair(r_c1, r_o1).rtype_getitem(hop) - - rtype_getitem_idx = rtype_getitem_idx_key - rtype_getitem_key = rtype_getitem_idx_key - # ____________________________________________________________ diff --git a/rpython/rtyper/rrange.py b/rpython/rtyper/rrange.py --- a/rpython/rtyper/rrange.py +++ b/rpython/rtyper/rrange.py @@ -1,7 +1,6 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Void, Ptr -from rpython.rtyper.rlist import dum_nocheck, dum_checkidx from rpython.rtyper.rmodel import Repr, IntegerRepr, IteratorRepr from rpython.tool.pairtype import pairtype @@ -31,11 +30,6 @@ class __extend__(pairtype(AbstractRangeRepr, IntegerRepr)): def rtype_getitem((r_rng, r_int), hop): - if hop.has_implicit_exception(IndexError): - spec = dum_checkidx - else: - spec = dum_nocheck - v_func = hop.inputconst(Void, spec) v_lst, v_index = hop.inputargs(r_rng, Signed) if r_rng.step != 0: cstep = hop.inputconst(Signed, r_rng.step) @@ -46,7 +40,7 @@ else: llfn = ll_rangeitem hop.exception_is_here() - return hop.gendirectcall(llfn, v_func, v_lst, v_index, cstep) + return hop.gendirectcall(llfn, v_lst, v_index, cstep) # ____________________________________________________________ # @@ -70,22 +64,13 @@ result = 0 return result -def ll_rangeitem_nonneg(func, l, index, step): - if func is dum_checkidx and index >= _ll_rangelen(l.start, l.stop, step): - raise IndexError +def ll_rangeitem_nonneg(l, index, step): return l.start + index * step -def ll_rangeitem(func, l, index, step): - if func is dum_checkidx: +def ll_rangeitem(l, index, step): + if index < 0: length = _ll_rangelen(l.start, l.stop, step) - if index < 0: - index += length - if index < 0 or index >= length: - raise IndexError - else: - if index < 0: - length = _ll_rangelen(l.start, l.stop, step) - index += length + index += length return l.start + index * step # ____________________________________________________________ diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -446,32 +446,16 @@ class __extend__(pairtype(AbstractStringRepr, IntegerRepr)): - def rtype_getitem((r_str, r_int), hop, checkidx=False): + def rtype_getitem((r_str, r_int), hop): string_repr = r_str.repr v_str, v_index = hop.inputargs(string_repr, Signed) - if checkidx: - if hop.args_s[1].nonneg: - llfn = 
r_str.ll.ll_stritem_nonneg_checked - else: - llfn = r_str.ll.ll_stritem_checked + if hop.args_s[1].nonneg: + llfn = r_str.ll.ll_stritem_nonneg else: - if hop.args_s[1].nonneg: - llfn = r_str.ll.ll_stritem_nonneg - else: - llfn = r_str.ll.ll_stritem - if checkidx: - hop.exception_is_here() - else: - hop.exception_cannot_occur() + llfn = r_str.ll.ll_stritem + hop.exception_cannot_occur() return hop.gendirectcall(llfn, v_str, v_index) - rtype_getitem_key = rtype_getitem - - def rtype_getitem_idx((r_str, r_int), hop): - return pair(r_str, r_int).rtype_getitem(hop, checkidx=True) - - rtype_getitem_idx_key = rtype_getitem_idx - def rtype_mul((r_str, r_int), hop): str_repr = r_str.repr v_str, v_int = hop.inputargs(str_repr, Signed) @@ -837,27 +821,12 @@ return bool(s) and cls.ll_strlen(s) != 0 ll_str_is_true = classmethod(ll_str_is_true) - def ll_stritem_nonneg_checked(cls, s, i): - if i >= cls.ll_strlen(s): - raise IndexError - return cls.ll_stritem_nonneg(s, i) - ll_stritem_nonneg_checked = classmethod(ll_stritem_nonneg_checked) - def ll_stritem(cls, s, i): if i < 0: i += cls.ll_strlen(s) return cls.ll_stritem_nonneg(s, i) ll_stritem = classmethod(ll_stritem) - def ll_stritem_checked(cls, s, i): - length = cls.ll_strlen(s) - if i < 0: - i += length - if i >= length or i < 0: - raise IndexError - return cls.ll_stritem_nonneg(s, i) - ll_stritem_checked = classmethod(ll_stritem_checked) - def parse_fmt_string(fmt): # we support x, d, s, f, [r] it = iter(fmt) diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -14,18 +14,6 @@ from rpython.translator.translator import TranslationContext -# undo the specialization parameter -for n1 in 'get set del'.split(): - for n2 in '', '_nonneg': - name = 'll_%sitem%s' % (n1, n2) - globals()['_' + name] = globals()[name] - exec """if 1: - def %s(*args): - return _%s(dum_checkidx, *args) -""" % (name, name) -del n1, n2, name - - class BaseTestListImpl: def check_list(self, l1, expected): diff --git a/rpython/rtyper/test/test_rrange.py b/rpython/rtyper/test/test_rrange.py --- a/rpython/rtyper/test/test_rrange.py +++ b/rpython/rtyper/test/test_rrange.py @@ -1,5 +1,5 @@ from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.rrange import ll_rangelen, ll_rangeitem, ll_rangeitem_nonneg, dum_nocheck +from rpython.rtyper.rrange import ll_rangelen, ll_rangeitem, ll_rangeitem_nonneg from rpython.rtyper.lltypesystem import rrange from rpython.rtyper.test.tool import BaseRtypingTest @@ -17,11 +17,11 @@ RANGE = rrange.RangeRepr(step).RANGE l = rrange.ll_newrange(RANGE, start, stop) assert ll_rangelen(l, step) == length - lst = [ll_rangeitem(dum_nocheck, l, i, step) for i in range(length)] + lst = [ll_rangeitem(l, i, step) for i in range(length)] assert lst == expected - lst = [ll_rangeitem_nonneg(dum_nocheck, l, i, step) for i in range(length)] + lst = [ll_rangeitem_nonneg(l, i, step) for i in range(length)] assert lst == expected - lst = [ll_rangeitem(dum_nocheck, l, i-length, step) for i in range(length)] + lst = [ll_rangeitem(l, i-length, step) for i in range(length)] assert lst == expected for start in (-10, 0, 1, 10): From noreply at buildbot.pypy.org Thu Aug 1 10:42:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 10:42:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Bah. A mistake of the ootype removal process. 
Message-ID: <20130801084224.C23DB1C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65872:0765febf0718 Date: 2013-08-01 10:41 +0200 http://bitbucket.org/pypy/pypy/changeset/0765febf0718/ Log: Bah. A mistake of the ootype removal process. diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -8,7 +8,7 @@ # ====> test_rstr.py -class BaseTestRUnicode(AbstractTestRstr, BaseRtypingTest): +class TestRUnicode(AbstractTestRstr, BaseRtypingTest): const = unicode constchar = unichr From noreply at buildbot.pypy.org Thu Aug 1 10:43:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 10:43:28 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: in-progress Message-ID: <20130801084328.CB9861C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r65873:9156c47391d7 Date: 2013-08-01 10:42 +0200 http://bitbucket.org/pypy/pypy/changeset/9156c47391d7/ Log: in-progress diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -16,6 +16,7 @@ from pypy.objspace.std.stringobject import W_StringObject from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index +from pypy.objspace.std.util import ListIndexError, getuindex from rpython.rlib.rstring import StringBuilder @@ -93,9 +94,9 @@ except AttributeError: w_IndexError = None index = space.getindex_w(w_index, w_IndexError, "bytearray index") - try: + if index < len(w_bytearray.data): return space.newint(ord(w_bytearray.data[index])) - except IndexError: + else: raise OperationError(space.w_IndexError, space.wrap("bytearray index out of range")) @@ -394,13 +395,14 @@ def bytearray_pop__Bytearray_Int(space, w_bytearray, w_idx): index = space.int_w(w_idx) try: - result = w_bytearray.data.pop(index) - except IndexError: + uindex = getuindex(w_bytearray.data, index) + except ListIndexError: if not w_bytearray.data: raise OperationError(space.w_IndexError, space.wrap( "pop from empty bytearray")) raise OperationError(space.w_IndexError, space.wrap( "pop index out of range")) + result = w_bytearray.data.pop(uindex) return space.wrap(ord(result)) def bytearray_remove__Bytearray_ANY(space, w_bytearray, w_char): @@ -570,11 +572,13 @@ def setitem__Bytearray_ANY_ANY(space, w_bytearray, w_index, w_item): from pypy.objspace.std.bytearraytype import getbytevalue idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + byte = getbytevalue(space, w_item) try: - w_bytearray.data[idx] = getbytevalue(space, w_item) - except IndexError: + uindex = getuindex(w_bytearray.data, idx) + except ListIndexError: raise OperationError(space.w_IndexError, space.wrap("bytearray index out of range")) + w_bytearray.data[uindex] = byte def setitem__Bytearray_Slice_ANY(space, w_bytearray, w_slice, w_other): oldsize = len(w_bytearray.data) @@ -585,11 +589,11 @@ def delitem__Bytearray_ANY(space, w_bytearray, w_idx): idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") try: - del w_bytearray.data[idx] - except IndexError: + uindex = getuindex(w_bytearray.data, idx) + except ListIndexError: raise OperationError(space.w_IndexError, space.wrap("bytearray deletion index out of range")) - return space.w_None + del w_bytearray.data[uindex] def delitem__Bytearray_Slice(space, 
w_bytearray, w_slice): start, stop, step, slicelength = w_slice.indices4(space, diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -25,15 +25,14 @@ def nextinputvalue(self): # return the next value in the tuple of input arguments - try: + if self.values_pos < len(self.values_w): w_result = self.values_w[self.values_pos] - except IndexError: + self.values_pos += 1 + return w_result + else: space = self.space raise OperationError(space.w_TypeError, space.wrap( 'not enough arguments for format string')) - else: - self.values_pos += 1 - return w_result def checkconsumed(self): if self.values_pos < len(self.values_w) and self.w_valuedict is None: @@ -168,9 +167,9 @@ def peekchr(self): # return the 'current' character - try: + if self.fmtpos < len(self.fmt): return self.fmt[self.fmtpos] - except IndexError: + else: space = self.space raise OperationError(space.w_ValueError, space.wrap("incomplete format")) @@ -185,9 +184,9 @@ i0 = i pcount = 1 while 1: - try: + if i < len(fmt): c = fmt[i] - except IndexError: + else: space = self.space raise OperationError(space.w_ValueError, space.wrap("incomplete format key")) diff --git a/pypy/objspace/std/frame.py b/pypy/objspace/std/frame.py --- a/pypy/objspace/std/frame.py +++ b/pypy/objspace/std/frame.py @@ -9,6 +9,7 @@ from pypy.objspace.std import intobject from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.util import ListIndexError class BaseFrame(PyFrame): @@ -43,7 +44,7 @@ if type(w_1) is W_ListObject and type(w_2) is intobject.W_IntObject: try: w_result = w_1.getitem(w_2.intval) - except IndexError: + except ListIndexError: raise OperationError(f.space.w_IndexError, f.space.wrap("list index out of range")) else: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -28,8 +28,8 @@ from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index, negate +from pypy.objspace.std.util import ListIndexError, getuindex from rpython.rlib import debug, jit, rerased -from rpython.rlib.rarithmetic import r_uint from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import ( instantiate, newlist_hint, resizelist_hint, specialize) @@ -38,10 +38,6 @@ __all__ = ['W_ListObject', 'make_range_list', 'make_empty_list_with_size'] -class ListIndexError(Exception): - """A custom RPython class, raised by getitem() and similar methods.""" - - UNROLL_CUTOFF = 5 @@ -1240,20 +1236,9 @@ def length(self, w_list): return len(self.unerase(w_list.lstorage)) - @staticmethod - def _getidx(l, index): - ulength = r_uint(len(l)) - uindex = r_uint(index) - if uindex >= ulength: - # out of bounds -or- negative index - uindex += ulength - if uindex >= ulength: - raise ListIndexError - return uindex - def getitem(self, w_list, index): l = self.unerase(w_list.lstorage) - uindex = self._getidx(l, index) + uindex = getuindex(l, index) return self.wrap(l[uindex]) @jit.look_inside_iff(lambda self, w_list: @@ -1331,7 +1316,7 @@ l = self.unerase(w_list.lstorage) if self.is_correct_type(w_item): - uindex = self._getidx(l, index) + uindex = getuindex(l, index) l[uindex] = self.unwrap(w_item) return @@ -1442,7 +1427,7 @@ def pop(self, w_list, index): l = 
self.unerase(w_list.lstorage) - uindex = self._getidx(l, index) + uindex = getuindex(l, index) item = l.pop(uindex) w_item = self.wrap(item) return w_item diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -282,9 +282,9 @@ def unmarshal_stringref(space, u, tc): idx = u.get_int() - try: + if 0 <= idx < len(u.stringtable_w): return u.stringtable_w[idx] - except IndexError: + else: raise_exception(space, 'bad marshal data') register(TYPE_STRINGREF, unmarshal_stringref) diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -201,7 +201,7 @@ if empty: index = self.auto_numbering self.auto_numbering += 1 - if index == -1: + if index < 0: kwarg = name[:i] if self.is_unicode: try: @@ -216,9 +216,9 @@ except KeyError: raise OperationError(space.w_KeyError, space.wrap(arg_key)) else: - try: + if index < len(self.args): w_arg = self.args[index] - except IndexError: + else: w_msg = space.wrap("index out of range") raise OperationError(space.w_IndexError, w_msg) return self._resolve_lookups(w_arg, name, i, end) diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -10,7 +10,7 @@ from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stdtypedef import StdTypeDef -from pypy.objspace.std.util import negate +from pypy.objspace.std.util import negate, getuindex, ListIndexError from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rarithmetic import intmask @@ -283,10 +283,11 @@ def getitem(self, space, index): try: - return self.wrappeditems[index] - except IndexError: + uindex = getuindex(self.wrappeditems, index) + except ListIndexError: raise OperationError(space.w_IndexError, space.wrap("tuple index out of range")) + return self.wrappeditems[index] def wraptuple(space, list_w): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -1,3 +1,6 @@ +from rpython.rlib.rarithmetic import r_uint + + def negate(f): """Create a function which calls `f` and negates its result. When the result is ``space.w_NotImplemented``, ``space.w_NotImplemented`` is @@ -25,3 +28,20 @@ where = length assert where >= 0 return where + + +class ListIndexError(Exception): + """A custom RPython class, raised by getitem() and similar methods + from listobject.py, and from getuindex() below.""" + +def getuindex(lst, index): + ulength = r_uint(len(lst)) + uindex = r_uint(index) + if uindex >= ulength: + # Failed, so either (-length <= index < 0), or we have to raise + # ListIndexError. First add 'length' to get the final index, then + # check that we now have (0 <= index < length). 
+ uindex += ulength + if uindex >= ulength: + raise ListIndexError + return uindex From noreply at buildbot.pypy.org Thu Aug 1 11:03:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 11:03:55 +0200 (CEST) Subject: [pypy-commit] cffi default: SIZE_OF_LONG may differ from SIZE_OF_PTR Message-ID: <20130801090355.65E9E1C3666@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1301:dae3c39125de Date: 2013-08-01 11:03 +0200 http://bitbucket.org/cffi/cffi/changeset/dae3c39125de/ Log: SIZE_OF_LONG may differ from SIZE_OF_PTR diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -537,13 +537,13 @@ for c_type, expected_size in [ ('char', 1), ('unsigned int', 4), - ('char *', SIZE_OF_LONG), + ('char *', SIZE_OF_PTR), ('int[5]', 20), ('struct foo', 12), ('union foo', 4), ]: size = ffi.sizeof(c_type) - assert size == expected_size + assert size == expected_size, (size, expected_size, ctype) def test_sizeof_cdata(self): ffi = FFI(backend=self.Backend()) From noreply at buildbot.pypy.org Thu Aug 1 11:19:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 11:19:51 +0200 (CEST) Subject: [pypy-commit] cffi win64: A branch to fix the win64 issues. Message-ID: <20130801091951.9401E1C01A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win64 Changeset: r1302:89277a8d0543 Date: 2013-08-01 11:19 +0200 http://bitbucket.org/cffi/cffi/changeset/89277a8d0543/ Log: A branch to fix the win64 issues. From noreply at buildbot.pypy.org Thu Aug 1 11:19:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 11:19:52 +0200 (CEST) Subject: [pypy-commit] cffi win64: I fail to see how "cif->bytes ? cif->bytes : 40" means "at least 40". Message-ID: <20130801091952.E18BE1C135D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win64 Changeset: r1303:a01a34a12473 Date: 2013-08-01 11:19 +0200 http://bitbucket.org/cffi/cffi/changeset/a01a34a12473/ Log: I fail to see how "cif->bytes ? cif->bytes : 40" means "at least 40". It seems to mean only "not 0" to me. diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -221,8 +221,7 @@ #else case FFI_SYSV: /*@-usedef@*/ - /* Function call needs at least 40 bytes stack size, on win64 AMD64 */ - return ffi_call_AMD64(ffi_prep_args, &ecif, cif->bytes ? cif->bytes : 40, + return ffi_call_AMD64(ffi_prep_args, &ecif, cif->bytes, cif->flags, ecif.rvalue, fn); /*@=usedef@*/ break; diff --git a/c/libffi_msvc/prep_cif.c b/c/libffi_msvc/prep_cif.c --- a/c/libffi_msvc/prep_cif.c +++ b/c/libffi_msvc/prep_cif.c @@ -168,6 +168,12 @@ #endif } +#ifdef _WIN64 + /* Function call needs at least 40 bytes stack size, on win64 AMD64 */ + if (bytes < 40) + bytes = 40; +#endif + cif->bytes = bytes; /* Perform machine dependent cif processing */ From noreply at buildbot.pypy.org Thu Aug 1 11:46:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 11:46:42 +0200 (CEST) Subject: [pypy-commit] cffi win64: Bah? Message-ID: <20130801094642.CAFDD1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win64 Changeset: r1304:985f8322ba45 Date: 2013-08-01 11:46 +0200 http://bitbucket.org/cffi/cffi/changeset/985f8322ba45/ Log: Bah? 
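A minimal standalone C sketch of the Win64 calling-convention detail behind the "z > 8" branch and the 8-byte pointer typedef in the surrounding changesets (illustrative only, not taken from libffi_msvc; the pair_t/show names are made up): the Microsoft x64 ABI passes any argument whose size is not 1, 2, 4 or 8 bytes by reference, so the stack slot seen by a closure holds a pointer to the value rather than the value itself and has to be dereferenced once before being handed on.

    #include <stdio.h>

    typedef struct { double a, b; } pair_t;   /* 16 bytes: passed by reference on Win64 */

    static void show(pair_t p, int x)          /* 'x' is 4 bytes: passed by value */
    {
        printf("%g %g %d\n", p.a, p.b, x);
    }

    int main(void)
    {
        pair_t p = { 1.5, 2.5 };
        show(p, 42);   /* on Win64 the compiler materializes &p behind the scenes */
        return 0;
    }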
diff --git a/c/libffi_msvc/types.c b/c/libffi_msvc/types.c --- a/c/libffi_msvc/types.c +++ b/c/libffi_msvc/types.c @@ -43,7 +43,7 @@ FFI_INTEGRAL_TYPEDEF(float, 4, 4, FFI_TYPE_FLOAT); #if defined ALPHA || defined SPARC64 || defined X86_64 || defined S390X \ - || defined IA64 + || defined IA64 || defined _WIN64 FFI_INTEGRAL_TYPEDEF(pointer, 8, 8, FFI_TYPE_POINTER); From noreply at buildbot.pypy.org Thu Aug 1 11:57:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 11:57:21 +0200 (CEST) Subject: [pypy-commit] cffi win64: Attempted fix Message-ID: <20130801095721.342761C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win64 Changeset: r1305:2451ececc8cd Date: 2013-08-01 11:57 +0200 http://bitbucket.org/cffi/cffi/changeset/2451ececc8cd/ Log: Attempted fix diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -377,6 +377,11 @@ /* because we're little endian, this is what it turns into. */ +#ifdef _WIN64 + if (z > 8) + *p_argv = *((void**) argp); /* indirect */ + else +#endif *p_argv = (void*) argp; p_argv++; From noreply at buildbot.pypy.org Thu Aug 1 12:06:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 12:06:58 +0200 (CEST) Subject: [pypy-commit] cffi win64: Fix Message-ID: <20130801100658.801411C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win64 Changeset: r1306:a0bbd364d7a2 Date: 2013-08-01 12:06 +0200 http://bitbucket.org/cffi/cffi/changeset/a0bbd364d7a2/ Log: Fix diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -340,6 +340,9 @@ result types except for floats; we have to 'mov xmm0, rax' in the caller to correct this. */ + if ( cif->flags == FFI_TYPE_STRUCT ) { + resp = *(void **)resp; + } return *(void **)resp; #endif } From noreply at buildbot.pypy.org Thu Aug 1 12:22:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 12:22:39 +0200 (CEST) Subject: [pypy-commit] cffi win64: Backed out changeset a0bbd364d7a2 Message-ID: <20130801102239.2BBBD1C135D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win64 Changeset: r1307:9c93e3a75495 Date: 2013-08-01 12:22 +0200 http://bitbucket.org/cffi/cffi/changeset/9c93e3a75495/ Log: Backed out changeset a0bbd364d7a2 diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -340,9 +340,6 @@ result types except for floats; we have to 'mov xmm0, rax' in the caller to correct this. 
*/ - if ( cif->flags == FFI_TYPE_STRUCT ) { - resp = *(void **)resp; - } return *(void **)resp; #endif } From noreply at buildbot.pypy.org Thu Aug 1 14:44:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 14:44:13 +0200 (CEST) Subject: [pypy-commit] cffi win64: I have seriously no clue how Win64 ctypes work Message-ID: <20130801124413.62AB01C039A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win64 Changeset: r1308:484df1511013 Date: 2013-08-01 14:44 +0200 http://bitbucket.org/cffi/cffi/changeset/484df1511013/ Log: I have seriously no clue how Win64 ctypes work diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -246,7 +246,7 @@ #else static void __fastcall #endif -ffi_closure_SYSV (ffi_closure *closure, int *argp) +ffi_closure_SYSV (ffi_closure *closure, char *argp) { // this is our return value storage long double res; @@ -256,7 +256,7 @@ void **arg_area; unsigned short rtype; void *resp = (void*)&res; - void *args = &argp[1]; + void *args = argp + sizeof(void *); cif = closure->cif; arg_area = (void**) alloca (cif->nargs * sizeof (void*)); From noreply at buildbot.pypy.org Thu Aug 1 15:23:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 15:23:47 +0200 (CEST) Subject: [pypy-commit] cffi win64: Close branch, ready to merge Message-ID: <20130801132347.749D61C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win64 Changeset: r1309:98257c799f70 Date: 2013-08-01 15:23 +0200 http://bitbucket.org/cffi/cffi/changeset/98257c799f70/ Log: Close branch, ready to merge From noreply at buildbot.pypy.org Thu Aug 1 15:23:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 15:23:49 +0200 (CEST) Subject: [pypy-commit] cffi default: hg merge win64 Message-ID: <20130801132349.267321C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1310:d66fc88471be Date: 2013-08-01 15:23 +0200 http://bitbucket.org/cffi/cffi/changeset/d66fc88471be/ Log: hg merge win64 diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -221,8 +221,7 @@ #else case FFI_SYSV: /*@-usedef@*/ - /* Function call needs at least 40 bytes stack size, on win64 AMD64 */ - return ffi_call_AMD64(ffi_prep_args, &ecif, cif->bytes ? cif->bytes : 40, + return ffi_call_AMD64(ffi_prep_args, &ecif, cif->bytes, cif->flags, ecif.rvalue, fn); /*@=usedef@*/ break; @@ -247,7 +246,7 @@ #else static void __fastcall #endif -ffi_closure_SYSV (ffi_closure *closure, int *argp) +ffi_closure_SYSV (ffi_closure *closure, char *argp) { // this is our return value storage long double res; @@ -257,7 +256,7 @@ void **arg_area; unsigned short rtype; void *resp = (void*)&res; - void *args = &argp[1]; + void *args = argp + sizeof(void *); cif = closure->cif; arg_area = (void**) alloca (cif->nargs * sizeof (void*)); @@ -378,6 +377,11 @@ /* because we're little endian, this is what it turns into. 
*/ +#ifdef _WIN64 + if (z > 8) + *p_argv = *((void**) argp); /* indirect */ + else +#endif *p_argv = (void*) argp; p_argv++; diff --git a/c/libffi_msvc/prep_cif.c b/c/libffi_msvc/prep_cif.c --- a/c/libffi_msvc/prep_cif.c +++ b/c/libffi_msvc/prep_cif.c @@ -168,6 +168,12 @@ #endif } +#ifdef _WIN64 + /* Function call needs at least 40 bytes stack size, on win64 AMD64 */ + if (bytes < 40) + bytes = 40; +#endif + cif->bytes = bytes; /* Perform machine dependent cif processing */ diff --git a/c/libffi_msvc/types.c b/c/libffi_msvc/types.c --- a/c/libffi_msvc/types.c +++ b/c/libffi_msvc/types.c @@ -43,7 +43,7 @@ FFI_INTEGRAL_TYPEDEF(float, 4, 4, FFI_TYPE_FLOAT); #if defined ALPHA || defined SPARC64 || defined X86_64 || defined S390X \ - || defined IA64 + || defined IA64 || defined _WIN64 FFI_INTEGRAL_TYPEDEF(pointer, 8, 8, FFI_TYPE_POINTER); From noreply at buildbot.pypy.org Thu Aug 1 15:26:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 15:26:19 +0200 (CEST) Subject: [pypy-commit] cffi windows: Close this experimental branch. Took manually 05896a3af290 and Message-ID: <20130801132619.79DA81C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: windows Changeset: r1311:c6f0e2c2baf4 Date: 2013-08-01 15:26 +0200 http://bitbucket.org/cffi/cffi/changeset/c6f0e2c2baf4/ Log: Close this experimental branch. Took manually 05896a3af290 and 55d1624ba4be from there. From noreply at buildbot.pypy.org Thu Aug 1 15:27:54 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 1 Aug 2013 15:27:54 +0200 (CEST) Subject: [pypy-commit] pypy default: update version in the docs Message-ID: <20130801132754.E39C51C0130@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65874:22a0e7be1d30 Date: 2013-08-01 15:26 +0200 http://bitbucket.org/pypy/pypy/changeset/22a0e7be1d30/ Log: update version in the docs diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.0' +version = '2.1' # The full version, including alpha/beta/rc tags. -release = '2.0.2' +release = '2.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -104,8 +104,8 @@ executable. The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.7.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``RPython magically makes you rich and famous (says so on the tin)'' @@ -171,7 +171,7 @@ the ``bin/pypy`` executable. To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. in ``/opt/pypy2.0``) and put a symlink to the +whole hierarchy alone (e.g. 
in ``/opt/pypy2.1``) and put a symlink to the ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` If the executable fails to find suitable libraries, it will report diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,10 +53,10 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-2.0.tar.bz2 - $ ./pypy-2.0/bin/pypy - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + $ tar xf pypy-2.1.tar.bz2 + $ ./pypy-2.1/bin/pypy + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``PyPy is an exciting technology that lets you to write fast, portable, multi-platform interpreters with less @@ -75,14 +75,14 @@ $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.0/bin/pypy distribute_setup.py + $ ./pypy-2.1/bin/pypy distribute_setup.py - $ ./pypy-2.0/bin/pypy get-pip.py + $ ./pypy-2.1/bin/pypy get-pip.py - $ ./pypy-2.0/bin/pip install pygments # for example + $ ./pypy-2.1/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-2.0/site-packages``, and -the scripts in ``pypy-2.0/bin``. +3rd party libraries will be installed in ``pypy-2.1/site-packages``, and +the scripts in ``pypy-2.1/bin``. Installing using virtualenv --------------------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0.2`_: the latest official release +* `Release 2.1.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0.2`: http://pypy.org/download.html +.. _`Release 2.1.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html From noreply at buildbot.pypy.org Thu Aug 1 15:27:56 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 1 Aug 2013 15:27:56 +0200 (CEST) Subject: [pypy-commit] pypy default: bump version Message-ID: <20130801132756.2C4FC1C0130@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65875:02941220a2d4 Date: 2013-08-01 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/02941220a2d4/ Log: bump version diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-alpha0" +#define PYPY_VERSION "2.2.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 2, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Thu Aug 1 15:41:57 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 1 Aug 2013 15:41:57 +0200 (CEST) Subject: [pypy-commit] pypy default: typo Message-ID: <20130801134157.0E6411C02E4@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65876:9d4bbb3734b2 Date: 2013-08-01 15:41 +0200 http://bitbucket.org/pypy/pypy/changeset/9d4bbb3734b2/ Log: typo diff --git a/pypy/doc/release-2.1.0.rst b/pypy/doc/release-2.1.0.rst --- a/pypy/doc/release-2.1.0.rst +++ b/pypy/doc/release-2.1.0.rst @@ -15,7 +15,7 @@ .. _`Raspberry Pi Foundation`: http://www.raspberrypi.org -The first beta of PyPy3 2.1, targetting version 3 of the Python language, was +The first beta of PyPy3 2.1, targeting version 3 of the Python language, was just released, more details can be found `here`_. .. _`here`: http://morepypy.blogspot.com/2013/07/pypy3-21-beta-1.html From noreply at buildbot.pypy.org Thu Aug 1 16:08:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 16:08:16 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: hg merge default Message-ID: <20130801140816.31EE31C039A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: kill-gen-store-back-in Changeset: r65877:6ef6da5b23ea Date: 2013-08-01 16:06 +0200 http://bitbucket.org/pypy/pypy/changeset/6ef6da5b23ea/ Log: hg merge default diff too long, truncating to 2000 out of 2110 lines diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -966,7 +966,7 @@ r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") if lib.color_content(color, r, g, b) == lib.ERR: raise error("Argument 1 was out of range. Check value of COLORS.") - return (r, g, b) + return (r[0], g[0], b[0]) def color_pair(n): @@ -1121,6 +1121,7 @@ term = ffi.NULL err = ffi.new("int *") if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] if err == 0: raise error("setupterm: could not find terminal") elif err == -1: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.0' +version = '2.1' # The full version, including alpha/beta/rc tags. -release = '2.0.2' +release = '2.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -104,8 +104,8 @@ executable. The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.7.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. 
And now for something completely different: ``RPython magically makes you rich and famous (says so on the tin)'' @@ -171,7 +171,7 @@ the ``bin/pypy`` executable. To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. in ``/opt/pypy2.0``) and put a symlink to the +whole hierarchy alone (e.g. in ``/opt/pypy2.1``) and put a symlink to the ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` If the executable fails to find suitable libraries, it will report diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,10 +53,10 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-2.0.tar.bz2 - $ ./pypy-2.0/bin/pypy - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + $ tar xf pypy-2.1.tar.bz2 + $ ./pypy-2.1/bin/pypy + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``PyPy is an exciting technology that lets you to write fast, portable, multi-platform interpreters with less @@ -75,14 +75,14 @@ $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.0/bin/pypy distribute_setup.py + $ ./pypy-2.1/bin/pypy distribute_setup.py - $ ./pypy-2.0/bin/pypy get-pip.py + $ ./pypy-2.1/bin/pypy get-pip.py - $ ./pypy-2.0/bin/pip install pygments # for example + $ ./pypy-2.1/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-2.0/site-packages``, and -the scripts in ``pypy-2.0/bin``. +3rd party libraries will be installed in ``pypy-2.1/site-packages``, and +the scripts in ``pypy-2.1/bin``. Installing using virtualenv --------------------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0.2`_: the latest official release +* `Release 2.1.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0.2`: http://pypy.org/download.html +.. _`Release 2.1.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.1.0.rst b/pypy/doc/release-2.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0.rst @@ -0,0 +1,89 @@ +============================ +PyPy 2.1 - Considered ARMful +============================ + +We're pleased to announce PyPy 2.1, which targets version 2.7.3 of the Python +language. This is the first release with official support for ARM processors in the JIT. +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.1 release here: + + http://pypy.org/download.html + +We would like to thank the `Raspberry Pi Foundation`_ for supporting the work +to finish PyPy's ARM support. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +The first beta of PyPy3 2.1, targeting version 3 of the Python language, was +just released, more details can be found `here`_. + +.. 
_`here`: http://morepypy.blogspot.com/2013/07/pypy3-21-beta-1.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.1 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. This release also supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like the Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.1 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* JIT support for ARM, architecture versions 6 and 7, hard- and soft-float ABI + +* Stacklet support for ARM + +* Support for os.statvfs and os.fstatvfs on unix systems + +* Improved logging performance + +* Faster sets for objects + +* Interpreter improvements + +* During packaging, compile the CFFI based TK extension + +* Pickling of numpy arrays and dtypes + +* Subarrays for numpy + +* Bugfixes to numpy + +* Bugfixes to cffi and ctypes + +* Bugfixes to the x86 stacklet support + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in PyPy sometimes failed with a "bad write retry" message. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 + +Cheers, + +David Schneider for the PyPy team. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -48,3 +48,13 @@ Allow subclassing ndarray, i.e. matrix .. branch: kill-ootype + +.. branch: fast-slowpath +Added an abstraction for functions with a fast and slow path in the JIT. This +speeds up list.append() and list.pop(). + +.. branch: curses_fixes + +.. branch: foldable-getarrayitem-indexerror +Constant-fold reading out of constant tuples in PyPy. + diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. 
cpython_magic, = struct.unpack(" 0 def test_ndmin(self): from numpypy import array diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -78,6 +78,11 @@ items.add(Item(name, kind, subitems)) return items +def get_version_str(python): + args = [python, '-c', 'import sys; print sys.version'] + lines = subprocess.check_output(args).splitlines() + return lines[0] + def split(lst): SPLIT = 5 lgt = len(lst) // SPLIT + 1 @@ -93,6 +98,7 @@ def main(argv): cpy_items = find_numpy_items("/usr/bin/python") pypy_items = find_numpy_items(argv[1], "numpypy") + ver = get_version_str(argv[1]) all_items = [] msg = "{:d}/{:d} names".format(len(pypy_items), len(cpy_items)) + " " @@ -113,7 +119,8 @@ env = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)) ) - html = env.get_template("page.html").render(all_items=split(sorted(all_items)), msg=msg) + html = env.get_template("page.html").render(all_items=split(sorted(all_items)), + msg=msg, ver=ver) if len(argv) > 2: with open(argv[2], 'w') as f: f.write(html.encode("utf-8")) diff --git a/pypy/module/micronumpy/tool/numready/page.html b/pypy/module/micronumpy/tool/numready/page.html --- a/pypy/module/micronumpy/tool/numready/page.html +++ b/pypy/module/micronumpy/tool/numready/page.html @@ -34,6 +34,7 @@

NumPyPy Status
+Version: {{ ver }}
Overall: {{ msg }}
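
The two hunks above work together: get_version_str() runs the inspected interpreter, keeps only the first line of sys.version, and main() now hands that string to the Jinja2 template as ver, which the page renders next to the overall count. A trimmed-down sketch of that flow, reusing only the names visible in the diff (the inline template string and the sample msg value are invented for illustration):

    import subprocess
    import jinja2

    def get_version_str(python):
        args = [python, '-c', 'import sys; print sys.version']
        # keep the first line only, e.g. "2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31)"
        return subprocess.check_output(args).splitlines()[0]

    template = jinja2.Template("Version: {{ ver }} / Overall: {{ msg }}")
    print template.render(ver=get_version_str("/usr/bin/python"), msg="0/0 names")
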

diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -209,6 +209,22 @@ opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + def test_constfold_tuple(self): + code = """if 1: + tup = tuple(range(10000)) + l = [1, 2, 3, 4, 5, 6, "a"] + def main(n): + while n > 0: + sub = tup[1] # ID: getitem + l[1] = n # kill cache of tup[1] + n -= sub + """ + log = self.run(code, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) + assert log.opnames(ops) == [] + + def test_specialised_tuple(self): def main(n): import pypyjit diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 2, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -0,0 +1,48 @@ +from lib_pypy import _curses + +import pytest + +lib = _curses.lib + + +def test_color_content(monkeypatch): + def lib_color_content(color, r, g, b): + r[0], g[0], b[0] = 42, 43, 44 + return lib.OK + + monkeypatch.setattr(_curses, '_ensure_initialised_color', lambda: None) + monkeypatch.setattr(lib, 'color_content', lib_color_content) + + assert _curses.color_content(None) == (42, 43, 44) + + +def test_setupterm(monkeypatch): + def make_setupterm(err_no): + def lib_setupterm(term, fd, err): + err[0] = err_no + + return lib.ERR + + return lib_setupterm + + monkeypatch.setattr(_curses, '_initialised_setupterm', False) + monkeypatch.setattr(lib, 'setupterm', make_setupterm(0)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "could not find terminal" in exc_info.value.args[0] + + monkeypatch.setattr(lib, 'setupterm', make_setupterm(-1)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "could not find terminfo database" in exc_info.value.args[0] + + monkeypatch.setattr(lib, 'setupterm', make_setupterm(42)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "unknown error" in exc_info.value.args[0] diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -472,6 +472,14 @@ return space.wrap(1) def coerce(space, w_obj1, w_obj2): + w_res = space.try_coerce(w_obj1, w_obj2) + if w_res is None: + raise OperationError(space.w_TypeError, + space.wrap("coercion failed")) + return w_res + + def try_coerce(space, w_obj1, w_obj2): + """Returns a wrapped 2-tuple or a real None if it failed.""" w_typ1 = space.type(w_obj1) w_typ2 = space.type(w_obj2) w_left_src, w_left_impl = space.lookup_in_type_where(w_typ1, '__coerce__') @@ -488,8 +496,7 @@ if w_res is None or space.is_w(w_res, space.w_None): w_res = _invoke_binop(space, w_right_impl, w_obj2, w_obj1) if w_res is None or space.is_w(w_res, space.w_None): - raise OperationError(space.w_TypeError, - 
space.wrap("coercion failed")) + return None if (not space.isinstance_w(w_res, space.w_tuple) or space.len_w(w_res) != 2): raise OperationError(space.w_TypeError, diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -702,11 +702,13 @@ find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') class ListStrategy(object): - sizehint = -1 def __init__(self, space): self.space = space + def get_sizehint(self): + return -1 + def init_from_list_w(self, w_list, list_w): raise NotImplementedError @@ -894,7 +896,7 @@ else: strategy = self.space.fromcache(ObjectListStrategy) - storage = strategy.get_empty_storage(self.sizehint) + storage = strategy.get_empty_storage(self.get_sizehint()) w_list.strategy = strategy w_list.lstorage = storage @@ -974,6 +976,9 @@ self.sizehint = sizehint ListStrategy.__init__(self, space) + def get_sizehint(self): + return self.sizehint + def _resize_hint(self, w_list, hint): assert hint >= 0 self.sizehint = hint diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -396,7 +396,7 @@ comm = loop.comment comm = comm.lower() if comm.startswith('# bridge'): - m = re.search('guard (-?[\da-f]+)', comm) + m = re.search('guard 0x(-?[\da-f]+)', comm) name = 'guard ' + m.group(1) elif "(" in comm: name = comm[2:comm.find('(')-1] @@ -460,4 +460,4 @@ if __name__ == '__main__': import_log(sys.argv[1]) - + diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -19,6 +19,7 @@ BUILDERS = [ 'own-linux-x86-32', 'own-linux-x86-64', + 'own-linux-armhf', # 'own-macosx-x86-32', # 'pypy-c-app-level-linux-x86-32', # 'pypy-c-app-level-linux-x86-64', diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -210,8 +210,6 @@ r.const, = answers return r -##def builtin_callable(s_obj): -## return SomeBool() def builtin_tuple(s_iterable): if isinstance(s_iterable, SomeTuple): diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -151,11 +151,6 @@ BoolOption("no__thread", "don't use __thread for implementing TLS", default=False, cmdline="--no__thread", negation=False), -## --- not supported since a long time. Use the env vars CFLAGS/LDFLAGS. 
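
Back in the descroperation.py hunk above, coercion is split in two: try_coerce() returns a wrapped 2-tuple, or a real None when both __coerce__ implementations decline, and coerce() keeps the old behaviour by turning that None into a TypeError. A minimal sketch of a caller that prefers a fallback over the exception (the function name and the w_default argument are invented, only space.try_coerce() comes from the diff):

    def coerce_or_default(space, w_a, w_b, w_default):
        w_pair = space.try_coerce(w_a, w_b)   # wrapped 2-tuple, or None on failure
        if w_pair is None:
            return w_default                  # no TypeError raised on this path
        return w_pair
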
-## StrOption("compilerflags", "Specify flags for the C compiler", -## cmdline="--cflags"), -## StrOption("linkerflags", "Specify flags for the linker (C backend only)", -## cmdline="--ldflags"), IntOption("make_jobs", "Specify -j argument to make for compilation" " (C backend only)", cmdline="--make-jobs", default=detect_number_of_processors()), diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -137,18 +137,6 @@ self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() -## def test_args_for_new(self): -## S = lltype.GcStruct('S', ('x', lltype.Signed)) -## sizedescr = get_size_descr(self.gc_ll_descr, S) -## args = self.gc_ll_descr.args_for_new(sizedescr) -## for x in args: -## assert lltype.typeOf(x) == lltype.Signed -## A = lltype.GcArray(lltype.Signed) -## arraydescr = get_array_descr(self.gc_ll_descr, A) -## args = self.gc_ll_descr.args_for_new(sizedescr) -## for x in args: -## assert lltype.typeOf(x) == lltype.Signed - def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = descr.get_size_descr(self.gc_ll_descr, S) diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -88,7 +88,7 @@ else: assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): - assert nos == [9, 10, 47] + assert nos == [0, 1, 47] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -187,10 +187,6 @@ # with Voids removed raise NotImplementedError - def methdescrof(self, SELFTYPE, methname): - # must return a subclass of history.AbstractMethDescr - raise NotImplementedError - def typedescrof(self, TYPE): raise NotImplementedError diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -502,6 +502,7 @@ # 3. raising call and wrong guard_exception # 4. raising call and guard_no_exception # 5. non raising call and guard_exception +# (6. test of a cond_call, always non-raising and guard_no_exception) class BaseCallOperation(test_random.AbstractOperation): def non_raising_func_code(self, builder, r): @@ -648,6 +649,34 @@ builder.guard_op = op builder.loop.operations.append(op) +# 6. 
a conditional call (for now always with no exception raised) +class CondCallOperation(BaseCallOperation): + def produce_into(self, builder, r): + fail_subset = builder.subset_of_intvars(r) + v_cond = builder.get_bool_var(r) + subset = builder.subset_of_intvars(r)[:4] + for i in range(len(subset)): + if r.random() < 0.35: + subset[i] = ConstInt(r.random_integer()) + # + seen = [] + def call_me(*args): + if len(seen) == 0: + seen.append(args) + else: + assert seen[0] == args + # + TP = lltype.FuncType([lltype.Signed] * len(subset), lltype.Void) + ptr = llhelper(lltype.Ptr(TP), call_me) + c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) + args = [v_cond, c_addr] + subset + descr = self.getcalldescr(builder, TP) + self.put(builder, args, descr) + op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, + descr=builder.getfaildescr()) + op.setfailargs(fail_subset) + builder.loop.operations.append(op) + # ____________________________________________________________ OPERATIONS = test_random.OPERATIONS[:] @@ -684,6 +713,7 @@ OPERATIONS.append(RaisingCallOperationGuardNoException(rop.CALL)) OPERATIONS.append(RaisingCallOperationWrongGuardException(rop.CALL)) OPERATIONS.append(CallOperationException(rop.CALL)) + OPERATIONS.append(CondCallOperation(rop.COND_CALL)) OPERATIONS.append(GuardNonNullClassOperation(rop.GUARD_NONNULL_CLASS)) LLtypeOperationBuilder.OPERATIONS = OPERATIONS diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -14,7 +14,7 @@ from rpython.rlib.jit import AsmInfo from rpython.jit.backend.model import CompiledLoopToken from rpython.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, - gpr_reg_mgr_cls, xmm_reg_mgr_cls, _register_arguments) + gpr_reg_mgr_cls, xmm_reg_mgr_cls) from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) from rpython.jit.backend.x86.arch import (FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, @@ -155,17 +155,24 @@ come. """ mc = codebuf.MachineCodeBlockWrapper() - self._push_all_regs_to_frame(mc, [], supports_floats, callee_only) + # copy registers to the frame, with the exception of the + # 'cond_call_register_arguments' and eax, because these have already + # been saved by the caller. Note that this is not symmetrical: + # these 5 registers are saved by the caller but restored here at + # the end of this function. 
+ self._push_all_regs_to_frame(mc, cond_call_register_arguments + [eax], + supports_floats, callee_only) if IS_X86_64: - mc.SUB(esp, imm(WORD)) + mc.SUB(esp, imm(WORD)) # alignment self.set_extra_stack_depth(mc, 2 * WORD) + # the arguments are already in the correct registers else: - # we want space for 3 arguments + call + alignment - # the caller is responsible for putting arguments in the right spot + # we want space for 4 arguments + call + alignment mc.SUB(esp, imm(WORD * 7)) self.set_extra_stack_depth(mc, 8 * WORD) + # store the arguments at the correct place in the stack for i in range(4): - mc.MOV_sr(i * WORD, _register_arguments[i].value) + mc.MOV_sr(i * WORD, cond_call_register_arguments[i].value) mc.CALL(eax) if IS_X86_64: mc.ADD(esp, imm(WORD)) @@ -173,8 +180,8 @@ mc.ADD(esp, imm(WORD * 7)) self.set_extra_stack_depth(mc, 0) self._reload_frame_if_necessary(mc, align_stack=True) - self._pop_all_regs_from_frame(mc, [], supports_floats, - callee_only) + self._pop_all_regs_from_frame(mc, [], supports_floats, callee_only) + self.pop_gcmap(mc) # push_gcmap(store=True) done by the caller mc.RET() return mc.materialize(self.cpu.asmmemmgr, []) @@ -1756,7 +1763,7 @@ regs = gpr_reg_mgr_cls.save_around_call_regs else: regs = gpr_reg_mgr_cls.all_regs - for i, gpr in enumerate(regs): + for gpr in regs: if gpr not in ignored_regs: v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] mc.MOV_br(v * WORD + base_ofs, gpr.value) @@ -1778,7 +1785,7 @@ regs = gpr_reg_mgr_cls.save_around_call_regs else: regs = gpr_reg_mgr_cls.all_regs - for i, gpr in enumerate(regs): + for gpr in regs: if gpr not in ignored_regs: v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] mc.MOV_rb(gpr.value, v * WORD + base_ofs) @@ -2157,11 +2164,33 @@ def label(self): self._check_frame_depth_debug(self.mc) - def cond_call(self, op, gcmap, cond_loc, call_loc): - self.mc.TEST(cond_loc, cond_loc) + def cond_call(self, op, gcmap, loc_cond, imm_func, arglocs): + self.mc.TEST(loc_cond, loc_cond) self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() + # self.push_gcmap(self.mc, gcmap, store=True) + # + # first save away the 4 registers from 'cond_call_register_arguments' + # plus the register 'eax' + base_ofs = self.cpu.get_baseofs_of_frame_field() + should_be_saved = self._regalloc.rm.reg_bindings.values() + for gpr in cond_call_register_arguments + [eax]: + if gpr not in should_be_saved: + continue + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + self.mc.MOV_br(v * WORD + base_ofs, gpr.value) + # + # load the 0-to-4 arguments into these registers + from rpython.jit.backend.x86.jump import remap_frame_layout + remap_frame_layout(self, arglocs, + cond_call_register_arguments[:len(arglocs)], + X86_64_SCRATCH_REG if IS_X86_64 else None) + # + # load the constant address of the function to call into eax + self.mc.MOV(eax, imm_func) + # + # figure out which variant of cond_call_slowpath to call, and call it callee_only = False floats = False if self._regalloc is not None: @@ -2174,11 +2203,13 @@ floats = True cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] self.mc.CALL(imm(cond_call_adr)) - self.pop_gcmap(self.mc) - # never any result value + # restoring the registers saved above, and doing pop_gcmap(), is left + # to the cond_call_slowpath helper. We never have any result value. 
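
Taken together, the comments above spell out the contract of the new conditional call: nothing happens when the condition register is zero, the first four integer arguments travel in edi, esi, edx and ecx with the helper address loaded into eax, and the operation never produces a result. A rough Python model of the emitted fast path, deliberately ignoring register allocation and gcmaps (cond_call_model is an invented name):

    def cond_call_model(cond, func, *args):
        # TEST cond, cond / JZ over the call: the common case falls through
        if cond:
            # registers are saved partly here and partly in cond_call_slowpath,
            # then restored; the call itself returns nothing to the trace
            func(*args)
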
offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) + # XXX if the next operation is a GUARD_NO_EXCEPTION, we should + # somehow jump over it too in the fast path def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert size & (WORD-1) == 0 # must be correctly aligned @@ -2353,5 +2384,7 @@ os.write(2, '[x86/asm] %s\n' % msg) raise NotImplementedError(msg) +cond_call_register_arguments = [edi, esi, edx, ecx] + class BridgeAlreadyCompiled(Exception): pass diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -119,8 +119,6 @@ for _i, _reg in enumerate(gpr_reg_mgr_cls.all_regs): gpr_reg_mgr_cls.all_reg_indexes[_reg.value] = _i -_register_arguments = [edi, esi, edx, ecx] - class RegAlloc(BaseRegalloc): @@ -801,23 +799,26 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): + # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments' + # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA. + # We must make sure that edi and esi do not contain GC pointers. + if IS_X86_32 and self.assembler._is_asmgcc(): + for box, loc in self.rm.reg_bindings.items(): + if (loc == edi or loc == esi) and box.type == REF: + self.rm.force_spill_var(box) + assert box not in self.rm.reg_bindings + # assert op.result is None args = op.getarglist() - assert 2 <= len(args) <= 4 + 2 - tmpbox = TempBox() - self.rm.force_allocate_reg(tmpbox, selected_reg=eax) + assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments + loc_cond = self.make_sure_var_in_reg(args[0], args) v = args[1] assert isinstance(v, Const) - imm = self.rm.convert_to_imm(v) - self.assembler.regalloc_mov(imm, eax) - args_so_far = [tmpbox] - for i in range(2, len(args)): - reg = _register_arguments[i - 2] - self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) - args_so_far.append(args[i]) - loc_cond = self.make_sure_var_in_reg(args[0], args) - self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax) - self.rm.possibly_free_var(tmpbox) + imm_func = self.rm.convert_to_imm(v) + arglocs = [self.loc(args[i]) for i in range(2, len(args))] + gcmap = self.get_gcmap() + self.rm.possibly_free_var(args[0]) + self.assembler.cond_call(op, gcmap, loc_cond, imm_func, arglocs) def consider_call_malloc_nursery(self, op): size_box = op.getarg(0) diff --git a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -190,10 +190,6 @@ # for args in args_lists: suffix = "" - ## all = instr.as_all_suffixes - ## for m, extra in args: - ## if m in (i386.MODRM, i386.MODRM8) or all: - ## suffix = suffixes[sizes[m]] + suffix if (argmodes and not self.is_xmm_insn and not instrname.startswith('FSTP')): suffix = suffixes[self.WORD] diff --git a/rpython/jit/codewriter/test/test_longlong.py b/rpython/jit/codewriter/test/test_longlong.py --- a/rpython/jit/codewriter/test/test_longlong.py +++ b/rpython/jit/codewriter/test/test_longlong.py @@ -236,17 +236,3 @@ assert list(op1.args[3]) == vlist assert op1.result == v_result - -##def test_singlefloat_constants(): -## v_x = varoftype(TYPE) -## vlist = [v_x, const(rffi.cast(TYPE, 7))] -## v_result = varoftype(TYPE) -## op = SpaceOperation('llong_add', vlist, v_result) -## tr 
= Transformer(FakeCPU(), FakeBuiltinCallControl()) -## op1 = tr.rewrite_operation(op) -## # -## assert op1.opname == 'residual_call_irf_f' -## assert list(op1.args[2]) == [] -## assert list(op1.args[3]) == [] -## assert list(op1.args[4]) == vlist -## assert op1.result == v_result diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -283,22 +283,6 @@ # ____________________________________________________________ -##def do_force_token(cpu): -## raise NotImplementedError - -##def do_virtual_ref(cpu, box1, box2): -## raise NotImplementedError - -##def do_virtual_ref_finish(cpu, box1, box2): -## raise NotImplementedError - -##def do_debug_merge_point(cpu, box1): -## from rpython.jit.metainterp.warmspot import get_stats -## loc = box1._get_str() -## get_stats().add_merge_point_location(loc) - -# ____________________________________________________________ - def _make_execute_list(): execute_by_num_args = {} diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -34,7 +34,6 @@ return 'int' # singlefloats are stored in an int if TYPE in (lltype.Float, lltype.SingleFloat): raise NotImplementedError("type %s not supported" % TYPE) - # XXX fix this for oo... if (TYPE != llmemory.Address and rffi.sizeof(TYPE) > rffi.sizeof(lltype.Signed)): if supports_longlong and TYPE is not lltype.LongFloat: @@ -170,18 +169,11 @@ def __init__(self, identifier=None): self.identifier = identifier # for testing + class BasicFailDescr(AbstractFailDescr): def __init__(self, identifier=None): self.identifier = identifier # for testing -class AbstractMethDescr(AbstractDescr): - # the base class of the result of cpu.methdescrof() - jitcodes = None - def setup(self, jitcodes): - # jitcodes maps { runtimeClass -> jitcode for runtimeClass.methname } - self.jitcodes = jitcodes - def get_jitcode_for_class(self, oocls): - return self.jitcodes[oocls] class Const(AbstractValue): __slots__ = () diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -411,18 +411,6 @@ def optimize_INSTANCE_PTR_NE(self, op): self._optimize_oois_ooisnot(op, True, True) -## def optimize_INSTANCEOF(self, op): -## value = self.getvalue(op.args[0]) -## realclassbox = value.get_constant_class(self.optimizer.cpu) -## if realclassbox is not None: -## checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) -## result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, -## realclassbox, -## checkclassbox) -## self.make_constant_int(op.result, result) -## return -## self.emit_operation(op) - def optimize_CALL(self, op): # dispatch based on 'oopspecindex' to a method that handles # specifically the given oopspec call. 
For non-oopspec calls, diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -723,7 +723,6 @@ self.make_constant_int(op.result, value.getlength()) else: value.ensure_nonnull() - ###self.optimize_default(op) self.emit_operation(op) def optimize_GETARRAYITEM_GC(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -195,11 +195,10 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fieldstate) > value.getlength(): + raise BadVirtualState for i in range(len(self.fieldstate)): - try: - v = value.get_item_value(i) - except IndexError: - raise BadVirtualState + v = value.get_item_value(i) s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -269,13 +268,13 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fielddescrs) > len(value._items): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): try: v = value._items[i][self.fielddescrs[i][j]] - except IndexError: - raise BadVirtualState except KeyError: raise BadVirtualState s = self.fieldstate[p] diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1281,10 +1281,6 @@ def setup_resume_at_op(self, pc): self.pc = pc - ## values = ' '.join([box.repr_rpython() for box in self.env]) - ## log('setup_resume_at_op %s:%d [%s] %d' % (self.jitcode.name, - ## self.pc, values, - ## self.exception_target)) def run_one_step(self): # Execute the frame forward. 
This method contains a loop that leaves diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -69,6 +69,28 @@ self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, getarrayitem_gc=0, getarrayitem_gc_pure=1) + def test_array_index_error(self): + class X(object): + _immutable_fields_ = ["y[*]"] + + def __init__(self, x): + self.y = x + + def get(self, index): + try: + return self.y[index] + except IndexError: + return -41 + + def f(index): + l = [1, 2, 3, 4] + l[2] = 30 + a = escape(X(l)) + return a.get(index) + res = self.interp_operations(f, [2], listops=True) + assert res == 30 + self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, + getarrayitem_gc=0, getarrayitem_gc_pure=1) def test_array_in_immutable(self): class X(object): diff --git a/rpython/jit/tl/targettlc.py b/rpython/jit/tl/targettlc.py --- a/rpython/jit/tl/targettlc.py +++ b/rpython/jit/tl/targettlc.py @@ -2,7 +2,6 @@ import py py.path.local(__file__) from rpython.jit.tl.tlc import interp, interp_nonjit, ConstantPool -from rpython.jit.codewriter.policy import JitPolicy from rpython.jit.backend.hlinfo import highleveljitinfo @@ -54,14 +53,10 @@ return decode_program(f.readall()) def target(driver, args): - return entry_point, None + return entry_point # ____________________________________________________________ -def jitpolicy(driver): - """Returns the JIT policy to use when translating.""" - return JitPolicy() - if __name__ == '__main__': import sys sys.exit(entry_point(sys.argv)) diff --git a/rpython/jit/tl/targettlr.py b/rpython/jit/tl/targettlr.py --- a/rpython/jit/tl/targettlr.py +++ b/rpython/jit/tl/targettlr.py @@ -29,15 +29,10 @@ return bytecode def target(driver, args): - return entry_point, None + return entry_point # ____________________________________________________________ -from rpython.jit.codewriter.policy import JitPolicy - -def jitpolicy(driver): - return JitPolicy() - if __name__ == '__main__': import sys sys.exit(entry_point(sys.argv)) diff --git a/rpython/jit/tl/tla/targettla.py b/rpython/jit/tl/tla/targettla.py --- a/rpython/jit/tl/tla/targettla.py +++ b/rpython/jit/tl/tla/targettla.py @@ -28,9 +28,6 @@ def target(driver, args): return entry_point, None -def jitpolicy(driver): - from rpython.jit.codewriter.policy import JitPolicy - return JitPolicy() # ____________________________________________________________ diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -991,9 +991,12 @@ # after a minor or major collection, no object should be in the nursery ll_assert(not self.is_in_nursery(obj), "object in nursery after collection") - # similarily, all objects should have this flag: - ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0, - "missing GCFLAG_TRACK_YOUNG_PTRS") + # similarily, all objects should have this flag, except if they + # don't have any GC pointer + typeid = self.get_type_id(obj) + if self.has_gcptr(typeid): + ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0, + "missing GCFLAG_TRACK_YOUNG_PTRS") # the GCFLAG_VISITED should not be set between collections ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0, "unexpected GCFLAG_VISITED") diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ 
b/rpython/memory/gctransform/asmgcroot.py @@ -729,6 +729,10 @@ # - frame address (actually the addr of the retaddr of the current function; # that's the last word of the frame in memory) # +# On 64 bits, it is an array of 7 values instead of 5: +# +# - %rbx, %r12, %r13, %r14, %r15, %rbp; and the frame address +# if IS_64_BITS: CALLEE_SAVED_REGS = 6 diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -299,10 +299,6 @@ else: malloc_fixedsize_meth = None self.malloc_fixedsize_ptr = self.malloc_fixedsize_clear_ptr -## self.malloc_varsize_ptr = getfn( -## GCClass.malloc_varsize.im_func, -## [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)] -## + [annmodel.SomeBool()], s_gcref) self.malloc_varsize_clear_ptr = getfn( GCClass.malloc_varsize_clear.im_func, [s_gc, s_typeid16] diff --git a/rpython/memory/gctransform/refcounting.py b/rpython/memory/gctransform/refcounting.py --- a/rpython/memory/gctransform/refcounting.py +++ b/rpython/memory/gctransform/refcounting.py @@ -12,22 +12,6 @@ counts = {} -## def print_call_chain(ob): -## import sys -## f = sys._getframe(1) -## stack = [] -## flag = False -## while f: -## if f.f_locals.get('self') is ob: -## stack.append((f.f_code.co_name, f.f_locals.get('TYPE'))) -## if not flag: -## counts[f.f_code.co_name] = counts.get(f.f_code.co_name, 0) + 1 -## print counts -## flag = True -## f = f.f_back -## stack.reverse() -## for i, (a, b) in enumerate(stack): -## print ' '*i, a, repr(b)[:100-i-len(a)], id(b) ADDRESS_VOID_FUNC = lltype.FuncType([llmemory.Address], lltype.Void) diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -203,6 +203,8 @@ offsets = offsets_to_gc_pointers(TYPE) infobits = index info.ofstoptrs = builder.offsets2table(offsets, TYPE) + if len(offsets) > 0: + infobits |= T_HAS_GCPTR # fptrs = builder.special_funcptr_for_type(TYPE) if fptrs: @@ -216,7 +218,7 @@ infobits |= T_HAS_FINALIZER | T_HAS_LIGHTWEIGHT_FINALIZER if "custom_trace" in fptrs: extra.customtracer = fptrs["custom_trace"] - infobits |= T_HAS_CUSTOM_TRACE + infobits |= T_HAS_CUSTOM_TRACE | T_HAS_GCPTR info.extra = extra # if not TYPE._is_varsize(): @@ -249,15 +251,13 @@ else: offsets = () if len(offsets) > 0: - infobits |= T_HAS_GCPTR_IN_VARSIZE + infobits |= T_HAS_GCPTR_IN_VARSIZE | T_HAS_GCPTR varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF) varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) if builder.is_weakref_type(TYPE): infobits |= T_IS_WEAKREF if is_subclass_of_object(TYPE): infobits |= T_IS_RPYTHON_INSTANCE - if infobits | T_HAS_GCPTR_IN_VARSIZE or offsets: - infobits |= T_HAS_GCPTR info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ diff --git a/rpython/rlib/longlong2float.py b/rpython/rlib/longlong2float.py --- a/rpython/rlib/longlong2float.py +++ b/rpython/rlib/longlong2float.py @@ -68,14 +68,12 @@ uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__uint2singlefloat") + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - 
_nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__singlefloat2uint") + _nowrapper=True, elidable_function=True, sandboxsafe=True) class Float2LongLongEntry(ExtRegistryEntry): diff --git a/rpython/rlib/rlocale.py b/rpython/rlib/rlocale.py --- a/rpython/rlib/rlocale.py +++ b/rpython/rlib/rlocale.py @@ -193,11 +193,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') -isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') -islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') -tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') -isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') +isalpha = external('isalpha', [rffi.INT], rffi.INT) +isupper = external('isupper', [rffi.INT], rffi.INT) +islower = external('islower', [rffi.INT], rffi.INT) +tolower = external('tolower', [rffi.INT], rffi.INT) +isalnum = external('isalnum', [rffi.INT], rffi.INT) if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -429,26 +429,3 @@ expected_length = len(itemloaders) unroll_item_loaders = unrolling_iterable(enumerate(itemloaders)) add_loader(s_tuple, load_tuple) - - -## -- not used any more right now -- -##class __extend__(pairtype(MTag, controllerentry.SomeControlledInstance)): -## # marshal a ControlledInstance by marshalling the underlying object - -## def install_marshaller((tag, s_obj)): -## def dump_controlled_instance(buf, x): -## real_obj = controllerentry.controlled_instance_unbox(controller, x) -## realdumper(buf, real_obj) - -## controller = s_obj.controller -## realdumper = get_marshaller(s_obj.s_real_obj) -## add_dumper(s_obj, dump_controlled_instance) - -## def install_unmarshaller((tag, s_obj)): -## def load_controlled_instance(loader): -## real_obj = realloader(loader) -## return controllerentry.controlled_instance_box(controller, -## real_obj) -## controller = s_obj.controller -## realloader = get_loader(s_obj.s_real_obj) -## add_loader(s_obj, load_controlled_instance) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -185,11 +185,8 @@ SetEndOfFile = rffi.llexternal('SetEndOfFile', [HANDLE], BOOL, compilation_info=_eci) - # HACK: These implementations are specific to MSVCRT and the C backend. - # When generating on CLI or JVM, these are patched out. 
- # See PyPyTarget.target() in targetpypystandalone.py def _setfd_binary(fd): - #Allow this to succeed on invalid fd's + # Allow this to succeed on invalid fd's if rposix.is_valid_fd(fd): _setmode(fd, os.O_BINARY) diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -343,9 +343,6 @@ def _short_name(self): return "%s %s" % (self.__class__.__name__, self._name) -## def _defl(self, parent=None, parentindex=None): -## return _struct(self, parent=parent, parentindex=parentindex) - def _allocate(self, initialization, parent=None, parentindex=None): return _struct(self, initialization=initialization, parent=parent, parentindex=parentindex) @@ -1029,15 +1026,6 @@ parent = container._parentstructure() if parent is not None: return parent, container._parent_index -## if isinstance(parent, _struct): -## for name in parent._TYPE._names: -## if getattr(parent, name) is container: -## return parent, name -## raise RuntimeError("lost ourselves") -## if isinstance(parent, _array): -## raise TypeError("cannot fish a pointer to an array item or an " -## "inlined substructure of it") -## raise AssertionError("don't know about %r" % (parent,)) else: return None, None diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -452,10 +452,6 @@ def op_cast_int_to_adr(int): return llmemory.cast_int_to_adr(int) -##def op_cast_int_to_adr(x): -## assert type(x) is int -## return llmemory.cast_int_to_adr(x) - def op_convert_float_bytes_to_longlong(a): from rpython.rlib.longlong2float import float2longlong return float2longlong(a) diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -54,7 +54,7 @@ # ... 
// extra instance attributes # } # -# there's also a nongcobject +# there's also a nongcobject OBJECT_VTABLE = lltype.ForwardReference() CLASSTYPE = Ptr(OBJECT_VTABLE) @@ -284,16 +284,11 @@ cname = inputconst(Void, mangled_name) return llops.genop('getfield', [v_vtable, cname], resulttype=r) - def rtype_issubtype(self, hop): + def rtype_issubtype(self, hop): class_repr = get_type_repr(self.rtyper) v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr) if isinstance(v_cls2, Constant): cls2 = v_cls2.value - # XXX re-implement the following optimization -## if cls2.subclassrange_max == cls2.subclassrange_min: -## # a class with no subclass -## return hop.genop('ptr_eq', [v_cls1, v_cls2], resulttype=Bool) -## else: minid = hop.inputconst(Signed, cls2.subclassrange_min) maxid = hop.inputconst(Signed, cls2.subclassrange_max) return hop.gendirectcall(ll_issubclass_const, v_cls1, minid, @@ -313,7 +308,7 @@ else: ForwardRef = lltype.FORWARDREF_BY_FLAVOR[LLFLAVOR[gcflavor]] self.object_type = ForwardRef() - + self.iprebuiltinstances = identity_dict() self.lowleveltype = Ptr(self.object_type) self.gcflavor = gcflavor diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -62,8 +62,8 @@ compilation_info=ExternalCompilationInfo(), sandboxsafe=False, threadsafe='auto', _nowrapper=False, calling_conv='c', - oo_primitive=None, elidable_function=False, - macro=None, random_effects_on_gcobjs='auto'): + elidable_function=False, macro=None, + random_effects_on_gcobjs='auto'): """Build an external function that will invoke the C function 'name' with the given 'args' types and 'result' type. @@ -97,8 +97,6 @@ if elidable_function: _callable._elidable_function_ = True kwds = {} - if oo_primitive: - kwds['oo_primitive'] = oo_primitive has_callback = False for ARG in args: @@ -651,6 +649,10 @@ # char * CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True})) +# const char * +CONST_CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True, + 'render_as_const': True})) + # wchar_t * CWCHARP = lltype.Ptr(lltype.Array(lltype.UniChar, hints={'nolength': True})) diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -181,9 +181,6 @@ funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value) return self.convert_desc(funcdesc) -## def convert_to_concrete_llfn(self, v, shape, index, llop): -## return v - def rtype_simple_call(self, hop): return self.call('simple_call', hop) @@ -321,15 +318,6 @@ c_table = conversion_table(r_from, r_to) if c_table: assert v.concretetype is Char -## from rpython.rtyper.lltypesystem.rstr import string_repr -## s = repr(llops.rtyper.annotator.annotated.get(llops.originalblock)) -## if 'LOAD_GLOBAL' in s: -## import pdb; pdb.set_trace() -## print >> myf, 'static small conv', s -## print 'static small conv', s -## llops.genop('debug_print', -## [Constant(string_repr.convert_const("dynamic small conv" + s), -## string_repr.lowleveltype)]) v_int = llops.genop('cast_char_to_int', [v], resulttype=Signed) return llops.genop('getarrayitem', [c_table, v_int], diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -53,8 +53,7 @@ expected = os.statvfs('.') except OSError, e: py.test.skip("the underlying 
os.statvfs() failed: %s" % e) - data = getllimpl(os.statvfs)('.') - assert data == expected + getllimpl(os.statvfs)('.') def test_fstatvfs(): if not hasattr(os, 'fstatvfs'): @@ -63,8 +62,7 @@ expected = os.fstatvfs(0) except OSError, e: py.test.skip("the underlying os.fstatvfs() failed: %s" % e) - data = getllimpl(os.fstatvfs)(0) - assert data == expected + getllimpl(os.fstatvfs)(0) def test_utimes(): if os.name != 'nt': diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -247,27 +247,22 @@ v_lst, v_index = hop.inputargs(r_lst, Signed) if checkidx: hop.exception_is_here() + spec = dum_checkidx else: + spec = dum_nocheck hop.exception_cannot_occur() - if hop.args_s[0].listdef.listitem.mutated or checkidx: - if hop.args_s[1].nonneg: - llfn = ll_getitem_nonneg - else: - llfn = ll_getitem - if checkidx: - spec = dum_checkidx - else: - spec = dum_nocheck - c_func_marker = hop.inputconst(Void, spec) - v_res = hop.gendirectcall(llfn, c_func_marker, v_lst, v_index) + if hop.args_s[0].listdef.listitem.mutated: + basegetitem = ll_getitem_fast else: - # this is the 'foldable' version, which is not used when - # we check for IndexError - if hop.args_s[1].nonneg: - llfn = ll_getitem_foldable_nonneg - else: - llfn = ll_getitem_foldable - v_res = hop.gendirectcall(llfn, v_lst, v_index) + basegetitem = ll_getitem_foldable_nonneg + + if hop.args_s[1].nonneg: + llfn = ll_getitem_nonneg + else: + llfn = ll_getitem + c_func_marker = hop.inputconst(Void, spec) + c_basegetitem = hop.inputconst(Void, basegetitem) + v_res = hop.gendirectcall(llfn, c_func_marker, c_basegetitem, v_lst, v_index) return r_lst.recast(hop.llops, v_res) rtype_getitem_key = rtype_getitem @@ -326,15 +321,6 @@ return NotImplemented return v -## # TODO: move it to lltypesystem -## def rtype_is_((r_lst1, r_lst2), hop): -## if r_lst1.lowleveltype != r_lst2.lowleveltype: -## # obscure logic, the is can be true only if both are None -## v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2) -## return hop.gendirectcall(ll_both_none, v_lst1, v_lst2) - -## return pairtype(Repr, Repr).rtype_is_(pair(r_lst1, r_lst2), hop) - def rtype_eq((r_lst1, r_lst2), hop): assert r_lst1.item_repr == r_lst2.item_repr v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2) @@ -663,16 +649,16 @@ i += 1 length_1_i -= 1 -def ll_getitem_nonneg(func, l, index): +def ll_getitem_nonneg(func, basegetitem, l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") if func is dum_checkidx: if index >= l.ll_length(): raise IndexError - return l.ll_getitem_fast(index) + return basegetitem(l, index) ll_getitem_nonneg._always_inline_ = True # no oopspec -- the function is inlined by the JIT -def ll_getitem(func, l, index): +def ll_getitem(func, basegetitem, l, index): if func is dum_checkidx: length = l.ll_length() # common case: 0 <= index < length if r_uint(index) >= r_uint(length): @@ -689,21 +675,18 @@ if index < 0: index += l.ll_length() ll_assert(index >= 0, "negative list getitem index out of bound") + return basegetitem(l, index) +# no oopspec -- the function is inlined by the JIT + +def ll_getitem_fast(l, index): return l.ll_getitem_fast(index) -# no oopspec -- the function is inlined by the JIT +ll_getitem_fast._always_inline_ = True def ll_getitem_foldable_nonneg(l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") return l.ll_getitem_fast(index) ll_getitem_foldable_nonneg.oopspec = 'list.getitem_foldable(l, index)' -def ll_getitem_foldable(l, index): - if index < 0: - 
index += l.ll_length() - return ll_getitem_foldable_nonneg(l, index) -ll_getitem_foldable._always_inline_ = True -# no oopspec -- the function is inlined by the JIT - def ll_setitem_nonneg(func, l, index, newitem): ll_assert(index >= 0, "unexpectedly negative list setitem index") if func is dum_checkidx: diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -60,14 +60,6 @@ t = () return tuple([self.__class__, self.can_be_None]+lst)+t -##builtin_descriptor_type = ( -## type(len), # type 'builtin_function_or_method' -## type(list.append), # type 'method_descriptor' -## type(type(None).__repr__), # type 'wrapper_descriptor' -## type(type.__dict__['__dict__']), # type 'getset_descriptor' -## type(type.__dict__['__flags__']), # type 'member_descriptor' -## ) - # ____________________________________________________________ class ConcreteCallTableRow(dict): @@ -196,16 +188,6 @@ funcdesc = self.s_pbc.any_description() return funcdesc.get_s_signatures(shape) -## def function_signatures(self): -## if self._function_signatures is None: -## self._function_signatures = {} -## for func in self.s_pbc.prebuiltinstances: -## if func is not None: -## self._function_signatures[func] = getsignature(self.rtyper, -## func) -## assert self._function_signatures -## return self._function_signatures - def convert_desc(self, funcdesc): # get the whole "column" of the call table corresponding to this desc try: @@ -876,16 +858,6 @@ return hop2 # ____________________________________________________________ -##def getsignature(rtyper, func): -## f = rtyper.getcallable(func) -## graph = rtyper.type_system_deref(f).graph -## rinputs = [rtyper.bindingrepr(v) for v in graph.getargs()] -## if graph.getreturnvar() in rtyper.annotator.bindings: -## rresult = rtyper.bindingrepr(graph.getreturnvar()) -## else: -## rresult = Void -## return f, rinputs, rresult - def samesig(funcs): import inspect argspec = inspect.getargspec(funcs[0]) diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -9,16 +9,12 @@ class __extend__(annmodel.SomePtr): def rtyper_makerepr(self, rtyper): -## if self.is_constant() and not self.const: # constant NULL -## return nullptr_repr -## else: return PtrRepr(self.ll_ptrtype) + def rtyper_makekey(self): -## if self.is_constant() and not self.const: -## return None -## else: return self.__class__, self.ll_ptrtype + class __extend__(annmodel.SomeInteriorPtr): def rtyper_makerepr(self, rtyper): return InteriorPtrRepr(self.ll_ptrtype) @@ -154,22 +150,6 @@ vlist = hop.inputargs(r_ptr, lltype.Signed, hop.args_r[2]) hop.genop('setarrayitem', vlist) -# ____________________________________________________________ -# -# Null Pointers - -##class NullPtrRepr(Repr): -## lowleveltype = lltype.Void - -## def rtype_is_true(self, hop): -## return hop.inputconst(lltype.Bool, False) - -##nullptr_repr = NullPtrRepr() - -##class __extend__(pairtype(NullPtrRepr, PtrRepr)): -## def convert_from_to((r_null, r_ptr), v, llops): -## # nullptr to general pointer -## return inputconst(r_ptr, _ptr(r_ptr.lowleveltype, None)) # ____________________________________________________________ # diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -676,13 +676,6 @@ get_ll_fasthash_function = get_ll_hash_function -## def rtype_len(_, hop): -## return hop.inputconst(Signed, 1) -## -## def rtype_is_true(_, hop): -## assert not 
hop.args_s[0].can_be_None -## return hop.inputconst(Bool, True) - def rtype_ord(_, hop): rstr = hop.rtyper.type_system.rstr vlist = hop.inputargs(rstr.unichar_repr) @@ -694,10 +687,6 @@ pairtype(AbstractUniCharRepr, AbstractCharRepr)): def rtype_eq(_, hop): return _rtype_unchr_compare_template(hop, 'eq') def rtype_ne(_, hop): return _rtype_unchr_compare_template(hop, 'ne') -## def rtype_lt(_, hop): return _rtype_unchr_compare_template(hop, 'lt') -## def rtype_le(_, hop): return _rtype_unchr_compare_template(hop, 'le') -## def rtype_gt(_, hop): return _rtype_unchr_compare_template(hop, 'gt') -## def rtype_ge(_, hop): return _rtype_unchr_compare_template(hop, 'ge') #Helper functions for comparisons diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -58,7 +58,6 @@ self.classdef_to_pytypeobject = {} self.concrete_calltables = {} self.class_pbc_attributes = {} - self.oo_meth_impls = {} self.cache_dummy_values = {} self.lltype2vtable = {} self.typererrors = [] @@ -77,14 +76,6 @@ except: self.seed = 0 self.order = None - # the following code would invoke translator.goal.order, which is - # not up-to-date any more: -## RTYPERORDER = os.getenv('RTYPERORDER') -## if RTYPERORDER: -## order_module = RTYPERORDER.split(',')[0] -## self.order = __import__(order_module, {}, {}, ['*']).order -## s = 'Using %s.%s for order' % (self.order.__module__, self.order.__name__) -## self.log.info(s) def getconfig(self): return self.annotator.translator.config diff --git a/rpython/rtyper/rvirtualizable.py b/rpython/rtyper/rvirtualizable.py --- a/rpython/rtyper/rvirtualizable.py +++ b/rpython/rtyper/rvirtualizable.py @@ -25,9 +25,6 @@ def _setup_repr_llfields(self): raise NotImplementedError -## def set_vable(self, llops, vinst, force_cast=False): -## raise NotImplementedError - def _setup_repr(self): if self.top_of_virtualizable_hierarchy: hints = {'virtualizable_accessor': self.accessor} @@ -45,11 +42,6 @@ # not need it, but it doesn't hurt to have it anyway self.my_redirected_fields = self.rbase.my_redirected_fields -## def new_instance(self, llops, classcallhop=None): -## vptr = self._super().new_instance(llops, classcallhop) -## self.set_vable(llops, vptr) -## return vptr - def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): if self.my_redirected_fields.get(cname.value): diff --git a/rpython/rtyper/test/test_nongc.py b/rpython/rtyper/test/test_nongc.py --- a/rpython/rtyper/test/test_nongc.py +++ b/rpython/rtyper/test/test_nongc.py @@ -230,6 +230,3 @@ assert isinstance(s, annmodel.SomeAddress) rtyper = RPythonTyper(a) rtyper.specialize() -## from rpython.memory.lladdress import _address -## res = interpret(malloc_and_free, [_address()]) -## assert res == _address() diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -14,15 +14,19 @@ from rpython.translator.translator import TranslationContext -# undo the specialization parameter +# undo the specialization parameters for n1 in 'get set del'.split(): + if n1 == "get": + extraarg = "ll_getitem_fast, " + else: + extraarg = "" for n2 in '', '_nonneg': name = 'll_%sitem%s' % (n1, n2) globals()['_' + name] = globals()[name] exec """if 1: def %s(*args): - return _%s(dum_checkidx, *args) -""" % (name, name) + return _%s(dum_checkidx, %s*args) +""" % (name, name, extraarg) del n1, n2, name @@ -1400,7 +1404,7 @@ block = graph.startblock op = 
block.operations[-1] assert op.opname == 'direct_call' - func = op.args[0].value._obj._callable + func = op.args[2].value assert ('foldable' in func.func_name) == \ ("y[*]" in immutable_fields) @@ -1511,8 +1515,8 @@ block = graph.startblock lst1_getitem_op = block.operations[-3] # XXX graph fishing lst2_getitem_op = block.operations[-2] - func1 = lst1_getitem_op.args[0].value._obj._callable - func2 = lst2_getitem_op.args[0].value._obj._callable + func1 = lst1_getitem_op.args[2].value + func2 = lst2_getitem_op.args[2].value assert func1.oopspec == 'list.getitem_foldable(l, index)' assert not hasattr(func2, 'oopspec') diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -8,7 +8,7 @@ # ====> test_rstr.py -class BaseTestRUnicode(AbstractTestRstr, BaseRtypingTest): +class TestRUnicode(AbstractTestRstr, BaseRtypingTest): const = unicode constchar = unichr diff --git a/rpython/translator/backendopt/test/test_raisingop2direct_call.py b/rpython/translator/backendopt/test/test_raisingop2direct_call.py --- a/rpython/translator/backendopt/test/test_raisingop2direct_call.py +++ b/rpython/translator/backendopt/test/test_raisingop2direct_call.py @@ -51,18 +51,6 @@ res = fn(-5, 2) assert res == -3 - # this becomes an int_floordiv_ovf_zer already? -## def g(x, y): -## try: -## return ovfcheck(x//y) -## except OverflowError: -## return 123 -## gn = get_runner(g, 'int_floordiv_ovf', [int, int]) -## res = gn(-sys.maxint-1, -1) -## assert res == 123 -## res = gn(-5, 2) -## assert res == -3 - def h(x, y): try: return ovfcheck(x//y) diff --git a/rpython/translator/backendopt/test/test_removenoops.py b/rpython/translator/backendopt/test/test_removenoops.py --- a/rpython/translator/backendopt/test/test_removenoops.py +++ b/rpython/translator/backendopt/test/test_removenoops.py @@ -97,9 +97,8 @@ def test_remove_unaryops(): - # We really want to use remove_unaryops for things like ooupcast and - # oodowncast in dynamically typed languages, but it's easier to test - # it with operations on ints here. + # We really want to use remove_unaryops for more complex operations, but + # it's easier to test it with operations on ints here. 
def f(x): i = llop.int_invert(lltype.Signed, x) i = llop.int_add(lltype.Signed, x, 1) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -358,6 +358,8 @@ self.fullptrtypename = 'void *@' else: self.fullptrtypename = self.itemtypename.replace('@', '*@') + if ARRAY._hints.get("render_as_const"): + self.fullptrtypename = 'const ' + self.fullptrtypename def setup(self): """Array loops are forbidden by ForwardReference.become() because diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -1,5 +1,6 @@ import py from rpython.rtyper.lltypesystem.lltype import * +from rpython.rtyper.lltypesystem import rffi from rpython.translator.c.test.test_genc import compile from rpython.tool.sourcetools import func_with_new_name @@ -314,14 +315,14 @@ from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import rffi, llmemory, lltype P = lltype.Ptr(lltype.FixedSizeArray(lltype.Char, 1)) - + def f(): a = llstr("xyz") b = (llmemory.cast_ptr_to_adr(a) + llmemory.offsetof(STR, 'chars') + llmemory.itemoffsetof(STR.chars, 0)) buf = rffi.cast(rffi.VOIDP, b) return buf[2] - + fn = self.getcompiled(f, []) res = fn() assert res == 'z' @@ -941,3 +942,21 @@ assert fn(0) == 10 assert fn(1) == 10 + 521 assert fn(2) == 10 + 34 + + def test_const_char_star(self): + from rpython.translator.tool.cbuild import ExternalCompilationInfo + + eci = ExternalCompilationInfo(includes=["stdlib.h"]) + atoi = rffi.llexternal('atoi', [rffi.CONST_CCHARP], rffi.INT, + compilation_info=eci) + + def f(n): + s = malloc(rffi.CCHARP.TO, 2, flavor='raw') + s[0] = '9' + s[1] = '\0' + res = atoi(rffi.cast(rffi.CONST_CCHARP, s)) + free(s, flavor='raw') + return res + + fn = self.getcompiled(f, [int]) + assert fn(0) == 9 diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -354,8 +354,12 @@ """ Generate bytecodes for JIT and flow the JIT helper functions lltype version """ - get_policy = self.extra['jitpolicy'] - self.jitpolicy = get_policy(self) + from rpython.jit.codewriter.policy import JitPolicy + get_policy = self.extra.get('jitpolicy', None) + if get_policy is None: + self.jitpolicy = JitPolicy() + else: + self.jitpolicy = get_policy(self) # from rpython.jit.metainterp.warmspot import apply_jit apply_jit(self.translator, policy=self.jitpolicy, @@ -544,9 +548,14 @@ try: From noreply at buildbot.pypy.org Thu Aug 1 16:31:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 16:31:18 +0200 (CEST) Subject: [pypy-commit] cffi default: Py3k fix Message-ID: <20130801143118.DA2A21C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1312:6493499556ea Date: 2013-08-01 16:30 +0200 http://bitbucket.org/cffi/cffi/changeset/6493499556ea/ Log: Py3k fix diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5325,11 +5325,11 @@ if (PyType_Ready(&MiniBuffer_Type) < 0) INITERROR; - v = PyString_FromString("_cffi_backend"); + v = PyText_FromString("_cffi_backend"); if (v == NULL || PyDict_SetItemString(CData_Type.tp_dict, "__module__", v) < 0) INITERROR; - v = PyString_FromString(""); + v = PyText_FromString(""); if (v == NULL || PyDict_SetItemString(CData_Type.tp_dict, "__name__", v) < 0) INITERROR; From noreply at 
buildbot.pypy.org Thu Aug 1 16:36:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 16:36:12 +0200 (CEST) Subject: [pypy-commit] cffi default: Write "status of Win64". Message-ID: <20130801143612.AC06F1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1313:0df92079477e Date: 2013-08-01 16:36 +0200 http://bitbucket.org/cffi/cffi/changeset/0df92079477e/ Log: Write "status of Win64". diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -171,6 +171,9 @@ distutils doesn't support .asm files. This can be resolved by applying the patch from `Python issue 7546`_. +Status: Win64 received very basic testing and we applied a few essential +fixes in cffi 0.7. Please report any other issue. + .. _`issue 9`: https://bitbucket.org/cffi/cffi/issue/9 .. _`Python issue 7546`: http://bugs.python.org/issue7546 From noreply at buildbot.pypy.org Thu Aug 1 17:05:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:05:36 +0200 (CEST) Subject: [pypy-commit] cffi default: Don't care too much about the ctypes backend Message-ID: <20130801150536.1A8141C1380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1314:b3024e60ca54 Date: 2013-08-01 17:05 +0200 http://bitbucket.org/cffi/cffi/changeset/b3024e60ca54/ Log: Don't care too much about the ctypes backend diff --git a/testing/test_function.py b/testing/test_function.py --- a/testing/test_function.py +++ b/testing/test_function.py @@ -383,6 +383,8 @@ assert x == math.sin(1.23) + 100 def test_free_callback_cycle(self): + if self.Backend is CTypesBackend: + py.test.skip("seems to fail with the ctypes backend on windows") import weakref def make_callback(data): container = [data] From noreply at buildbot.pypy.org Thu Aug 1 17:06:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:06:57 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: A release branch Message-ID: <20130801150657.50BDC1C1380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1315:758ce3691553 Date: 2013-08-01 17:06 +0200 http://bitbucket.org/cffi/cffi/changeset/758ce3691553/ Log: A release branch From noreply at buildbot.pypy.org Thu Aug 1 17:09:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:09:48 +0200 (CEST) Subject: [pypy-commit] cffi default: Remove the hg tags. We're using hg branches 'release-*' nowadays. Message-ID: <20130801150948.C81B61C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1316:f929c468a63e Date: 2013-08-01 17:09 +0200 http://bitbucket.org/cffi/cffi/changeset/f929c468a63e/ Log: Remove the hg tags. We're using hg branches 'release-*' nowadays. 
diff --git a/.hgtags b/.hgtags deleted file mode 100644 --- a/.hgtags +++ /dev/null @@ -1,7 +0,0 @@ -ca6e81df7f1ea58d891129ad016a8888c08f238b release-0.1 -a8636625e33b0f84c3744f80d49e84b175a0a215 release-0.2 -6a0f0a476101210a76f4bc4d33c5bbb0f8f979fd release-0.2.1 -5f31908df6c97a1f70f3fcd4d489d98dc2b30f04 release-0.3 -bd4b6090aea035a6093e684858aa7bd54a6270ec release-0.4 -037096d1bdaa213c2adebf3a4124ad56dba8ba82 release-0.4.1 -3691a2e644c98fc8753ffb96c4ff2d5d3e57bd17 release-0.4.2 From noreply at buildbot.pypy.org Thu Aug 1 17:13:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:13:15 +0200 (CEST) Subject: [pypy-commit] cffi default: Removed tag release-0.4.2 Message-ID: <20130801151315.942EC1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1317:b2960f95e1b7 Date: 2013-08-01 17:12 +0200 http://bitbucket.org/cffi/cffi/changeset/b2960f95e1b7/ Log: Removed tag release-0.4.2 diff --git a/.hgtags b/.hgtags new file mode 100644 --- /dev/null +++ b/.hgtags @@ -0,0 +1,2 @@ +3691a2e644c98fc8753ffb96c4ff2d5d3e57bd17 release-0.4.2 +0000000000000000000000000000000000000000 release-0.4.2 From noreply at buildbot.pypy.org Thu Aug 1 17:13:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:13:16 +0200 (CEST) Subject: [pypy-commit] cffi default: Removed tag release-0.4.1 Message-ID: <20130801151316.B0EBC1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1318:960e22982b11 Date: 2013-08-01 17:12 +0200 http://bitbucket.org/cffi/cffi/changeset/960e22982b11/ Log: Removed tag release-0.4.1 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,2 +1,4 @@ 3691a2e644c98fc8753ffb96c4ff2d5d3e57bd17 release-0.4.2 0000000000000000000000000000000000000000 release-0.4.2 +037096d1bdaa213c2adebf3a4124ad56dba8ba82 release-0.4.1 +0000000000000000000000000000000000000000 release-0.4.1 From noreply at buildbot.pypy.org Thu Aug 1 17:13:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:13:17 +0200 (CEST) Subject: [pypy-commit] cffi default: Removed tag release-0.4 Message-ID: <20130801151317.D7E131C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1319:e05904fc0876 Date: 2013-08-01 17:12 +0200 http://bitbucket.org/cffi/cffi/changeset/e05904fc0876/ Log: Removed tag release-0.4 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -2,3 +2,5 @@ 0000000000000000000000000000000000000000 release-0.4.2 037096d1bdaa213c2adebf3a4124ad56dba8ba82 release-0.4.1 0000000000000000000000000000000000000000 release-0.4.1 +bd4b6090aea035a6093e684858aa7bd54a6270ec release-0.4 +0000000000000000000000000000000000000000 release-0.4 From noreply at buildbot.pypy.org Thu Aug 1 17:13:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:13:18 +0200 (CEST) Subject: [pypy-commit] cffi default: Removed tag release-0.3 Message-ID: <20130801151318.DD4F21C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1320:5b2d9013e349 Date: 2013-08-01 17:12 +0200 http://bitbucket.org/cffi/cffi/changeset/5b2d9013e349/ Log: Removed tag release-0.3 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -4,3 +4,5 @@ 0000000000000000000000000000000000000000 release-0.4.1 bd4b6090aea035a6093e684858aa7bd54a6270ec release-0.4 0000000000000000000000000000000000000000 release-0.4 +5f31908df6c97a1f70f3fcd4d489d98dc2b30f04 release-0.3 +0000000000000000000000000000000000000000 release-0.3 From noreply at buildbot.pypy.org Thu Aug 1 17:13:20 2013 From: noreply at 
buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:13:20 +0200 (CEST) Subject: [pypy-commit] cffi default: Removed tag release-0.2.1 Message-ID: <20130801151320.583D11C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1321:ae01456bdd99 Date: 2013-08-01 17:12 +0200 http://bitbucket.org/cffi/cffi/changeset/ae01456bdd99/ Log: Removed tag release-0.2.1 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,5 @@ 0000000000000000000000000000000000000000 release-0.4 5f31908df6c97a1f70f3fcd4d489d98dc2b30f04 release-0.3 0000000000000000000000000000000000000000 release-0.3 +6a0f0a476101210a76f4bc4d33c5bbb0f8f979fd release-0.2.1 +0000000000000000000000000000000000000000 release-0.2.1 From noreply at buildbot.pypy.org Thu Aug 1 17:13:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:13:21 +0200 (CEST) Subject: [pypy-commit] cffi default: Removed tag release-0.2 Message-ID: <20130801151321.5FECE1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1322:16df080fd634 Date: 2013-08-01 17:12 +0200 http://bitbucket.org/cffi/cffi/changeset/16df080fd634/ Log: Removed tag release-0.2 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -8,3 +8,5 @@ 0000000000000000000000000000000000000000 release-0.3 6a0f0a476101210a76f4bc4d33c5bbb0f8f979fd release-0.2.1 0000000000000000000000000000000000000000 release-0.2.1 +a8636625e33b0f84c3744f80d49e84b175a0a215 release-0.2 +0000000000000000000000000000000000000000 release-0.2 From noreply at buildbot.pypy.org Thu Aug 1 17:13:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:13:22 +0200 (CEST) Subject: [pypy-commit] cffi default: Removed tag release-0.1 Message-ID: <20130801151322.626081C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1323:27a653aae7f3 Date: 2013-08-01 17:12 +0200 http://bitbucket.org/cffi/cffi/changeset/27a653aae7f3/ Log: Removed tag release-0.1 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -10,3 +10,5 @@ 0000000000000000000000000000000000000000 release-0.2.1 a8636625e33b0f84c3744f80d49e84b175a0a215 release-0.2 0000000000000000000000000000000000000000 release-0.2 +ca6e81df7f1ea58d891129ad016a8888c08f238b release-0.1 +0000000000000000000000000000000000000000 release-0.1 From noreply at buildbot.pypy.org Thu Aug 1 17:22:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:22:15 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Update the MD5/SHA Message-ID: <20130801152215.9BEBC1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1324:f798b582b04a Date: 2013-08-01 17:21 +0200 http://bitbucket.org/cffi/cffi/changeset/f798b582b04a/ Log: Update the MD5/SHA diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -94,9 +94,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: 2110516c65f7c9e6f324241c322178c8 - - SHA: ... + - SHA: 772205729d9ef620adf48f351eb79f3d0ab2d014 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Thu Aug 1 17:34:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:34:01 +0200 (CEST) Subject: [pypy-commit] cffi default: The Win64 problem with the .asm was fixed by checking in the .obj file. 
Message-ID: <20130801153401.D843D1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1325:7fc02dbc82e6 Date: 2013-08-01 17:33 +0200 http://bitbucket.org/cffi/cffi/changeset/7fc02dbc82e6/ Log: The Win64 problem with the .asm was fixed by checking in the .obj file. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -165,15 +165,16 @@ Windows 64 ++++++++++ -Win32 works and is tested at least each official release. However, it -seems that compiling it for Win64 (explicitly *not* in Win32 mode) does -not work out of the box. According to `issue 9`_, this is because -distutils doesn't support .asm files. This can be resolved by applying -the patch from `Python issue 7546`_. +Win32 works and is tested at least each official release. Status: Win64 received very basic testing and we applied a few essential fixes in cffi 0.7. Please report any other issue. +Note as usual that this is only about running the 64-bit version of +Python on the 64-bit OS. If you're running the 32-bit version (the +common case apparently), then you're running Win32 as far as we're +concerned. + .. _`issue 9`: https://bitbucket.org/cffi/cffi/issue/9 .. _`Python issue 7546`: http://bugs.python.org/issue7546 From noreply at buildbot.pypy.org Thu Aug 1 17:34:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:34:03 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: The Win64 problem with the .asm was fixed by checking in the .obj file. Message-ID: <20130801153403.010BF1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1326:fec86009869d Date: 2013-08-01 17:33 +0200 http://bitbucket.org/cffi/cffi/changeset/fec86009869d/ Log: The Win64 problem with the .asm was fixed by checking in the .obj file. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -165,15 +165,16 @@ Windows 64 ++++++++++ -Win32 works and is tested at least each official release. However, it -seems that compiling it for Win64 (explicitly *not* in Win32 mode) does -not work out of the box. According to `issue 9`_, this is because -distutils doesn't support .asm files. This can be resolved by applying -the patch from `Python issue 7546`_. +Win32 works and is tested at least each official release. Status: Win64 received very basic testing and we applied a few essential fixes in cffi 0.7. Please report any other issue. +Note as usual that this is only about running the 64-bit version of +Python on the 64-bit OS. If you're running the 32-bit version (the +common case apparently), then you're running Win32 as far as we're +concerned. + .. _`issue 9`: https://bitbucket.org/cffi/cffi/issue/9 .. 
_`Python issue 7546`: http://bugs.python.org/issue7546 From noreply at buildbot.pypy.org Thu Aug 1 17:45:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:45:07 +0200 (CEST) Subject: [pypy-commit] cffi default: Add a warning Message-ID: <20130801154507.53DB51C029A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1327:b95881437e90 Date: 2013-08-01 17:44 +0200 http://bitbucket.org/cffi/cffi/changeset/b95881437e90/ Log: Add a warning diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1212,7 +1212,9 @@ cdata object returned by ``new_handle()`` has *ownership*, in the same sense as ``ffi.new()`` or ``ffi.gc()``: the association ``void * -> python_object`` is only valid as long as *this* exact cdata returned by -``new_handle()`` is alive. *New in version 0.7.* +``new_handle()`` is alive. *Calling ffi.from_handle(p) is invalid and +will likely crash if the cdata object returned by new_handle() is not +kept alive!* *New in version 0.7.* .. "versionadded:: 0.7" --- inlined in the previous paragraph From noreply at buildbot.pypy.org Thu Aug 1 17:45:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:45:08 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Add a warning Message-ID: <20130801154508.79F721C029A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1328:a1b0ea18224f Date: 2013-08-01 17:44 +0200 http://bitbucket.org/cffi/cffi/changeset/a1b0ea18224f/ Log: Add a warning diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1212,7 +1212,9 @@ cdata object returned by ``new_handle()`` has *ownership*, in the same sense as ``ffi.new()`` or ``ffi.gc()``: the association ``void * -> python_object`` is only valid as long as *this* exact cdata returned by -``new_handle()`` is alive. *New in version 0.7.* +``new_handle()`` is alive. *Calling ffi.from_handle(p) is invalid and +will likely crash if the cdata object returned by new_handle() is not +kept alive!* *New in version 0.7.* .. "versionadded:: 0.7" --- inlined in the previous paragraph From noreply at buildbot.pypy.org Thu Aug 1 17:49:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:49:20 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix the doc Message-ID: <20130801154920.01CEC1C029A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1329:2162ca1d7b7e Date: 2013-08-01 17:49 +0200 http://bitbucket.org/cffi/cffi/changeset/2162ca1d7b7e/ Log: Fix the doc diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -134,7 +134,8 @@ ``libffi`` is notoriously messy to install and use --- to the point that CPython includes its own copy to avoid relying on external packages. -CFFI does the same for Windows, but (so far) not for other platforms. +CFFI does the same for Windows, but not for other platforms (which should +have their own working libffi's). Modern Linuxes work out of the box thanks to ``pkg-config``. Here are some (user-supplied) instructions for other platforms. 
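The warning added in the two changesets above comes down to a lifetime rule: the ``void *`` produced by ``ffi.new_handle(obj)`` is only meaningful while that exact cdata object stays alive, so the Python side must keep a reference to it for as long as C code may hand the pointer back to ``ffi.from_handle()``. Below is a minimal sketch of that usage pattern; the C functions ``set_user_data``/``get_user_data``, the ``Session`` class and the helper names are invented for illustration and are not part of cffi itself:

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("""
        void set_user_data(void *data);   /* hypothetical C API */
        void *get_user_data(void);
    """)
    # lib = ffi.dlopen("somelib")   # assumed to implement the two functions above

    class Session(object):
        def __init__(self, name):
            self.name = name

    _handles = set()                      # keeps the handle cdata objects alive

    def attach(lib, session):
        h = ffi.new_handle(session)       # cdata of type 'void *' owning the mapping
        _handles.add(h)                   # without this, from_handle() later is invalid
        lib.set_user_data(h)
        return h

    def lookup(lib):
        p = lib.get_user_data()           # a plain 'void *' coming back from C
        return ffi.from_handle(p)         # valid only while the original handle lives

    def detach(lib, h):
        _handles.discard(h)               # after this the C-side pointer must not be used

This is the same ownership notion as ``ffi.new()`` and ``ffi.gc()`` that the documentation text above refers to: dropping the owning cdata invalidates everything derived from it.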
From noreply at buildbot.pypy.org Thu Aug 1 17:55:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 17:55:13 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Fix the doc Message-ID: <20130801155513.EF1821C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1330:9c50b9b732df Date: 2013-08-01 17:49 +0200 http://bitbucket.org/cffi/cffi/changeset/9c50b9b732df/ Log: Fix the doc diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -134,7 +134,8 @@ ``libffi`` is notoriously messy to install and use --- to the point that CPython includes its own copy to avoid relying on external packages. -CFFI does the same for Windows, but (so far) not for other platforms. +CFFI does the same for Windows, but not for other platforms (which should +have their own working libffi's). Modern Linuxes work out of the box thanks to ``pkg-config``. Here are some (user-supplied) instructions for other platforms. From noreply at buildbot.pypy.org Thu Aug 1 19:31:25 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 1 Aug 2013 19:31:25 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: kill trivial getter Message-ID: <20130801173125.6B0621C029A@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65878:dec98a0d9628 Date: 2013-07-29 02:47 +0100 http://bitbucket.org/pypy/pypy/changeset/dec98a0d9628/ Log: kill trivial getter diff --git a/rpython/jit/metainterp/jitexc.py b/rpython/jit/metainterp/jitexc.py --- a/rpython/jit/metainterp/jitexc.py +++ b/rpython/jit/metainterp/jitexc.py @@ -62,7 +62,7 @@ def _get_standard_error(rtyper, Class): - exdata = rtyper.getexceptiondata() + exdata = rtyper.exceptiondata clsdef = rtyper.annotator.bookkeeper.getuniqueclassdef(Class) evalue = exdata.get_standard_ll_exc_instance(rtyper, clsdef) return evalue diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -105,7 +105,7 @@ self.minimalgctransformer = None def get_lltype_of_exception_value(self): - exceptiondata = self.translator.rtyper.getexceptiondata() + exceptiondata = self.translator.rtyper.exceptiondata return exceptiondata.lltype_of_exception_value def need_minimal_transform(self, graph): @@ -479,11 +479,11 @@ flags = hop.spaceop.args[1].value flavor = flags['flavor'] meth = getattr(self, 'gct_fv_%s_malloc' % flavor, None) - assert meth, "%s has no support for malloc with flavor %r" % (self, flavor) + assert meth, "%s has no support for malloc with flavor %r" % (self, flavor) c_size = rmodel.inputconst(lltype.Signed, llmemory.sizeof(TYPE)) v_raw = meth(hop, flags, TYPE, c_size) hop.cast_result(v_raw) - + def gct_fv_raw_malloc(self, hop, flags, TYPE, c_size): v_raw = hop.genop("direct_call", [self.raw_malloc_fixedsize_ptr, c_size], resulttype=llmemory.Address) @@ -506,7 +506,7 @@ flags.update(add_flags) flavor = flags['flavor'] meth = getattr(self, 'gct_fv_%s_malloc_varsize' % flavor, None) - assert meth, "%s has no support for malloc_varsize with flavor %r" % (self, flavor) + assert meth, "%s has no support for malloc_varsize with flavor %r" % (self, flavor) return self.varsize_malloc_helper(hop, flags, meth, []) def gct_malloc_nonmovable(self, *args, **kwds): diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -286,7 +286,7 @@ rtyper = 
self.llinterpreter.typer bk = rtyper.annotator.bookkeeper classdef = bk.getuniqueclassdef(rstackovf._StackOverflow) - exdata = rtyper.getexceptiondata() + exdata = rtyper.exceptiondata evalue = exdata.get_standard_ll_exc_instance(rtyper, classdef) etype = exdata.fn_type_of_exc_inst(evalue) e = LLException(etype, evalue) @@ -335,7 +335,7 @@ elif catch_exception: link = block.exits[0] if e: - exdata = self.llinterpreter.typer.getexceptiondata() + exdata = self.llinterpreter.typer.exceptiondata cls = e.args[0] inst = e.args[1] for link in block.exits[1:]: @@ -440,7 +440,7 @@ else: extraargs = () typer = self.llinterpreter.typer - exdata = typer.getexceptiondata() + exdata = typer.exceptiondata if isinstance(exc, OSError): self.op_direct_call(exdata.fn_raise_OSError, exc.errno) assert False, "op_direct_call above should have raised" diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -100,9 +100,6 @@ self._reprs_must_call_setup.append(repr) self._seen_reprs_must_call_setup[repr] = True - def getexceptiondata(self): - return self.exceptiondata # built at the end of specialize() - def lltype_to_classdef_mapping(self): result = {} for (classdef, _), repr in self.instance_reprs.iteritems(): diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -329,7 +329,7 @@ # this rewiring does not always succeed. in the cases where it doesn't # there will be generic code inserted rclass = self.translator.rtyper.type_system.rclass - excdata = self.translator.rtyper.getexceptiondata() + excdata = self.translator.rtyper.exceptiondata exc_match = excdata.fn_exception_match for link in self.entrymap[self.graph_to_inline.exceptblock]: if link.prevblock.exits[0] is not link: @@ -358,7 +358,7 @@ #XXXXX don't look: insert blocks that do exception matching #for the cases where direct matching did not work exc_match = Constant( - self.translator.rtyper.getexceptiondata().fn_exception_match) + self.translator.rtyper.exceptiondata.fn_exception_match) exc_match.concretetype = typeOf(exc_match.value) blocks = [] for i, link in enumerate(afterblock.exits[1:]): diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -357,7 +357,7 @@ yield node def get_lltype_of_exception_value(self): - exceptiondata = self.translator.rtyper.getexceptiondata() + exceptiondata = self.translator.rtyper.exceptiondata return exceptiondata.lltype_of_exception_value def getstructdeflist(self): diff --git a/rpython/translator/c/extfunc.py b/rpython/translator/c/extfunc.py --- a/rpython/translator/c/extfunc.py +++ b/rpython/translator/c/extfunc.py @@ -10,9 +10,7 @@ # Note about *.im_func: The annotator and the rtyper expect direct # references to functions, so we cannot insert classmethods here. 
-EXTERNALS = { - 'LL_flush_icache': 'LL_flush_icache', - } +EXTERNALS = {'LL_flush_icache': 'LL_flush_icache'} #______________________________________________________ @@ -93,7 +91,7 @@ def predeclare_exception_data(db, rtyper): # Exception-related types and constants - exceptiondata = rtyper.getexceptiondata() + exceptiondata = rtyper.exceptiondata exctransformer = db.exctransformer yield ('RPYTHON_EXCEPTION_VTABLE', exceptiondata.lltype_of_exception_type) diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -49,7 +49,7 @@ def __init__(self, translator): self.translator = translator self.raise_analyzer = canraise.RaiseAnalyzer(translator) - edata = translator.rtyper.getexceptiondata() + edata = translator.rtyper.exceptiondata self.lltype_of_exception_value = edata.lltype_of_exception_value self.lltype_of_exception_type = edata.lltype_of_exception_type self.mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper) @@ -169,7 +169,7 @@ exception_policy="exc_helper", **kwds) def get_builtin_exception(self, Class): - edata = self.translator.rtyper.getexceptiondata() + edata = self.translator.rtyper.exceptiondata rclass = self.translator.rtyper.type_system.rclass bk = self.translator.annotator.bookkeeper error_def = bk.getuniqueclassdef(Class) From noreply at buildbot.pypy.org Thu Aug 1 19:31:26 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 1 Aug 2013 19:31:26 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: merge lltypesystem.exceptiondata into rtyper.exceptiondata Message-ID: <20130801173126.E2F591C029A@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65879:1d43382f86dd Date: 2013-07-29 13:16 +0100 http://bitbucket.org/pypy/pypy/changeset/1d43382f86dd/ Log: merge lltypesystem.exceptiondata into rtyper.exceptiondata diff --git a/rpython/rtyper/exceptiondata.py b/rpython/rtyper/exceptiondata.py --- a/rpython/rtyper/exceptiondata.py +++ b/rpython/rtyper/exceptiondata.py @@ -1,7 +1,8 @@ from rpython.annotator import model as annmodel from rpython.rlib import rstackovf from rpython.rtyper import rclass - +from rpython.rtyper.lltypesystem.rclass import (ll_issubclass, ll_type, + ll_cast_to_object) # the exceptions that can be implicitely raised by some operations standardexceptions = { @@ -27,7 +28,7 @@ pass -class AbstractExceptionData: +class ExceptionData(object): """Public information for the code generators to help with exceptions.""" standardexceptions = standardexceptions @@ -75,3 +76,24 @@ clsdef = self.rtyper.annotator.bookkeeper.getuniqueclassdef( exceptionclass) return self.get_standard_ll_exc_instance(self.rtyper, clsdef) + + def make_helpers(self, rtyper): + # create helper functionptrs + self.fn_exception_match = self.make_exception_matcher(rtyper) + self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper) + self.fn_raise_OSError = self.make_raise_OSError(rtyper) + + def make_exception_matcher(self, rtyper): + # ll_exception_matcher(real_exception_vtable, match_exception_vtable) + s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type) + helper_fn = rtyper.annotate_helper_fn(ll_issubclass, [s_typeptr, s_typeptr]) + return helper_fn + + def make_type_of_exc_inst(self, rtyper): + # ll_type_of_exc_inst(exception_instance) -> exception_vtable + s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) + helper_fn = rtyper.annotate_helper_fn(ll_type, [s_excinst]) + return 
helper_fn + + def cast_exception(self, TYPE, value): + return ll_cast_to_object(value) diff --git a/rpython/rtyper/lltypesystem/exceptiondata.py b/rpython/rtyper/lltypesystem/exceptiondata.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/exceptiondata.py +++ /dev/null @@ -1,31 +0,0 @@ -from rpython.annotator import model as annmodel -from rpython.rtyper.lltypesystem import rclass -from rpython.rtyper.lltypesystem.lltype import (Array, malloc, Ptr, FuncType, - functionptr, Signed) -from rpython.rtyper.exceptiondata import AbstractExceptionData -from rpython.annotator.classdef import FORCE_ATTRIBUTES_INTO_CLASSES - - -class ExceptionData(AbstractExceptionData): - """Public information for the code generators to help with exceptions.""" - - def make_helpers(self, rtyper): - # create helper functionptrs - self.fn_exception_match = self.make_exception_matcher(rtyper) - self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper) - self.fn_raise_OSError = self.make_raise_OSError(rtyper) - - def make_exception_matcher(self, rtyper): - # ll_exception_matcher(real_exception_vtable, match_exception_vtable) - s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type) - helper_fn = rtyper.annotate_helper_fn(rclass.ll_issubclass, [s_typeptr, s_typeptr]) - return helper_fn - - def make_type_of_exc_inst(self, rtyper): - # ll_type_of_exc_inst(exception_instance) -> exception_vtable - s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) - helper_fn = rtyper.annotate_helper_fn(rclass.ll_type, [s_excinst]) - return helper_fn - - def cast_exception(self, TYPE, value): - return rclass.ll_cast_to_object(value) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -20,6 +20,7 @@ from rpython.flowspace.model import Variable, Constant, SpaceOperation, c_last_exception from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy from rpython.rtyper.error import TyperError +from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive) @@ -56,10 +57,7 @@ self.typererror_count = 0 # make the primitive_to_repr constant mapping self.primitive_to_repr = {} - if self.type_system.offers_exceptiondata: - self.exceptiondata = self.type_system.exceptiondata.ExceptionData(self) - else: - self.exceptiondata = None + self.exceptiondata = ExceptionData(self) try: self.seed = int(os.getenv('RTYPERSEED')) diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -9,8 +9,6 @@ class TypeSystem(object): __metaclass__ = extendabletype - offers_exceptiondata = True - def __getattr__(self, name): """Lazy import to avoid circular dependencies.""" def load(modname): @@ -21,7 +19,7 @@ return None if name in ('rclass', 'rpbc', 'rbuiltin', 'rtuple', 'rlist', 'rslice', 'rdict', 'rrange', 'rstr', - 'll_str', 'rbuilder', 'rbytearray', 'exceptiondata'): + 'll_str', 'rbuilder', 'rbytearray'): mod = load(name) if mod is not None: setattr(self, name, mod) From noreply at buildbot.pypy.org Thu Aug 1 19:31:28 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 1 Aug 2013 19:31:28 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: Simplify rtyper.exceptiondata Message-ID: <20130801173128.619D41C029A@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: 
kill-typesystem Changeset: r65880:e6cb10211932 Date: 2013-07-29 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/e6cb10211932/ Log: Simplify rtyper.exceptiondata diff --git a/rpython/rtyper/exceptiondata.py b/rpython/rtyper/exceptiondata.py --- a/rpython/rtyper/exceptiondata.py +++ b/rpython/rtyper/exceptiondata.py @@ -5,24 +5,10 @@ ll_cast_to_object) # the exceptions that can be implicitely raised by some operations -standardexceptions = { - TypeError : True, - OverflowError : True, - ValueError : True, - ZeroDivisionError: True, - MemoryError : True, - IOError : True, - OSError : True, - StopIteration : True, - KeyError : True, - IndexError : True, - AssertionError : True, - RuntimeError : True, - UnicodeDecodeError: True, - UnicodeEncodeError: True, - NotImplementedError: True, - rstackovf._StackOverflow: True, - } +standardexceptions = set([TypeError, OverflowError, ValueError, + ZeroDivisionError, MemoryError, IOError, OSError, StopIteration, KeyError, + IndexError, AssertionError, RuntimeError, UnicodeDecodeError, + UnicodeEncodeError, NotImplementedError, rstackovf._StackOverflow]) class UnknownException(Exception): pass @@ -30,6 +16,7 @@ class ExceptionData(object): """Public information for the code generators to help with exceptions.""" + standardexceptions = standardexceptions def __init__(self, rtyper): @@ -67,7 +54,7 @@ rclass = rtyper.type_system.rclass r_inst = rclass.getinstancerepr(rtyper, clsdef) example = r_inst.get_reusable_prebuilt_instance() - example = self.cast_exception(self.lltype_of_exception_value, example) + example = ll_cast_to_object(example) return example def get_standard_ll_exc_instance_by_class(self, exceptionclass): @@ -94,6 +81,3 @@ s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) helper_fn = rtyper.annotate_helper_fn(ll_type, [s_excinst]) return helper_fn - - def cast_exception(self, TYPE, value): - return ll_cast_to_object(value) From noreply at buildbot.pypy.org Thu Aug 1 19:31:29 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 1 Aug 2013 19:31:29 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: rm attr type_system.rclass Message-ID: <20130801173129.9D8FA1C029A@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65881:981476ac5e16 Date: 2013-07-31 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/981476ac5e16/ Log: rm attr type_system.rclass diff --git a/rpython/rtyper/exceptiondata.py b/rpython/rtyper/exceptiondata.py --- a/rpython/rtyper/exceptiondata.py +++ b/rpython/rtyper/exceptiondata.py @@ -51,8 +51,8 @@ return helper_fn def get_standard_ll_exc_instance(self, rtyper, clsdef): - rclass = rtyper.type_system.rclass - r_inst = rclass.getinstancerepr(rtyper, clsdef) + from rpython.rtyper.lltypesystem.rclass import getinstancerepr + r_inst = getinstancerepr(rtyper, clsdef) example = r_inst.get_reusable_prebuilt_instance() example = ll_cast_to_object(example) return example diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -317,7 +317,7 @@ def ll_str2bytearray(str): from rpython.rtyper.lltypesystem.rbytearray import BYTEARRAY - + lgt = len(str.chars) b = malloc(BYTEARRAY, lgt) for i in range(lgt): @@ -974,7 +974,7 @@ argsiter = iter(sourcevarsrepr) - InstanceRepr = hop.rtyper.type_system.rclass.InstanceRepr + from rpython.rtyper.lltypesystem.rclass import InstanceRepr for i, thing in enumerate(things): if isinstance(thing, tuple): code = thing[0] @@ -1007,7 
+1007,6 @@ else: raise TyperError("%%%s is not RPython" % (code,)) else: - from rpython.rtyper.lltypesystem.rstr import string_repr, unicode_repr if is_unicode: vchunk = inputconst(unicode_repr, thing) else: diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -54,7 +54,8 @@ try: result = rtyper.class_reprs[classdef] except KeyError: - result = rtyper.type_system.rclass.ClassRepr(rtyper, classdef) + from rpython.rtyper.lltypesystem.rclass import ClassRepr + result = ClassRepr(rtyper, classdef) rtyper.class_reprs[classdef] = result rtyper.add_pendingsetup(result) return result @@ -103,7 +104,8 @@ from rpython.rtyper.lltypesystem import rtagged return rtagged.TaggedInstanceRepr(rtyper, classdef, unboxed[0]) else: - return rtyper.type_system.rclass.InstanceRepr(rtyper, classdef, gcflavor) + from rpython.rtyper.lltypesystem.rclass import InstanceRepr + return InstanceRepr(rtyper, classdef, gcflavor) class MissingRTypeAttribute(TyperError): diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -328,7 +328,7 @@ def rewire_exceptblock_with_guard(self, afterblock, copiedexceptblock): # this rewiring does not always succeed. in the cases where it doesn't # there will be generic code inserted - rclass = self.translator.rtyper.type_system.rclass + from rpython.rtyper.lltypesystem import rclass excdata = self.translator.rtyper.exceptiondata exc_match = excdata.fn_exception_match for link in self.entrymap[self.graph_to_inline.exceptblock]: From noreply at buildbot.pypy.org Thu Aug 1 19:31:30 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 1 Aug 2013 19:31:30 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: kill the lazy import hack in TypeSystem Message-ID: <20130801173130.D7AA01C029A@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65882:cc3d8a522a8b Date: 2013-08-01 18:21 +0100 http://bitbucket.org/pypy/pypy/changeset/cc3d8a522a8b/ Log: kill the lazy import hack in TypeSystem diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -127,8 +127,8 @@ return None def specialize_call(self, hop): + from rpython.rtyper.lltypesystem.rstr import string_repr fn = self.instance - string_repr = hop.rtyper.type_system.rstr.string_repr vlist = hop.inputargs(string_repr) hop.exception_cannot_occur() t = hop.rtyper.annotator.translator @@ -190,7 +190,7 @@ def compute_result_annotation(self): return None - + def specialize_call(self, hop): hop.exception_cannot_occur() return hop.genop('debug_flush', []) @@ -278,7 +278,7 @@ from rpython.annotator.annrpython import log log.WARNING('make_sure_not_resized called, but has no effect since list_comprehension is off') return s_arg - + def specialize_call(self, hop): hop.exception_cannot_occur() return hop.inputarg(hop.args_r[0], arg=0) @@ -294,7 +294,7 @@ class DictMarkEntry(ExtRegistryEntry): _about_ = mark_dict_non_null - + def compute_result_annotation(self, s_dict): from rpython.annotator.model import SomeDict diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -362,7 +362,8 @@ return SomeString() def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbuilder.stringbuilder_repr + from rpython.rtyper.lltypesystem.rbuilder import stringbuilder_repr + return 
stringbuilder_repr def rtyper_makekey(self): return self.__class__, @@ -398,7 +399,8 @@ return SomeUnicodeString() def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbuilder.unicodebuilder_repr + from rpython.rtyper.lltypesystem.rbuilder import unicodebuilder_repr + return unicodebuilder_repr def rtyper_makekey(self): return self.__class__, diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -93,7 +93,8 @@ except (KeyError, TypeError): pass try: - return rtyper.type_system.rbuiltin.BUILTIN_TYPER[self.builtinfunc] + from rpython.rtyper.lltypesystem.rbuiltin import BUILTIN_TYPER as ll_BUILTIN_TYPER + return ll_BUILTIN_TYPER[self.builtinfunc] except (KeyError, TypeError): pass if extregistry.is_registered(self.builtinfunc): diff --git a/rpython/rtyper/rbytearray.py b/rpython/rtyper/rbytearray.py --- a/rpython/rtyper/rbytearray.py +++ b/rpython/rtyper/rbytearray.py @@ -57,4 +57,5 @@ return self.__class__, def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbytearray.bytearray_repr + from rpython.rtyper.lltypesystem.rbytearray import bytearray_repr + return bytearray_repr diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -5,23 +5,20 @@ class __extend__(annmodel.SomeDict): def rtyper_makerepr(self, rtyper): - dictkey = self.dictdef.dictkey + from rpython.rtyper.lltypesystem.rdict import DictRepr + dictkey = self.dictdef.dictkey dictvalue = self.dictdef.dictvalue - s_key = dictkey .s_value - s_value = dictvalue.s_value + s_key = dictkey.s_value + s_value = dictvalue.s_value force_non_null = self.dictdef.force_non_null if dictkey.custom_eq_hash: custom_eq_hash = lambda: (rtyper.getrepr(dictkey.s_rdict_eqfn), rtyper.getrepr(dictkey.s_rdict_hashfn)) else: custom_eq_hash = None - return rtyper.type_system.rdict.DictRepr(rtyper, - lambda: rtyper.getrepr(s_key), - lambda: rtyper.getrepr(s_value), - dictkey, - dictvalue, - custom_eq_hash, - force_non_null) + return DictRepr(rtyper, lambda: rtyper.getrepr(s_key), + lambda: rtyper.getrepr(s_value), dictkey, dictvalue, + custom_eq_hash, force_non_null) def rtyper_makekey(self): self.dictdef.dictkey .dont_change_any_more = True @@ -29,7 +26,6 @@ return (self.__class__, self.dictdef.dictkey, self.dictdef.dictvalue) - class AbstractDictRepr(rmodel.Repr): def pickrepr(self, item_repr): @@ -41,7 +37,8 @@ pickkeyrepr = pickrepr def compact_repr(self): - return 'DictR %s %s' % (self.key_repr.compact_repr(), self.value_repr.compact_repr()) + return 'DictR %s %s' % (self.key_repr.compact_repr(), + self.value_repr.compact_repr()) def recast_value(self, llops, v): return llops.convertvar(v, self.value_repr, self.external_value_repr) @@ -51,10 +48,11 @@ def rtype_newdict(hop): + from rpython.rtyper.lltypesystem.rdict import ll_newdict hop.inputargs() # no arguments expected r_dict = hop.r_result cDICT = hop.inputconst(lltype.Void, r_dict.DICT) - v_result = hop.gendirectcall(hop.rtyper.type_system.rdict.ll_newdict, cDICT) + v_result = hop.gendirectcall(ll_newdict, cDICT) return v_result diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -77,8 +77,8 @@ class __extend__(pairtype(AbstractStringRepr, FloatRepr)): def rtype_mod(_, hop): - rstr = hop.rtyper.type_system.rstr - return rstr.do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) + from rpython.rtyper.lltypesystem.rstr import do_stringformat + return 
do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) #Helpers FloatRepr,FloatRepr @@ -90,7 +90,6 @@ vlist = hop.inputargs(Float, Float) return hop.genop('float_'+func, vlist, resulttype=Bool) -# class __extend__(FloatRepr): diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -371,18 +371,18 @@ return ll_int2dec(i) def rtype_hex(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2hex self = self.as_int varg = hop.inputarg(self, 0) true = inputconst(Bool, True) - fn = hop.rtyper.type_system.ll_str.ll_int2hex - return hop.gendirectcall(fn, varg, true) + return hop.gendirectcall(ll_int2hex, varg, true) def rtype_oct(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2oct self = self.as_int varg = hop.inputarg(self, 0) true = inputconst(Bool, True) - fn = hop.rtyper.type_system.ll_str.ll_int2oct - return hop.gendirectcall(fn, varg, true) + return hop.gendirectcall(ll_int2oct, varg, true) def ll_hash_int(n): return intmask(n) diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -41,17 +41,18 @@ listitem = self.listdef.listitem s_value = listitem.s_value if (listitem.range_step is not None and not listitem.mutated and - not isinstance(s_value, annmodel.SomeImpossibleValue)): - return rtyper.type_system.rrange.RangeRepr(listitem.range_step) + not isinstance(s_value, annmodel.SomeImpossibleValue)): + from rpython.rtyper.lltypesystem.rrange import RangeRepr + return RangeRepr(listitem.range_step) else: # cannot do the rtyper.getrepr() call immediately, for the case # of recursive structures -- i.e. if the listdef contains itself - rlist = rtyper.type_system.rlist + from rpython.rtyper.lltypesystem.rlist import ListRepr, FixedSizeListRepr item_repr = lambda: rtyper.getrepr(listitem.s_value) if self.listdef.listitem.resized: - return rlist.ListRepr(rtyper, item_repr, listitem) + return ListRepr(rtyper, item_repr, listitem) else: - return rlist.FixedSizeListRepr(rtyper, item_repr, listitem) + return FixedSizeListRepr(rtyper, item_repr, listitem) def rtyper_makekey(self): self.listdef.listitem.dont_change_any_more = True @@ -339,12 +340,12 @@ def rtype_newlist(hop, v_sizehint=None): + from rpython.rtyper.lltypesystem.rlist import newlist nb_args = hop.nb_args r_list = hop.r_result r_listitem = r_list.item_repr items_v = [hop.inputarg(r_listitem, arg=i) for i in range(nb_args)] - return hop.rtyper.type_system.rlist.newlist(hop.llops, r_list, items_v, - v_sizehint=v_sizehint) + return newlist(hop.llops, r_list, items_v, v_sizehint=v_sizehint) def rtype_alloc_and_set(hop): r_list = hop.r_result @@ -382,10 +383,10 @@ return v_lst1 def rtype_extend_with_str_slice((r_lst1, r_str2), hop): + from rpython.rtyper.lltypesystem.rstr import string_repr if r_lst1.item_repr.lowleveltype not in (Char, UniChar): raise TyperError('"lst += string" only supported with a list ' 'of chars or unichars') - string_repr = r_lst1.rtyper.type_system.rstr.string_repr v_lst1 = hop.inputarg(r_lst1, arg=0) v_str2 = hop.inputarg(string_repr, arg=3) kind, vlist = hop.decompose_slice_args() @@ -398,10 +399,10 @@ class __extend__(pairtype(AbstractListRepr, AbstractCharRepr)): def rtype_extend_with_char_count((r_lst1, r_chr2), hop): + from rpython.rtyper.lltypesystem.rstr import char_repr if r_lst1.item_repr.lowleveltype not in (Char, UniChar): raise TyperError('"lst += string" only supported with a list ' 'of chars or unichars') - char_repr = 
r_lst1.rtyper.type_system.rstr.char_repr v_lst1, v_chr, v_count = hop.inputargs(r_lst1, char_repr, Signed) hop.gendirectcall(ll_extend_with_char_count, v_lst1, v_chr, v_count) return v_lst1 diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -12,8 +12,7 @@ def small_cand(rtyper, s_pbc): - if 1 < len(s_pbc.descriptions) < rtyper.getconfig().translation.withsmallfuncsets and \ - hasattr(rtyper.type_system.rpbc, 'SmallFunctionSetPBCRepr'): + if 1 < len(s_pbc.descriptions) < rtyper.getconfig().translation.withsmallfuncsets: callfamily = s_pbc.any_description().getcallfamily() concretetable, uniquerows = get_concrete_calltable(rtyper, callfamily) if len(uniquerows) == 1 and (not s_pbc.subset_of or small_cand(rtyper, s_pbc.subset_of)): @@ -22,6 +21,9 @@ class __extend__(annmodel.SomePBC): def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rpbc import (FunctionsPBCRepr, + SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr, + MethodOfFrozenPBCRepr) if self.isNone(): return none_frozen_pbc_repr kind = self.getKind() @@ -32,20 +34,20 @@ if sample.overridden: getRepr = OverriddenFunctionPBCRepr else: - getRepr = rtyper.type_system.rpbc.FunctionsPBCRepr + getRepr = FunctionsPBCRepr if small_cand(rtyper, self): - getRepr = rtyper.type_system.rpbc.SmallFunctionSetPBCRepr + getRepr = SmallFunctionSetPBCRepr else: getRepr = getFrozenPBCRepr elif issubclass(kind, description.ClassDesc): # user classes - getRepr = rtyper.type_system.rpbc.ClassesPBCRepr + getRepr = ClassesPBCRepr elif issubclass(kind, description.MethodDesc): - getRepr = rtyper.type_system.rpbc.MethodsPBCRepr + getRepr = MethodsPBCRepr elif issubclass(kind, description.FrozenDesc): getRepr = getFrozenPBCRepr elif issubclass(kind, description.MethodOfFrozenDesc): - getRepr = rtyper.type_system.rpbc.MethodOfFrozenPBCRepr + getRepr = MethodOfFrozenPBCRepr else: raise TyperError("unexpected PBC kind %r" % (kind,)) @@ -350,6 +352,8 @@ return rtype_call_specialcase(hop) def getFrozenPBCRepr(rtyper, s_pbc): + from rpython.rtyper.lltypesystem.rpbc import ( + MultipleUnrelatedFrozenPBCRepr, MultipleFrozenPBCRepr) descs = list(s_pbc.descriptions) assert len(descs) >= 1 if len(descs) == 1 and not s_pbc.can_be_None: @@ -362,15 +366,13 @@ try: return rtyper.pbc_reprs['unrelated'] except KeyError: - rpbc = rtyper.type_system.rpbc - result = rpbc.MultipleUnrelatedFrozenPBCRepr(rtyper) + result = MultipleUnrelatedFrozenPBCRepr(rtyper) rtyper.pbc_reprs['unrelated'] = result return result try: return rtyper.pbc_reprs[access] except KeyError: - result = rtyper.type_system.rpbc.MultipleFrozenPBCRepr(rtyper, - access) + result = MultipleFrozenPBCRepr(rtyper, access) rtyper.pbc_reprs[access] = result rtyper.add_pendingsetup(result) return result @@ -612,9 +614,10 @@ return inputconst(Void, None) def rtype_is_((robj1, rnone2), hop): + from rpython.rtyper.lltypesystem.rpbc import rtype_is_None if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) - return hop.rtyper.type_system.rpbc.rtype_is_None(robj1, rnone2, hop) + return rtype_is_None(robj1, rnone2, hop) class __extend__(pairtype(NoneFrozenPBCRepr, Repr)): @@ -622,10 +625,10 @@ return inputconst(r_to, None) def rtype_is_((rnone1, robj2), hop): + from rpython.rtyper.lltypesystem.rpbc import rtype_is_None if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) - return hop.rtyper.type_system.rpbc.rtype_is_None( - robj2, rnone1, hop, pos=1) + return rtype_is_None(robj2, 
rnone1, hop, pos=1) # ____________________________________________________________ diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -88,26 +88,33 @@ class __extend__(annmodel.SomeString): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.string_repr + from rpython.rtyper.lltypesystem.rstr import string_repr + return string_repr + def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeUnicodeString): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.unicode_repr + from rpython.rtyper.lltypesystem.rstr import unicode_repr + return unicode_repr def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeChar): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.char_repr + from rpython.rtyper.lltypesystem.rstr import char_repr + return char_repr + def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeUnicodeCodePoint): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.unichar_repr + from rpython.rtyper.lltypesystem.rstr import unichar_repr + return unichar_repr + def rtyper_makekey(self): return self.__class__, @@ -271,12 +278,14 @@ raise NotImplementedError def rtype_method_join(self, hop): + from rpython.rtyper.lltypesystem.rlist import BaseListRepr + from rpython.rtyper.lltypesystem.rstr import char_repr, unichar_repr hop.exception_cannot_occur() rstr = hop.args_r[0] if hop.s_result.is_constant(): return inputconst(rstr.repr, hop.s_result.const) r_lst = hop.args_r[1] - if not isinstance(r_lst, hop.rtyper.type_system.rlist.BaseListRepr): + if not isinstance(r_lst, BaseListRepr): raise TyperError("string.join of non-list: %r" % r_lst) v_str, v_lst = hop.inputargs(rstr.repr, r_lst) v_length, v_items = self._list_length_items(hop, v_lst, r_lst.lowleveltype) @@ -284,8 +293,8 @@ if hop.args_s[0].is_constant() and hop.args_s[0].const == '': if r_lst.item_repr == rstr.repr: llfn = self.ll.ll_join_strs - elif (r_lst.item_repr == hop.rtyper.type_system.rstr.char_repr or - r_lst.item_repr == hop.rtyper.type_system.rstr.unichar_repr): + elif (r_lst.item_repr == char_repr or + r_lst.item_repr == unichar_repr): v_tp = hop.inputconst(Void, self.lowleveltype) return hop.gendirectcall(self.ll.ll_join_chars, v_length, v_items, v_tp) @@ -655,8 +664,8 @@ #Helper functions for comparisons def _rtype_compare_template(hop, func): - rstr = hop.rtyper.type_system.rstr - vlist = hop.inputargs(rstr.char_repr, rstr.char_repr) + from rpython.rtyper.lltypesystem.rstr import char_repr + vlist = hop.inputargs(char_repr, char_repr) return hop.genop('char_' + func, vlist, resulttype=Bool) class __extend__(AbstractUniCharRepr): @@ -677,8 +686,8 @@ get_ll_fasthash_function = get_ll_hash_function def rtype_ord(_, hop): - rstr = hop.rtyper.type_system.rstr - vlist = hop.inputargs(rstr.unichar_repr) + from rpython.rtyper.lltypesystem.rstr import unichar_repr + vlist = hop.inputargs(unichar_repr) return hop.genop('cast_unichar_to_int', vlist, resulttype=Signed) @@ -691,8 +700,8 @@ #Helper functions for comparisons def _rtype_unchr_compare_template(hop, func): - rstr = hop.rtyper.type_system.rstr - vlist = hop.inputargs(rstr.unichar_repr, rstr.unichar_repr) + from rpython.rtyper.lltypesystem.rstr import unichar_repr + vlist = hop.inputargs(unichar_repr, unichar_repr) return hop.genop('unichar_' + func, vlist, resulttype=Bool) @@ -702,16 +711,17 @@ class __extend__(pairtype(AbstractCharRepr, AbstractStringRepr), 
pairtype(AbstractUniCharRepr, AbstractUnicodeRepr)): def convert_from_to((r_from, r_to), v, llops): - rstr = llops.rtyper.type_system.rstr - if (r_from == rstr.char_repr and r_to == rstr.string_repr) or\ - (r_from == rstr.unichar_repr and r_to == rstr.unicode_repr): + from rpython.rtyper.lltypesystem.rstr import ( + string_repr, unicode_repr, char_repr, unichar_repr) + if (r_from == char_repr and r_to == string_repr) or\ + (r_from == unichar_repr and r_to == unicode_repr): return llops.gendirectcall(r_from.ll.ll_chr2str, v) return NotImplemented class __extend__(pairtype(AbstractStringRepr, AbstractCharRepr)): def convert_from_to((r_from, r_to), v, llops): - rstr = llops.rtyper.type_system.rstr - if r_from == rstr.string_repr and r_to == rstr.char_repr: + from rpython.rtyper.lltypesystem.rstr import string_repr, char_repr + if r_from == string_repr and r_to == char_repr: c_zero = inputconst(Signed, 0) return llops.gendirectcall(r_from.ll.ll_stritem_nonneg, v, c_zero) return NotImplemented diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -13,8 +13,8 @@ class __extend__(annmodel.SomeTuple): def rtyper_makerepr(self, rtyper): - repr_class = rtyper.type_system.rtuple.TupleRepr - return repr_class(rtyper, [rtyper.getrepr(s_item) for s_item in self.items]) + from rpython.rtyper.lltypesystem.rtuple import TupleRepr + return TupleRepr(rtyper, [rtyper.getrepr(s_item) for s_item in self.items]) def rtyper_makekey_ex(self, rtyper): keys = [rtyper.makekey(s_item) for s_item in self.items] diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -578,7 +578,8 @@ return pair(r_arg1, r_arg2).rtype_extend_with_char_count(hop) def translate_op_newtuple(self, hop): - return self.type_system.rtuple.rtype_newtuple(hop) + from rpython.rtyper.lltypesystem.rtuple import rtype_newtuple + return rtype_newtuple(hop) def translate_op_instantiate1(self, hop): from rpython.rtyper.lltypesystem import rclass diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -9,24 +9,6 @@ class TypeSystem(object): __metaclass__ = extendabletype - def __getattr__(self, name): - """Lazy import to avoid circular dependencies.""" - def load(modname): - try: - return __import__("rpython.rtyper.%s.%s" % (self.name, modname), - None, None, ['__doc__']) - except ImportError: - return None - if name in ('rclass', 'rpbc', 'rbuiltin', 'rtuple', 'rlist', - 'rslice', 'rdict', 'rrange', 'rstr', - 'll_str', 'rbuilder', 'rbytearray'): - mod = load(name) - if mod is not None: - setattr(self, name, mod) - return mod - - raise AttributeError(name) - def derefType(self, T): raise NotImplementedError() diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -6,6 +6,7 @@ c_last_exception, SpaceOperation, FunctionGraph, mkentrymap from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.lltypesystem import lloperation +from rpython.rtyper.lltypesystem.rclass import ll_inst_type from rpython.rtyper import rtyper from rpython.rtyper.rmodel import inputconst from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong @@ -55,7 +56,6 @@ self.mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper) exc_data, null_type, null_value = 
self.setup_excdata() - rclass = translator.rtyper.type_system.rclass (assertion_error_ll_exc_type, assertion_error_ll_exc) = self.get_builtin_exception(AssertionError) (n_i_error_ll_exc_type, @@ -105,7 +105,7 @@ def rpyexc_restore_exception(evalue): if evalue: - exc_data.exc_type = rclass.ll_inst_type(evalue) + exc_data.exc_type = ll_inst_type(evalue) exc_data.exc_value = evalue self.rpyexc_occured_ptr = self.build_func( @@ -170,12 +170,11 @@ def get_builtin_exception(self, Class): edata = self.translator.rtyper.exceptiondata - rclass = self.translator.rtyper.type_system.rclass bk = self.translator.annotator.bookkeeper error_def = bk.getuniqueclassdef(Class) error_ll_exc = edata.get_standard_ll_exc_instance( self.translator.rtyper, error_def) - error_ll_exc_type = rclass.ll_inst_type(error_ll_exc) + error_ll_exc_type = ll_inst_type(error_ll_exc) return error_ll_exc_type, error_ll_exc def transform_completely(self): From noreply at buildbot.pypy.org Thu Aug 1 19:51:34 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 19:51:34 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: Kill a whole lot of stupid mess - now we can have one interpreter benchmarked Message-ID: <20130801175134.B77131C3095@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r215:9b79fbc02de1 Date: 2013-08-01 19:49 +0200 http://bitbucket.org/pypy/benchmarks/changeset/9b79fbc02de1/ Log: Kill a whole lot of stupid mess - now we can have one interpreter benchmarked diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -1,7 +1,7 @@ import os import logging from unladen_swallow.perf import SimpleBenchmark, MeasureGeneric -from unladen_swallow.perf import RawResult, SimpleComparisonResult, avg, ResultError +from unladen_swallow.perf import RawResult, SimpleResult, avg, ResultError import subprocess def relative(*args): @@ -50,7 +50,7 @@ *args, **kwargs) except subprocess.CalledProcessError, e: return ResultError(e) - return SimpleComparisonResult(avg(base_data[0]), -1, -1) + return SimpleResult(avg(base_data[0])) BM.func_name = 'BM_' + bm_name d[BM.func_name] = BM diff --git a/runner.py b/runner.py --- a/runner.py +++ b/runner.py @@ -15,17 +15,14 @@ 'rietveld', 'html5lib', 'ai'] BENCHMARK_SET += perf._FindAllBenchmarks(benchmarks.__dict__).keys() -CHANGED = 'changed' -BASELINE = 'baseline' - class WrongBenchmark(Exception): pass -def run_and_store(benchmark_set, result_filename, changed_path, revision=0, +def run_and_store(benchmark_set, result_filename, path, revision=0, options='', branch='default', args='', upload=False, - fast=False, baseline_path=sys.executable, full_store=False): + fast=False, full_store=False): funcs = perf.BENCH_FUNCS.copy() funcs.update(perf._FindAllBenchmarks(benchmarks.__dict__)) opts = ['-b', ','.join(benchmark_set), @@ -37,7 +34,7 @@ opts += ['--args', args] if full_store: opts += ['--no_statistics'] - opts += [baseline_path, changed_path] + opts += [path] results = perf.main(opts, funcs) f = open(str(result_filename), "w") results = [(name, result.__class__.__name__, result.__dict__) @@ -51,73 +48,6 @@ f.close() return results - -def get_upload_options(options): - """ - returns a dict with 2 keys: CHANGED, BASELINE. The values are - dicts with the keys - * 'upload' (boolean) - * 'project' (string) - * 'executable' (string) - * 'urls (list of strings). - * 'branch' (string) - * 'revision' (string) - - This correspondents to the the --upload* and --upload-baseline* - options. 
- - raises: AssertionError if upload is specified, but not the - corresponding executable or revision. - """ - - if options.upload_baseline_revision is None: - options.upload_baseline_revision = options.upload_revision - - upload_options = {} - - for run in [CHANGED, BASELINE]: - - def get_upload_option(name): - attr_name = 'upload' - if run == BASELINE: - attr_name = '%s_baseline' % attr_name - if name: - attr_name = '%s_%s' % (attr_name, name) - return getattr(options, attr_name) - - urls = get_upload_option('urls') - urls = [url.strip() for url in urls.split(',') if url.strip()] - upload = get_upload_option(None) - project = get_upload_option('project') - executable = get_upload_option('executable') - branch = get_upload_option('branch') - revision = get_upload_option('revision') - if upload: - if executable is None: - raise AssertionError('If you want to --upload[-baseline] you ' - 'have to specify the corresponding ' - '--upload[-baseline]-executable') - if revision is None: - raise AssertionError('If you want to upload the result you ' - 'have to specify a --revision (or ' - '--upload-baseline-revision if you ' - 'want to upload the baseline result') - if ((run == BASELINE and 'nullpython.py' in options.baseline) or - (run == CHANGED and 'nullpython.py' in options.changed)): - raise AssertionError("Don't upload data from the nullpython " - "dummy interpreter. It won't run any " - "real benchmarks.") - - upload_options[run] = { - 'upload': upload, - 'project': project, - 'executable': executable, - 'urls': urls, - 'branch': branch, - 'revision': revision} - return upload_options - - def main(argv): import optparse parser = optparse.OptionParser( @@ -137,13 +67,8 @@ ". (default: Run all listed benchmarks)" ) % ", ".join(sorted(BENCHMARK_SET))) benchmark_group.add_option( - '-c', '--changed', default=sys.executable, - help=('pypy-c or another modified python interpreter to run against. ' - 'Also named the "changed" interpreter. (default: the python ' - 'used to run this script)')) - benchmark_group.add_option( - '--baseline', default=sys.executable, action='store', - help=('Baseline interpreter. (default: the python used to ' + '-p', '--python', default=sys.executable, action='store', + help=('Interpreter. (default: the python used to ' 'run this script)')) benchmark_group.add_option( '-o', '--output-filename', default="result.json", @@ -182,89 +107,28 @@ help="Run the benchmarks with the --no-statistics flag.") parser.add_option_group(benchmark_group) - # upload changed options - upload_group = optparse.OptionGroup( - parser, 'Upload Options', - ('Options for uploading the result of the "changed" python to ' - 'codespeed. The information about revision and branch will ' - 'be taken from the options --revision and --branch.')) - upload_group.add_option( - "--upload", default=None, action="store_true", - help=("Upload results to speed.pypy.org (unless " - "--upload-url is given).")) - upload_group.add_option( - "--upload-urls", default="http://speed.pypy.org/", - help=("Comma seperated urls of the codespeed instances " - "to upload to. (default: http://speed.pypy.org/)")) - upload_group.add_option( - "--upload-project", default="PyPy", - help="The project name in codespeed. (default: PyPy)") - upload_group.add_option( - "--upload-executable", default=None, - help=("The executable name in codespeed. 
(required if --upload " - "is given)")) - parser.add_option_group(upload_group) + parser.add_option("--upload-url", action="store", default=None, + help="Upload to url or None") + parser.add_option("--upload-revision", action="store", default=None, + help="Upload revision") + parser.add_option("--upload-branch", action="store", default=None, + help="Upload branch") + parser.add_option("--upload-project", action="store", default="PyPy") + parser.add_option("--upload-executable", action="store", default="pypy-c") parser.add_option( "--force-host", default=None, action="store", - help=("Force the hostname. This option will also be used when " - "uploading the baseline result.")) + help=("Force the hostname.")) parser.add_option("--niceness", default=None, type="int", help="Set absolute niceness for process") - # upload baseline group - upload_baseline_group = optparse.OptionGroup( - parser, 'Upload Baseline Options', - ('Options for uploading the result of the "baseline" python to ' - 'codespeed. The hostname of the --force-host option will be used ' - 'in the baseline upload too.')) - upload_baseline_group.add_option( - "--upload-baseline", default=None, action="store_true", - help=("Also upload results or the baseline benchmark " - "to speed.pypy.org (unless " - "--upload-baseline-url is given).")) - upload_baseline_group.add_option( - "--upload-baseline-urls", - default="http://speed.pypy.org/", - help=("Comma seperated urls of the codespeed instances " - "to upload to. (default: http://speed.pypy.org/)")) - upload_baseline_group.add_option( - "--upload-baseline-project", default="PyPy", - help="The project name in codespeed (default: PyPy).") - upload_baseline_group.add_option( - "--upload-baseline-executable", default=None, - help=("The executable name in codespeed. (required if " - "--upload-baseline is given)")) - upload_baseline_group.add_option( - '--upload-baseline-branch', default='default', - action='store', - help=("The name of the branch used for the baseline " - "run. (default: 'default'")) - upload_baseline_group.add_option( - '--upload-baseline-revision', action='store', - default=None, - help=("The revision of the baseline. 
(required if --upload-baseline " - "is given)")) - parser.add_option_group(upload_baseline_group) - - # Backward compoatibility options - deprecated_group = optparse.OptionGroup( - parser, 'Deprecated Options', - 'Still here for backward compatibility.') - deprecated_group.add_option( - '-p', '--pypy-c', default=sys.executable, - dest='changed', help='Deprecated alias for -c/--changed') - parser.add_option_group(deprecated_group) - options, args = parser.parse_args(argv) - upload_options = get_upload_options(options) benchmarks = options.benchmarks.split(',') for benchmark in benchmarks: if benchmark not in BENCHMARK_SET: raise WrongBenchmark(benchmark) - changed_path = options.changed - baseline_path = options.baseline + path = options.python fast = options.fast args = options.args full_store = options.full_store @@ -277,25 +141,20 @@ if options.niceness is not None: os.nice(options.niceness - os.nice(0)) - results = run_and_store(benchmarks, output_filename, changed_path, + results = run_and_store(benchmarks, output_filename, path, revision, args=args, fast=fast, - baseline_path=baseline_path, full_store=full_store, branch=branch) - for run in [CHANGED, BASELINE]: - upload = upload_options[run]['upload'] - urls = upload_options[run]['urls'] - project = upload_options[run]['project'] - executable = upload_options[run]['executable'] - branch = upload_options[run]['branch'] or 'default' - revision = upload_options[run]['revision'] + if options.upload_url: + branch = options.upload_branch or 'default' + revision = options.upload_revision - if upload: - # prevent to upload results from the nullpython dummy - host = force_host if force_host else socket.gethostname() - for url in urls: - print save(project, revision, results, executable, host, url, - changed=(run == CHANGED), branch=branch) + # prevent to upload results from the nullpython dummy + host = force_host if force_host else socket.gethostname() + print save(options.upload_project, + revision, results, options.upload_executable, host, + options.upload_url, + branch=branch) if __name__ == '__main__': diff --git a/saveresults.py b/saveresults.py --- a/saveresults.py +++ b/saveresults.py @@ -30,7 +30,7 @@ def save(project, revision, results, executeable, host, url, testing=False, - changed=True, branch='default'): + branch='default'): testparams = [] #Parse data data = {} @@ -41,21 +41,12 @@ res_type = b[1] results = b[2] value = 0 - if res_type == "SimpleComparisonResult": - if changed: - value = results['changed_time'] - else: - value = results['base_time'] - elif res_type == "ComparisonResult": - if changed: - value = results['avg_changed'] - else: - value = results['avg_base'] + if res_type == "SimpleResult": + value = results['time'] + elif res_type == "Result": + value = results['avg_time'] elif res_type == "RawResult": - if changed: - value = results["changed_times"] - else: - value = results["base_times"] + value = results["times"] if value: assert len(value) == 1 value = value[0] @@ -74,11 +65,6 @@ if value is None: print "Ignoring skipped result", data continue - if res_type == "ComparisonResult": - if changed: - data['std_dev'] = results['std_changed'] - else: - data['std_dev'] = results['std_base'] if testing: testparams.append(data) else: diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py --- a/unladen_swallow/perf.py +++ b/unladen_swallow/perf.py @@ -340,38 +340,18 @@ self._done.wait() return self._usage -class ComparisonResult(object): +class Result(object): """ An object representing a result of run. 
Can be converted to a string by calling string_representation """ - def __init__(self, min_base, min_changed, delta_min, avg_base, - avg_changed, delta_avg, t_msg, std_base, std_changed, - delta_std, timeline_link): - self.min_base = min_base - self.min_changed = min_changed - self.delta_min = delta_min - self.avg_base = avg_base - self.avg_changed = avg_changed - self.delta_avg = delta_avg - self.t_msg = t_msg - self.std_base = std_base - self.std_changed = std_changed - self.delta_std = delta_std - self.timeline_link = timeline_link - - def get_timeline(self): - if self.timeline_link is None: - return "" - return "Timeline: %(timeline_link)s" + def __init__(self, times, min_time, avg_time, std_time): + self.times = times + self.min_time = min_time + self.avg_time = avg_time + self.std_time = std_time def string_representation(self): - return (("Min: %(min_base)f -> %(min_changed)f:" + - " %(delta_min)s\n" + - "Avg: %(avg_base)f -> %(avg_changed)f:" + - " %(delta_avg)s\n" + self.t_msg + - "Stddev: %(std_base).5f -> %(std_changed).5f:" + - " %(delta_std)s\n" + self.get_timeline()) - % self.__dict__) + return "Time: %(min_time)f +- %(std_time)f" % self.__dict__ class ResultError(object): def __init__(self, e): @@ -397,14 +377,12 @@ " %(delta_max)s\n" + self.get_usage_over_time()) % self.__dict__) -class SimpleComparisonResult(object): - def __init__(self, base_time, changed_time, time_delta): - self.base_time = base_time - self.changed_time = changed_time - self.time_delta = time_delta +class SimpleResult(object): + def __init__(self, time): + self.time = time def string_representation(self): - return ("%(base_time)f -> %(changed_time)f: %(time_delta)s" + return ("%(time)f" % self.__dict__) class RawResult(object): @@ -420,15 +398,11 @@ max_base, max_changed = max(base_usage), max(changed_usage) delta_max = QuantityDelta(max_base, max_changed) - chart_link = GetChart(SummarizeData(base_usage), - SummarizeData(changed_usage), - options) - - return MemoryUsageResult(max_base, max_changed, delta_max, chart_link) + return MemoryUsageResult(max_base, max_changed, delta_max, "") ### Utility functions -def SimpleBenchmark(benchmark_function, base_python, changed_python, options, +def SimpleBenchmark(benchmark_function, python, options, *args, **kwargs): """Abstract out the body for most simple benchmarks. @@ -442,8 +416,6 @@ Args: benchmark_function: callback that takes (python_path, options) and returns a (times, memory_usage) 2-tuple. - base_python: path to the reference Python binary. - changed_python: path to the experimental Python binary. options: optparse.Values instance. *args, **kwargs: will be passed through to benchmark_function. @@ -452,65 +424,12 @@ Comes with string_representation method. """ try: - changed_data = benchmark_function(changed_python, options, - *args, **kwargs) - base_data = benchmark_function(base_python, options, - *args, **kwargs) + data = benchmark_function(python, options, + *args, **kwargs) except subprocess.CalledProcessError, e: return ResultError(e) - return CompareBenchmarkData(base_data, changed_data, options) - - -def GetChart(base_data, changed_data, options, chart_margin=100): - """Build a Google Chart API URL for the given data. - - Args: - base_data: data points for the base binary. - changed_data: data points for the changed binary. - options: optparse.Values instance. - chart_margin: optional integer margin to add/sub from the max/min. - - Returns: - Google Chart API URL as a string. 
- """ - if options.no_charts: - return None - # We use these to scale the graph. - min_data = min(min(base_data), min(changed_data)) - chart_margin - max_data = max(max(base_data), max(changed_data)) + chart_margin - # Google-bound data, formatted as desired by the Chart API. - data_for_google = (",".join(map(str, base_data)) + "|" + - ",".join(map(str, changed_data))) - - # Come up with labels for the X axis; not too many, though, or they'll be - # unreadable. - max_len = max(len(base_data), len(changed_data)) - points = SummarizeData(range(1, max_len + 1), points=5) - if points[0] != 1: - points.insert(0, 1) - x_axis_labels = "".join("|%d" % i for i in points) - - # Parameters for the Google Chart API. See - # http://code.google.com/apis/chart/ for more details. - # cht=lc: line graph with visible axes. - # chs: dimensions of the graph, in pixels. - # chdl: labels for the graph lines. - # chco: colors for the graph lines. - # chds: minimum and maximum values for the vertical axis. - # chxr: minimum and maximum values for the vertical axis labels. - # chd=t: the data sets, |-separated. - # chxt: which axes to draw. - # chxl: labels for the axes. - base_binary = options.base_binary - changed_binary = options.changed_binary - raw_url = ("http://chart.apis.google.com/chart?cht=lc&chs=700x400&chxt=x,y&" - "chxr=1,%(min_data)s,%(max_data)s&chco=FF0000,0000FF&" - "chdl=%(base_binary)s|%(changed_binary)s&" - "chds=%(min_data)s,%(max_data)s&chd=t:%(data_for_google)s&" - "chxl=0:%(x_axis_labels)s" - % locals()) - return ShortenUrl(raw_url) + return CompareBenchmarkData(data, options) def ShortenUrl(url): @@ -656,7 +575,7 @@ return fixed_env -def CompareMultipleRuns(base_times, changed_times, options): +def CompareMultipleRuns(times, options): """Compare multiple control vs experiment runs of the same benchmark. Args: @@ -668,54 +587,26 @@ A string summarizing the difference between the runs, suitable for human consumption. """ - if len(base_times) != len(changed_times): - print "Base:" - print base_times - print "Changed:" - print changed_times - raise Exception("length did not match") if options.no_statistics: - return RawResult(base_times, changed_times) - if len(base_times) == 1: + return RawResult(times) + if len(times) == 1: # With only one data point, we can't do any of the interesting stats # below. - base_time, changed_time = base_times[0], changed_times[0] - time_delta = TimeDelta(base_time, changed_time) - return SimpleComparisonResult(base_time, changed_time, time_delta) + return SimpleResult(times[0]) - # Create a chart showing iteration times over time. We round the times so - # as not to exceed the GET limit for Google's chart server. 
- timeline_link = GetChart([round(t, 2) for t in base_times], - [round(t, 2) for t in changed_times], - options, chart_margin=1) + times = sorted(times) - base_times = sorted(base_times) - changed_times = sorted(changed_times) + min_time = times[0] + avg_time = avg(times) + std_time = SampleStdDev(times) - min_base, min_changed = base_times[0], changed_times[0] - avg_base, avg_changed = avg(base_times), avg(changed_times) - std_base = SampleStdDev(base_times) - std_changed = SampleStdDev(changed_times) - delta_min = TimeDelta(min_base, min_changed) - delta_avg = TimeDelta(avg_base, avg_changed) - delta_std = QuantityDelta(std_base, std_changed) + return Result(times, min_time, avg_time, std_time) - t_msg = "Not significant\n" - significant, t_score = IsSignificant(base_times, changed_times) - if significant: - t_msg = "Significant (t=%f, a=0.95)\n" % t_score - - return ComparisonResult(min_base, min_changed, delta_min, avg_base, - avg_changed, delta_avg, t_msg, std_base, - std_changed, delta_std, timeline_link) - -def CompareBenchmarkData(base_data, changed_data, options): +def CompareBenchmarkData(data, options): """Compare performance and memory usage. Args: - base_data: 2-tuple of (times, mem_usage) where times is an iterable - of floats; mem_usage is a list of memory usage samples. - changed_data: 2-tuple of (times, mem_usage) where times is an iterable + data: 2-tuple of (times, mem_usage) where times is an iterable of floats; mem_usage is a list of memory usage samples. options: optparse.Values instance. @@ -723,17 +614,16 @@ Human-readable summary of the difference between the base and changed binaries. """ - base_times, base_mem = base_data - changed_times, changed_mem = changed_data + times, mem = data # We suppress performance data when running with --track_memory. if options.track_memory: - if base_mem is not None: - assert changed_mem is not None + if mem is not None: + XXX # we don't track memory return CompareMemoryUsage(base_mem, changed_mem, options) return "Benchmark does not report memory usage yet" - return CompareMultipleRuns(base_times, changed_times, options) + return CompareMultipleRuns(times, options) def CallAndCaptureOutput(command, env=None, track_memory=False, inherit_env=[]): @@ -1516,25 +1406,6 @@ should_run.remove(bm) return should_run -def ParsePythonArgsOption(python_args_opt): - """Parses the --args option. - - Args: - python_args_opt: the string passed to the -a option on the command line. - - Returns: - A pair of lists: (base_python_args, changed_python_args). - """ - args_pair = python_args_opt.split(",") - base_args = args_pair[0].split() # On whitespace. 
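With the comparison against a second interpreter gone, CompareMultipleRuns() above only has to reduce one list of timings to a raw, simple or full result. A self-contained sketch of that reduction; the function below is illustrative and not part of the patch:

    import math

    def summarize(times, no_statistics=False):
        # raw list, single value, or (min, avg, sample stddev) of one
        # interpreter's timings -- same shape as RawResult/SimpleResult/Result
        if no_statistics:
            return ('RawResult', times)
        if len(times) == 1:
            return ('SimpleResult', times[0])
        times = sorted(times)
        avg_time = sum(times) / len(times)
        variance = sum((t - avg_time) ** 2 for t in times) / (len(times) - 1)
        return ('Result', times[0], avg_time, math.sqrt(variance))

    summarize([1.5])            # ('SimpleResult', 1.5)
    summarize([1.2, 1.0, 1.1])  # ('Result', 1.0, 1.1, 0.1)

On the command line the runner now takes a single interpreter, e.g. "runner.py -p ./pypy-c -b ai,html5lib -o result.json", instead of a baseline/changed pair.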
- changed_args = base_args - if len(args_pair) == 2: - changed_args = args_pair[1].split() - elif len(args_pair) > 2: - logging.warning("Didn't expect two or more commas in --args flag: %s", - python_args_opt) - return base_args, changed_args - def ParseEnvVars(option, opt_str, value, parser): """Parser callback to --inherit_env var names""" parser.values.inherit_env = [v for v in value.split(",") if v] @@ -1586,15 +1457,16 @@ help=("Don't perform statistics - return raw data")) options, args = parser.parse_args(argv) - if len(args) != 2: + if len(args) != 1: parser.error("incorrect number of arguments") - base, changed = args + base, = args options.base_binary = base - options.changed_binary = changed - base_args, changed_args = ParsePythonArgsOption(options.args) - base_cmd_prefix = [base] + base_args - changed_cmd_prefix = [changed] + changed_args + base_args = options.args + if base_args: + base_cmd_prefix = [base] + base_args.split(" ") + else: + base_cmd_prefix = [base] logging.basicConfig(level=logging.INFO) @@ -1614,7 +1486,7 @@ print "Running %s..." % name # PyPy specific modification: let the func to return a list of results # for sub-benchmarks - bench_result = func(base_cmd_prefix, changed_cmd_prefix, options) + bench_result = func(base_cmd_prefix, options) name = getattr(func, 'benchmark_name', name) if isinstance(bench_result, list): for subname, subresult in bench_result: From noreply at buildbot.pypy.org Thu Aug 1 19:58:41 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 19:58:41 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: fix one more Message-ID: <20130801175841.649151C3666@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r216:b4435f049efb Date: 2013-08-01 19:58 +0200 http://bitbucket.org/pypy/benchmarks/changeset/b4435f049efb/ Log: fix one more diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -177,34 +177,32 @@ return result BM_translate.benchmark_name = 'trans2' -def BM_cpython_doc(base_python, changed_python, options): +def BM_cpython_doc(python, options): from unladen_swallow.perf import RawResult import subprocess, shutil - t = [] - for python in [base_python, changed_python]: - maindir = relative('lib/cpython-doc') - builddir = os.path.join(os.path.join(maindir, 'tools'), 'build') - try: - shutil.rmtree(builddir) - except OSError: - pass - build = relative('lib/cpython-doc/tools/sphinx-build.py') - os.mkdir(builddir) - docdir = os.path.join(builddir, 'doctrees') - os.mkdir(docdir) - htmldir = os.path.join(builddir, 'html') - os.mkdir(htmldir) - args = base_python + [build, '-b', 'html', '-d', docdir, maindir, htmldir] - proc = subprocess.Popen(args, stderr=subprocess.PIPE, stdout=subprocess.PIPE) - out, err = proc.communicate() - retcode = proc.poll() - if retcode != 0: - print out - print err - raise Exception("sphinx-build.py failed") - t.append(float(out.splitlines()[-1])) - return RawResult([t[0]], [t[1]]) + maindir = relative('lib/cpython-doc') + builddir = os.path.join(os.path.join(maindir, 'tools'), 'build') + try: + shutil.rmtree(builddir) + except OSError: + pass + build = relative('lib/cpython-doc/tools/sphinx-build.py') + os.mkdir(builddir) + docdir = os.path.join(builddir, 'doctrees') + os.mkdir(docdir) + htmldir = os.path.join(builddir, 'html') + os.mkdir(htmldir) + args = python + [build, '-b', 'html', '-d', docdir, maindir, htmldir] + proc = subprocess.Popen(args, stderr=subprocess.PIPE, stdout=subprocess.PIPE) + out, err = proc.communicate() + 
retcode = proc.poll() + if retcode != 0: + print out + print err + raise Exception("sphinx-build.py failed") + res = float(out.splitlines()[-1]) + return RawResult(res) BM_cpython_doc.benchmark_name = 'sphinx' From noreply at buildbot.pypy.org Thu Aug 1 20:04:35 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 20:04:35 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: fix Message-ID: <20130801180435.B2CBC1C3666@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r217:e1ff20a040f8 Date: 2013-08-01 20:04 +0200 http://bitbucket.org/pypy/benchmarks/changeset/e1ff20a040f8/ Log: fix diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -172,7 +172,7 @@ result = [] for name, time in timings: - data = RawResult([time], None) + data = RawResult([time]) result.append((name, data)) return result BM_translate.benchmark_name = 'trans2' @@ -202,7 +202,7 @@ print err raise Exception("sphinx-build.py failed") res = float(out.splitlines()[-1]) - return RawResult(res) + return RawResult([res]) BM_cpython_doc.benchmark_name = 'sphinx' diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py --- a/unladen_swallow/perf.py +++ b/unladen_swallow/perf.py @@ -386,12 +386,11 @@ % self.__dict__) class RawResult(object): - def __init__(self, base_times, changed_times): - self.base_times = base_times - self.changed_times = changed_times + def __init__(self, times): + self.times = times def string_representation(self): - return "Raw results: %s %s" % (self.base_times, self.changed_times) + return "Raw results: %s" % (self.times,) def CompareMemoryUsage(base_usage, changed_usage, options): """Like CompareMultipleRuns, but for memory usage.""" From noreply at buildbot.pypy.org Thu Aug 1 20:11:41 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 20:11:41 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: one more Message-ID: <20130801181141.90DAF1C0130@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r218:428d7feb4b7f Date: 2013-08-01 20:11 +0200 http://bitbucket.org/pypy/benchmarks/changeset/428d7feb4b7f/ Log: one more diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -44,13 +44,13 @@ bm_path = relative('own', name + '.py') return MeasureGeneric(python, options, bm_path, **opts) - def BM(base_python, changed_python, options, *args, **kwargs): + def BM(python, options, *args, **kwargs): try: - base_data = benchmark_function(base_python, options, - *args, **kwargs) + data = benchmark_function(python, options, + *args, **kwargs) except subprocess.CalledProcessError, e: return ResultError(e) - return SimpleResult(avg(base_data[0])) + return SimpleResult(avg(data)) BM.func_name = 'BM_' + bm_name d[BM.func_name] = BM From noreply at buildbot.pypy.org Thu Aug 1 20:19:00 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 20:19:00 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: fixes Message-ID: <20130801181901.003D21C366B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r219:b8682859cf0c Date: 2013-08-01 20:18 +0200 http://bitbucket.org/pypy/benchmarks/changeset/b8682859cf0c/ Log: fixes diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -1,7 +1,7 @@ import os import logging from unladen_swallow.perf import SimpleBenchmark, MeasureGeneric -from unladen_swallow.perf import RawResult, SimpleResult, avg, ResultError +from 
unladen_swallow.perf import RawResult, ResultError, _FindAllBenchmarks import subprocess def relative(*args): @@ -50,7 +50,7 @@ *args, **kwargs) except subprocess.CalledProcessError, e: return ResultError(e) - return SimpleResult(avg(data)) + return RawResult(data) BM.func_name = 'BM_' + bm_name d[BM.func_name] = BM @@ -217,3 +217,6 @@ extra_args=['--benchmark=LU', '100', '200']) _register_new_bm_base_only('scimark', 'scimark_FFT', globals(), extra_args=['--benchmark=FFT', '1024', '1000']) + +if __name__ == '__main__': + print sorted(_FindAllBenchmarks(globals()).keys()) From noreply at buildbot.pypy.org Thu Aug 1 20:22:12 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 20:22:12 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: add a special group Message-ID: <20130801182212.4DD991C366B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r220:354c8deb826a Date: 2013-08-01 20:21 +0200 http://bitbucket.org/pypy/benchmarks/changeset/354c8deb826a/ Log: add a special group diff --git a/runner.py b/runner.py --- a/runner.py +++ b/runner.py @@ -15,6 +15,11 @@ 'rietveld', 'html5lib', 'ai'] BENCHMARK_SET += perf._FindAllBenchmarks(benchmarks.__dict__).keys() +BENCHMARK_SET_FAST = [] +for bench in BENCHMARK_SET: + if not bench.startswith('scrimark') and not bench.startswith('translate'): + BENCHMARK_SET_FAST.append(bench) +del bench class WrongBenchmark(Exception): pass @@ -140,6 +145,8 @@ if options.niceness is not None: os.nice(options.niceness - os.nice(0)) + if benchmarks == 'fast': + benchmarks = ", ".join(sorted(BENCHMARK_SET_FAST)) results = run_and_store(benchmarks, output_filename, path, revision, args=args, fast=fast, From noreply at buildbot.pypy.org Thu Aug 1 20:23:28 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 20:23:28 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: more fixes Message-ID: <20130801182328.6FB501C366B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r221:5af4233fd7ff Date: 2013-08-01 20:23 +0200 http://bitbucket.org/pypy/benchmarks/changeset/5af4233fd7ff/ Log: more fixes diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -50,7 +50,7 @@ *args, **kwargs) except subprocess.CalledProcessError, e: return ResultError(e) - return RawResult(data) + return RawResult(data[0]) BM.func_name = 'BM_' + bm_name d[BM.func_name] = BM From noreply at buildbot.pypy.org Thu Aug 1 20:37:53 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Aug 2013 20:37:53 +0200 (CEST) Subject: [pypy-commit] pypy mro-reorder-numpypy-str: reorder mro for numpypy string_ Message-ID: <20130801183753.55ADE1C3670@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: mro-reorder-numpypy-str Changeset: r65883:30146f8b14a7 Date: 2013-08-01 18:07 +0300 http://bitbucket.org/pypy/pypy/changeset/30146f8b14a7/ Log: reorder mro for numpypy string_ diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -379,7 +379,10 @@ return self class W_CharacterBox(W_FlexibleBox): - pass + def convert_to(self, dtype): + # XXX assert dtype is str type + return self + class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): @@ -684,12 +687,12 @@ __module__ = "numpypy", ) -W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), +W_StringBox.typedef = TypeDef("string_", 
(W_CharacterBox.typedef, str_typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), __module__ = "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -234,6 +234,9 @@ def is_record_type(self): return self.fields is not None + def is_str_type(self): + return self.num == 18 + def is_str_or_unicode(self): return (self.num == 18 or self.num == 19) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -328,7 +328,11 @@ w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - if (w_lhs.get_dtype().is_flexible_type() or \ + if w_lhs.get_dtype().is_str_type() and \ + w_rhs.get_dtype().is_str_type() and \ + self.comparison_func: + pass + elif (w_lhs.get_dtype().is_flexible_type() or \ w_rhs.get_dtype().is_flexible_type()): raise OperationError(space.w_TypeError, space.wrap( 'unsupported operand dtypes %s and %s for "%s"' % \ diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1689,6 +1689,22 @@ def get_size(self): return self.size +def str_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v1): + return func(self, self.to_str(v1)) + return dispatcher + +def str_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.to_str(v1), + self.to_str(v2) + ) + return dispatcher class StringType(BaseType, BaseStringType): T = lltype.Char @@ -1697,6 +1713,7 @@ def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.interp_dtype import new_string_dtype arg = space.str_w(space.str(w_item)) + print 'coerce "%s"' %arg arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): arr.storage[i] = arg[i] @@ -1734,10 +1751,53 @@ builder.append("'") return builder.build() - # XXX move to base class when UnicodeType is supported + # XXX move the rest of this to base class when UnicodeType is supported def to_builtin_type(self, space, box): return space.wrap(self.to_str(box)) + @str_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @str_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @str_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @str_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @str_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @str_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + @str_binary_op + def logical_and(self, v1, v2): + return bool(v1) and bool(v2) + + @str_binary_op + def logical_or(self, v1, v2): + return bool(v1) or bool(v2) + + @str_unary_op + def logical_not(self, v): + return not bool(v) + + @str_binary_op + def logical_xor(self, v1, v2): + return bool(v1) ^ bool(v2) + + def bool(self, v): + return bool(self.to_str(v)) + def build_and_convert(self, space, mydtype, box): assert isinstance(box, interp_boxes.W_GenericBox) if box.get_dtype(space).is_str_or_unicode(): @@ -1753,6 +1813,13 @@ 
arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + class VoidType(BaseType, BaseStringType): T = lltype.Char @@ -1798,12 +1865,6 @@ return W_NDimArray(implementation) NonNativeVoidType = VoidType -NonNativeStringType = StringType - -class UnicodeType(BaseType, BaseStringType): - T = lltype.UniChar - -NonNativeUnicodeType = UnicodeType class RecordType(BaseType): From noreply at buildbot.pypy.org Thu Aug 1 20:37:54 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Aug 2013 20:37:54 +0200 (CEST) Subject: [pypy-commit] pypy mro-reorder-numpypy-str: passes tests and translates, more tests needed Message-ID: <20130801183754.A632B1C3670@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: mro-reorder-numpypy-str Changeset: r65884:f57517c2e8fc Date: 2013-08-01 21:36 +0300 http://bitbucket.org/pypy/pypy/changeset/f57517c2e8fc/ Log: passes tests and translates, more tests needed diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -387,7 +387,6 @@ class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype - arg = space.str_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -328,18 +328,19 @@ w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - if w_lhs.get_dtype().is_str_type() and \ - w_rhs.get_dtype().is_str_type() and \ + w_ldtype = w_lhs.get_dtype() + w_rdtype = w_rhs.get_dtype() + if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ self.comparison_func: pass - elif (w_lhs.get_dtype().is_flexible_type() or \ - w_rhs.get_dtype().is_flexible_type()): + elif (w_ldtype.is_flexible_type() or \ + w_rdtype.is_flexible_type()): raise OperationError(space.w_TypeError, space.wrap( 'unsupported operand dtypes %s and %s for "%s"' % \ - (w_rhs.get_dtype().get_name(), w_lhs.get_dtype().get_name(), + (w_rdtype.get_name(), w_ldtype.get_name(), self.name))) calc_dtype = find_binop_result_dtype(space, - w_lhs.get_dtype(), w_rhs.get_dtype(), + w_ldtype, w_rdtype, int_only=self.int_only, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -740,6 +740,7 @@ class AppTestStrUnicodeDtypes(BaseNumpyAppTest): def test_str_unicode(self): + py.test.skip('numpypy differs from numpy') from numpypy import str_, unicode_, character, flexible, generic assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1712,8 +1712,9 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.interp_dtype import new_string_dtype + if isinstance(w_item, interp_boxes.W_StringBox): + return w_item arg = 
space.str_w(space.str(w_item)) - print 'coerce "%s"' %arg arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): arr.storage[i] = arg[i] @@ -1735,7 +1736,7 @@ builder = StringBuilder() assert isinstance(item, interp_boxes.W_StringBox) i = item.ofs - end = i+self.size + end = i + min(self.size, item.arr.size) while i < end: assert isinstance(item.arr.storage[i], str) if item.arr.storage[i] == '\x00': @@ -1757,6 +1758,7 @@ @str_binary_op def eq(self, v1, v2): + print 'string eq',v1,v2 return v1 == v2 @str_binary_op From noreply at buildbot.pypy.org Thu Aug 1 20:38:32 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 20:38:32 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: one more fix Message-ID: <20130801183832.80EF11C3670@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r222:7ff9ed5173d8 Date: 2013-08-01 20:38 +0200 http://bitbucket.org/pypy/benchmarks/changeset/7ff9ed5173d8/ Log: one more fix diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py --- a/unladen_swallow/perf.py +++ b/unladen_swallow/perf.py @@ -964,19 +964,17 @@ return SimpleBenchmark(MeasureSpitfireWithPsyco, *args, **kwargs) -def BM_SlowSpitfire(base_python, changed_python, options): +def BM_SlowSpitfire(python, options): extra_args = ["--disable_psyco"] spitfire_env = {"PYTHONPATH": Relative("lib/spitfire")} try: - changed_data = MeasureSpitfire(changed_python, options, - spitfire_env, extra_args) - base_data = MeasureSpitfire(base_python, options, - spitfire_env, extra_args) + data = MeasureSpitfire(python, options, + spitfire_env, extra_args) except subprocess.CalledProcessError, e: return str(e) - return CompareBenchmarkData(base_data, changed_data, options) + return CompareBenchmarkData(data, options) def MeasurePickle(python, options, extra_args): From noreply at buildbot.pypy.org Thu Aug 1 20:52:46 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 20:52:46 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: the last one I think Message-ID: <20130801185246.DAFD41C0130@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r223:a4f27cab0673 Date: 2013-08-01 20:52 +0200 http://bitbucket.org/pypy/benchmarks/changeset/a4f27cab0673/ Log: the last one I think diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -141,7 +141,7 @@ ('database', 0.4) ] -def BM_translate(base_python, changed_python, options): +def BM_translate(python, options): """ Run translate.py and returns a benchmark result for each of the phases. 
Note that we run it only with ``base_python`` (which corresponds to @@ -151,7 +151,7 @@ translate_py = relative('lib/pypy/rpython/bin/rpython') target = relative('lib/pypy/pypy/goal/targetpypystandalone.py') #targetnop = relative('lib/pypy/pypy/translator/goal/targetnopstandalone.py') - args = base_python + [translate_py, '--source', '--dont-write-c-files', '-O2', target] + args = python + [translate_py, '--source', '--dont-write-c-files', '-O2', target] logging.info('Running %s', ' '.join(args)) environ = os.environ.copy() environ['PYTHONPATH'] = relative('lib/pypy') From noreply at buildbot.pypy.org Thu Aug 1 21:21:52 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 21:21:52 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: merge default Message-ID: <20130801192152.344931C0130@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r224:e466ac6b92c3 Date: 2013-08-01 21:21 +0200 http://bitbucket.org/pypy/benchmarks/changeset/e466ac6b92c3/ Log: merge default diff --git a/lib/pypy/rpython/flowspace/flowcontext.py b/lib/pypy/rpython/flowspace/flowcontext.py --- a/lib/pypy/rpython/flowspace/flowcontext.py +++ b/lib/pypy/rpython/flowspace/flowcontext.py @@ -800,6 +800,9 @@ self.popvalue() return next_instr + def JUMP_IF_NOT_DEBUG(self, target, next_instr): + return next_instr + def GET_ITER(self, oparg, next_instr): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) diff --git a/runner.py b/runner.py --- a/runner.py +++ b/runner.py @@ -66,7 +66,6 @@ 'json file.')) benchmark_group.add_option( "-b", "--benchmarks", metavar="BM_LIST", - default=','.join(BENCHMARK_SET), help=("Comma-separated list of benchmarks to run" " Valid benchmarks are: %s" ". (default: Run all listed benchmarks)" @@ -76,6 +75,10 @@ help=('Interpreter. (default: the python used to ' 'run this script)')) benchmark_group.add_option( + "-f", "--benchmarks-file", metavar="BM_FILE", + help=("Read the list of benchmarks to run from this file (one " + "benchmark name per line). Do not specify both this and -b.")) + benchmark_group.add_option( '-o', '--output-filename', default="result.json", action="store", help=('Specify the output filename to store resulting json. 
' @@ -129,6 +132,26 @@ options, args = parser.parse_args(argv) benchmarks = options.benchmarks.split(',') + if options.benchmarks is not None: + if options.benchmarks_file is not None: + parser.error( + '--benchmarks and --benchmarks-file are mutually exclusive') + else: + benchmarks = [benchmark.strip() + for benchmark in options.benchmarks.split(',')] + else: + if options.benchmarks_file is not None: + benchmarks = [] + try: + bm_file = open(options.benchmarks_file, 'rt') + except IOError as e: + parser.error('error opening benchmarks file: %s' % e) + with bm_file: + for line in bm_file: + benchmarks.append(line.strip()) + else: + benchmarks = list(BENCHMARK_SET) + for benchmark in benchmarks: if benchmark not in BENCHMARK_SET: raise WrongBenchmark(benchmark) diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py --- a/unladen_swallow/perf.py +++ b/unladen_swallow/perf.py @@ -167,7 +167,10 @@ """ assert len(sample1) == len(sample2) error = PooledSampleVariance(sample1, sample2) / len(sample1) - return (avg(sample1) - avg(sample2)) / math.sqrt(error * 2) + try: + return (avg(sample1) - avg(sample2)) / math.sqrt(error * 2) + except ZeroDivisionError: + return 0.0 def IsSignificant(sample1, sample2): From noreply at buildbot.pypy.org Thu Aug 1 21:24:14 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 21:24:14 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: fix merge Message-ID: <20130801192414.6FFC01C0130@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r225:14b5d59b71d3 Date: 2013-08-01 21:24 +0200 http://bitbucket.org/pypy/benchmarks/changeset/14b5d59b71d3/ Log: fix merge diff --git a/runner.py b/runner.py --- a/runner.py +++ b/runner.py @@ -131,7 +131,6 @@ options, args = parser.parse_args(argv) - benchmarks = options.benchmarks.split(',') if options.benchmarks is not None: if options.benchmarks_file is not None: parser.error( From noreply at buildbot.pypy.org Thu Aug 1 21:24:34 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Aug 2013 21:24:34 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: kill a hack Message-ID: <20130801192434.6A0A41C0130@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r226:cdd4c0fae47a Date: 2013-08-01 21:24 +0200 http://bitbucket.org/pypy/benchmarks/changeset/cdd4c0fae47a/ Log: kill a hack diff --git a/runner.py b/runner.py --- a/runner.py +++ b/runner.py @@ -15,12 +15,6 @@ 'rietveld', 'html5lib', 'ai'] BENCHMARK_SET += perf._FindAllBenchmarks(benchmarks.__dict__).keys() -BENCHMARK_SET_FAST = [] -for bench in BENCHMARK_SET: - if not bench.startswith('scrimark') and not bench.startswith('translate'): - BENCHMARK_SET_FAST.append(bench) -del bench - class WrongBenchmark(Exception): pass @@ -167,8 +161,6 @@ if options.niceness is not None: os.nice(options.niceness - os.nice(0)) - if benchmarks == 'fast': - benchmarks = ", ".join(sorted(BENCHMARK_SET_FAST)) results = run_and_store(benchmarks, output_filename, path, revision, args=args, fast=fast, From noreply at buildbot.pypy.org Thu Aug 1 21:55:35 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Aug 2013 21:55:35 +0200 (CEST) Subject: [pypy-commit] pypy mro-reorder-numpypy-str: remove W_StringBox delegation Message-ID: <20130801195535.76FA01C135D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: mro-reorder-numpypy-str Changeset: r65885:eb6ab033bef4 Date: 2013-08-01 22:03 +0300 http://bitbucket.org/pypy/pypy/changeset/eb6ab033bef4/ Log: remove W_StringBox 
delegation diff --git a/pypy/module/micronumpy/stdobjspace.py b/pypy/module/micronumpy/stdobjspace.py deleted file mode 100644 --- a/pypy/module/micronumpy/stdobjspace.py +++ /dev/null @@ -1,11 +0,0 @@ - -from pypy.objspace.std import stringobject -from pypy.module.micronumpy import interp_boxes - -def delegate_stringbox2stringobj(space, w_box): - return space.wrap(w_box.dtype.itemtype.to_str(w_box)) - -def register_delegates(typeorder): - typeorder[interp_boxes.W_StringBox] = [ - (stringobject.W_StringObject, delegate_stringbox2stringobj), - ] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -740,7 +740,7 @@ class AppTestStrUnicodeDtypes(BaseNumpyAppTest): def test_str_unicode(self): - py.test.skip('numpypy differs from numpy') + skip('numpypy differs from numpy') from numpypy import str_, unicode_, character, flexible, generic assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -133,10 +133,6 @@ # when trying to dispatch multimethods. # XXX build these lists a bit more automatically later - if config.objspace.usemodules.micronumpy: - from pypy.module.micronumpy.stdobjspace import register_delegates - register_delegates(self.typeorder) - self.typeorder[boolobject.W_BoolObject] += [ (intobject.W_IntObject, boolobject.delegate_Bool2IntObject), (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), From noreply at buildbot.pypy.org Thu Aug 1 21:55:36 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Aug 2013 21:55:36 +0200 (CEST) Subject: [pypy-commit] pypy mro-reorder-numpypy-str: a failing test Message-ID: <20130801195536.DFDE11C1360@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: mro-reorder-numpypy-str Changeset: r65886:08e2d7b190ce Date: 2013-08-01 22:21 +0300 http://bitbucket.org/pypy/pypy/changeset/08e2d7b190ce/ Log: a failing test diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2755,6 +2755,15 @@ assert a[2] == 'ab' raises(TypeError, a, 'sum') raises(TypeError, 'a+a') + b = array(['abcdefg', 'ab', 'cd']) + assert a[2] == b[1] + assert bool(a[1]) + + def test_to_str(self): + from numpypy import array + a = array(['abc', 'def', 'ab'], 'S3') + b = array(['abcdef', 'ab', 'cd']) + assert b[0] != a[0] def test_string_scalar(self): from numpypy import array diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1758,11 +1758,11 @@ @str_binary_op def eq(self, v1, v2): - print 'string eq',v1,v2 return v1 == v2 @str_binary_op def ne(self, v1, v2): + print 'string neq',v1,v2 return v1 != v2 @str_binary_op @@ -1798,6 +1798,7 @@ return bool(v1) ^ bool(v2) def bool(self, v): + print 'string bool',v return bool(self.to_str(v)) def build_and_convert(self, space, mydtype, box): From noreply at buildbot.pypy.org Thu Aug 1 22:54:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 22:54:53 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix for win64 Message-ID: <20130801205453.B81BD1C029A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1331:1e1f88442f65 
Date: 2013-08-01 22:54 +0200 http://bitbucket.org/cffi/cffi/changeset/1e1f88442f65/ Log: Fix for win64 diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -379,7 +379,12 @@ #ifdef _WIN64 if (z > 8) - *p_argv = *((void**) argp); /* indirect */ + { + /* On Win64, if a single argument takes more than 8 bytes, + then it is always passed by reference. */ + *p_argv = *((void**) argp); + z = 8; + } else #endif *p_argv = (void*) argp; From noreply at buildbot.pypy.org Thu Aug 1 23:02:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 23:02:50 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Fix for win64 Message-ID: <20130801210250.A64571C029A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1332:951a965555c7 Date: 2013-08-01 22:54 +0200 http://bitbucket.org/cffi/cffi/changeset/951a965555c7/ Log: Fix for win64 diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -379,7 +379,12 @@ #ifdef _WIN64 if (z > 8) - *p_argv = *((void**) argp); /* indirect */ + { + /* On Win64, if a single argument takes more than 8 bytes, + then it is always passed by reference. */ + *p_argv = *((void**) argp); + z = 8; + } else #endif *p_argv = (void*) argp; From noreply at buildbot.pypy.org Thu Aug 1 23:02:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 23:02:51 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Update the version Message-ID: <20130801210251.CE2511C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1333:9a6dac51d4f7 Date: 2013-08-01 23:00 +0200 http://bitbucket.org/cffi/cffi/changeset/9a6dac51d4f7/ Log: Update the version diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7" -__version_info__ = (0, 7) +__version__ = "0.7.1" +__version_info__ = (0, 7, 1) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.7' # The full version, including alpha/beta/rc tags. -release = '0.7' +release = '0.7.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -90,13 +90,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.1.tar.gz - Or grab the most current version by following the instructions below. - - MD5: 2110516c65f7c9e6f324241c322178c8 + - MD5: ... - - SHA: 772205729d9ef620adf48f351eb79f3d0ab2d014 + - SHA: ... 
* Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -90,7 +90,7 @@ `Mailing list `_ """, - version='0.7', + version='0.7.1', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -7,6 +7,7 @@ BACKEND_VERSIONS = { '0.4.2': '0.4', # did not change + '0.7.1': '0.7', # did not change } def test_version(): @@ -21,7 +22,7 @@ content = open(p).read() # v = cffi.__version__ - assert ("version = '%s'\n" % v) in content + assert ("version = '%s'\n" % BACKEND_VERSIONS.get(v, v)) in content assert ("release = '%s'\n" % v) in content def test_doc_version_file(): @@ -44,4 +45,5 @@ v = cffi.__version__ p = os.path.join(parent, 'c', 'test_c.py') content = open(p).read() - assert ('assert __version__ == "%s"' % v) in content + assert (('assert __version__ == "%s"' % BACKEND_VERSIONS.get(v, v)) + in content) From noreply at buildbot.pypy.org Thu Aug 1 23:02:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 23:02:52 +0200 (CEST) Subject: [pypy-commit] cffi default: Update the version Message-ID: <20130801210252.EF7721C029A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1334:e26dd90056f3 Date: 2013-08-01 23:00 +0200 http://bitbucket.org/cffi/cffi/changeset/e26dd90056f3/ Log: Update the version diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7" -__version_info__ = (0, 7) +__version__ = "0.7.1" +__version_info__ = (0, 7, 1) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.7' # The full version, including alpha/beta/rc tags. -release = '0.7' +release = '0.7.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -90,7 +90,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.1.tar.gz - Or grab the most current version by following the instructions below. 
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -90,7 +90,7 @@ `Mailing list `_ """, - version='0.7', + version='0.7.1', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -7,6 +7,7 @@ BACKEND_VERSIONS = { '0.4.2': '0.4', # did not change + '0.7.1': '0.7', # did not change } def test_version(): @@ -21,7 +22,7 @@ content = open(p).read() # v = cffi.__version__ - assert ("version = '%s'\n" % v) in content + assert ("version = '%s'\n" % BACKEND_VERSIONS.get(v, v)) in content assert ("release = '%s'\n" % v) in content def test_doc_version_file(): @@ -44,4 +45,5 @@ v = cffi.__version__ p = os.path.join(parent, 'c', 'test_c.py') content = open(p).read() - assert ('assert __version__ == "%s"' % v) in content + assert (('assert __version__ == "%s"' % BACKEND_VERSIONS.get(v, v)) + in content) From noreply at buildbot.pypy.org Thu Aug 1 23:05:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Aug 2013 23:05:06 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Update MD5/SHA1 Message-ID: <20130801210506.1FAB91C029A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1335:3e8d24e74a6f Date: 2013-08-01 23:04 +0200 http://bitbucket.org/cffi/cffi/changeset/3e8d24e74a6f/ Log: Update MD5/SHA1 diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -94,9 +94,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: dcfbb32d9a757d515801463602e4c533 - - SHA: ... + - SHA: 44fa6b50d37b0b5be6a0bee7950a59ba9e373fb8 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Fri Aug 2 00:50:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 00:50:34 +0200 (CEST) Subject: [pypy-commit] cffi default: Remove this temporary warning Message-ID: <20130801225034.E21AD1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1336:09b29fe4d7ff Date: 2013-08-01 23:07 +0200 http://bitbucket.org/cffi/cffi/changeset/09b29fe4d7ff/ Log: Remove this temporary warning diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -78,8 +78,6 @@ with CFFI). * pycparser >= 2.06: http://code.google.com/p/pycparser/ - (Note that in old downloads of 2.08, the tarball contained an - installation issue; it was fixed without changing the version number.) * a C compiler is required to use CFFI during development, but not to run correctly-installed programs that use CFFI. From noreply at buildbot.pypy.org Fri Aug 2 00:50:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 00:50:36 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Bah, fix Message-ID: <20130801225036.1E7461C01A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1337:4cc4c30496d6 Date: 2013-08-02 00:45 +0200 http://bitbucket.org/cffi/cffi/changeset/4cc4c30496d6/ Log: Bah, fix diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -54,7 +54,8 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert backend.__version__ == __version__ + assert (backend.__version__ == __version__ or + backend.__version__ == __version__[:3]) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! 
It's probably not going to work well.) From noreply at buildbot.pypy.org Fri Aug 2 00:50:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 00:50:37 +0200 (CEST) Subject: [pypy-commit] cffi default: Bah, fix Message-ID: <20130801225037.3FB321C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1338:e6d841bc4677 Date: 2013-08-02 00:45 +0200 http://bitbucket.org/cffi/cffi/changeset/e6d841bc4677/ Log: Bah, fix diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -54,7 +54,8 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert backend.__version__ == __version__ + assert (backend.__version__ == __version__ or + backend.__version__ == __version__[:3]) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) From noreply at buildbot.pypy.org Fri Aug 2 00:50:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 00:50:38 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Update the version number to 0.7.2 Message-ID: <20130801225038.626731C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1339:d4d50eef7198 Date: 2013-08-02 00:47 +0200 http://bitbucket.org/cffi/cffi/changeset/d4d50eef7198/ Log: Update the version number to 0.7.2 diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.1" -__version_info__ = (0, 7, 1) +__version__ = "0.7.2" +__version_info__ = (0, 7, 2) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.7' # The full version, including alpha/beta/rc tags. -release = '0.7.1' +release = '0.7.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -90,13 +90,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.2.tar.gz - Or grab the most current version by following the instructions below. - - MD5: dcfbb32d9a757d515801463602e4c533 + - MD5: ... - - SHA: 44fa6b50d37b0b5be6a0bee7950a59ba9e373fb8 + - SHA: ... * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -90,7 +90,7 @@ `Mailing list `_ """, - version='0.7.1', + version='0.7.2', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -8,6 +8,7 @@ BACKEND_VERSIONS = { '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change + '0.7.2': '0.7', # did not change } def test_version(): From noreply at buildbot.pypy.org Fri Aug 2 00:50:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 00:50:39 +0200 (CEST) Subject: [pypy-commit] cffi release-0.7: Update MD5/SHA1. 
Message-ID: <20130801225039.823921C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.7 Changeset: r1340:1f576889e281 Date: 2013-08-02 00:50 +0200 http://bitbucket.org/cffi/cffi/changeset/1f576889e281/ Log: Update MD5/SHA1. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -94,9 +94,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: d329f5cb2053fd31dafc02e2c9ef0299 - - SHA: ... + - SHA: e2277124f88039a9969e54fd3cc2aa6afbdea1b5 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Fri Aug 2 00:51:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 00:51:25 +0200 (CEST) Subject: [pypy-commit] cffi default: Update the version number to 0.7.2 Message-ID: <20130801225125.7F7451C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1341:e5c3342ce174 Date: 2013-08-02 00:47 +0200 http://bitbucket.org/cffi/cffi/changeset/e5c3342ce174/ Log: Update the version number to 0.7.2 diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.7.1" -__version_info__ = (0, 7, 1) +__version__ = "0.7.2" +__version_info__ = (0, 7, 2) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.7' # The full version, including alpha/beta/rc tags. -release = '0.7.1' +release = '0.7.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,7 +88,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.7.2.tar.gz - Or grab the most current version by following the instructions below. 
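The assert relaxed in the two "Bah, fix" changesets above (r1337 on release-0.7, r1338 on default), together with the BACKEND_VERSIONS table extended in testing/test_version.py below, encode one compatibility rule: a cffi micro release such as 0.7.2 may run against a _cffi_backend that still reports 0.7. A minimal sketch of that rule in Python, assuming only what the diffs show; backend_matches is an illustrative name, not part of cffi's API:

    def backend_matches(cffi_version, backend_version):
        # Mirrors the assert added to cffi/api.py: accept an exact match,
        # or a match on the first three characters, so that cffi 0.7.1 and
        # 0.7.2 still accept a _cffi_backend compiled as version 0.7
        # ("0.7.2"[:3] == "0.7").
        return (backend_version == cffi_version or
                backend_version == cffi_version[:3])

    assert backend_matches("0.7.2", "0.7.2")
    assert backend_matches("0.7.2", "0.7")      # micro release, older backend
    assert not backend_matches("0.8", "0.7")    # different minor version
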
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -90,7 +90,7 @@ `Mailing list `_ """, - version='0.7.1', + version='0.7.2', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -8,6 +8,7 @@ BACKEND_VERSIONS = { '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change + '0.7.2': '0.7', # did not change } def test_version(): From noreply at buildbot.pypy.org Fri Aug 2 05:19:00 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Aug 2013 05:19:00 +0200 (CEST) Subject: [pypy-commit] pypy mro-reorder-numpypy-str: fix, make to_str() independent of self Message-ID: <20130802031900.220111C0130@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: mro-reorder-numpypy-str Changeset: r65887:0e0cfbd7c508 Date: 2013-08-02 06:17 +0300 http://bitbucket.org/pypy/pypy/changeset/0e0cfbd7c508/ Log: fix, make to_str() independent of self diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2761,9 +2761,9 @@ def test_to_str(self): from numpypy import array - a = array(['abc', 'def', 'ab'], 'S3') - b = array(['abcdef', 'ab', 'cd']) - assert b[0] != a[0] + a = array(['abc','abc', 'def', 'ab'], 'S3') + b = array(['mnopqr','abcdef', 'ab', 'cd']) + assert b[1] != a[1] def test_string_scalar(self): from numpypy import array diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1723,6 +1723,7 @@ @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) + # XXX simplify to range(box.dtype.get_size()) ? for k in range(min(self.size, box.arr.size-offset)): arr.storage[k + i] = box.arr.storage[k + offset] @@ -1736,7 +1737,7 @@ builder = StringBuilder() assert isinstance(item, interp_boxes.W_StringBox) i = item.ofs - end = i + min(self.size, item.arr.size) + end = i + item.dtype.get_size() while i < end: assert isinstance(item.arr.storage[i], str) if item.arr.storage[i] == '\x00': @@ -1762,7 +1763,6 @@ @str_binary_op def ne(self, v1, v2): - print 'string neq',v1,v2 return v1 != v2 @str_binary_op @@ -1798,7 +1798,6 @@ return bool(v1) ^ bool(v2) def bool(self, v): - print 'string bool',v return bool(self.to_str(v)) def build_and_convert(self, space, mydtype, box): From noreply at buildbot.pypy.org Fri Aug 2 07:11:33 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Aug 2013 07:11:33 +0200 (CEST) Subject: [pypy-commit] pypy mro-reorder-numpypy-str: document branch Message-ID: <20130802051133.C32011C0793@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: mro-reorder-numpypy-str Changeset: r65888:e080a2fb9496 Date: 2013-08-02 06:27 +0300 http://bitbucket.org/pypy/pypy/changeset/e080a2fb9496/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -58,3 +58,7 @@ .. branch: foldable-getarrayitem-indexerror Constant-fold reading out of constant tuples in PyPy. +.. branch: mro-reorder-numpypy-str +No longer delegate numpy string_ methods to space.StringObject, in numpy +this works by kind of by accident. 
Support for merging the refactor-str-types +branch From noreply at buildbot.pypy.org Fri Aug 2 07:11:34 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Aug 2013 07:11:34 +0200 (CEST) Subject: [pypy-commit] pypy mro-reorder-numpypy-str: close branch about to be merged Message-ID: <20130802051134.C1BB01C1352@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: mro-reorder-numpypy-str Changeset: r65889:9b3308c15c49 Date: 2013-08-02 07:01 +0300 http://bitbucket.org/pypy/pypy/changeset/9b3308c15c49/ Log: close branch about to be merged From noreply at buildbot.pypy.org Fri Aug 2 07:11:36 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Aug 2013 07:11:36 +0200 (CEST) Subject: [pypy-commit] pypy default: merge mro-reorder-numpypy-str which implements str compare methods rather than delegate Message-ID: <20130802051136.234AC1C3259@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65890:8307eb0e0210 Date: 2013-08-02 07:03 +0300 http://bitbucket.org/pypy/pypy/changeset/8307eb0e0210/ Log: merge mro-reorder-numpypy-str which implements str compare methods rather than delegate diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -58,3 +58,7 @@ .. branch: foldable-getarrayitem-indexerror Constant-fold reading out of constant tuples in PyPy. +.. branch: mro-reorder-numpypy-str +No longer delegate numpy string_ methods to space.StringObject, in numpy +this works by kind of by accident. Support for merging the refactor-str-types +branch diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -379,12 +379,14 @@ return self class W_CharacterBox(W_FlexibleBox): - pass + def convert_to(self, dtype): + # XXX assert dtype is str type + return self + class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype - arg = space.str_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -684,12 +686,12 @@ __module__ = "numpypy", ) -W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), +W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), __module__ = "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -234,6 +234,9 @@ def is_record_type(self): return self.fields is not None + def is_str_type(self): + return self.num == 18 + def is_str_or_unicode(self): return (self.num == 18 or self.num == 19) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -328,14 +328,19 @@ w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - if (w_lhs.get_dtype().is_flexible_type() or \ - w_rhs.get_dtype().is_flexible_type()): + w_ldtype 
= w_lhs.get_dtype() + w_rdtype = w_rhs.get_dtype() + if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + self.comparison_func: + pass + elif (w_ldtype.is_flexible_type() or \ + w_rdtype.is_flexible_type()): raise OperationError(space.w_TypeError, space.wrap( 'unsupported operand dtypes %s and %s for "%s"' % \ - (w_rhs.get_dtype().get_name(), w_lhs.get_dtype().get_name(), + (w_rdtype.get_name(), w_ldtype.get_name(), self.name))) calc_dtype = find_binop_result_dtype(space, - w_lhs.get_dtype(), w_rhs.get_dtype(), + w_ldtype, w_rdtype, int_only=self.int_only, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools, diff --git a/pypy/module/micronumpy/stdobjspace.py b/pypy/module/micronumpy/stdobjspace.py deleted file mode 100644 --- a/pypy/module/micronumpy/stdobjspace.py +++ /dev/null @@ -1,11 +0,0 @@ - -from pypy.objspace.std import stringobject -from pypy.module.micronumpy import interp_boxes - -def delegate_stringbox2stringobj(space, w_box): - return space.wrap(w_box.dtype.itemtype.to_str(w_box)) - -def register_delegates(typeorder): - typeorder[interp_boxes.W_StringBox] = [ - (stringobject.W_StringObject, delegate_stringbox2stringobj), - ] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -740,6 +740,7 @@ class AppTestStrUnicodeDtypes(BaseNumpyAppTest): def test_str_unicode(self): + skip('numpypy differs from numpy') from numpypy import str_, unicode_, character, flexible, generic assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2755,6 +2755,15 @@ assert a[2] == 'ab' raises(TypeError, a, 'sum') raises(TypeError, 'a+a') + b = array(['abcdefg', 'ab', 'cd']) + assert a[2] == b[1] + assert bool(a[1]) + + def test_to_str(self): + from numpypy import array + a = array(['abc','abc', 'def', 'ab'], 'S3') + b = array(['mnopqr','abcdef', 'ab', 'cd']) + assert b[1] != a[1] def test_string_scalar(self): from numpypy import array diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1689,6 +1689,22 @@ def get_size(self): return self.size +def str_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v1): + return func(self, self.to_str(v1)) + return dispatcher + +def str_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.to_str(v1), + self.to_str(v2) + ) + return dispatcher class StringType(BaseType, BaseStringType): T = lltype.Char @@ -1696,6 +1712,8 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.interp_dtype import new_string_dtype + if isinstance(w_item, interp_boxes.W_StringBox): + return w_item arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1705,6 +1723,7 @@ @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) + # XXX simplify to range(box.dtype.get_size()) ? 
for k in range(min(self.size, box.arr.size-offset)): arr.storage[k + i] = box.arr.storage[k + offset] @@ -1718,7 +1737,7 @@ builder = StringBuilder() assert isinstance(item, interp_boxes.W_StringBox) i = item.ofs - end = i+self.size + end = i + item.dtype.get_size() while i < end: assert isinstance(item.arr.storage[i], str) if item.arr.storage[i] == '\x00': @@ -1734,10 +1753,53 @@ builder.append("'") return builder.build() - # XXX move to base class when UnicodeType is supported + # XXX move the rest of this to base class when UnicodeType is supported def to_builtin_type(self, space, box): return space.wrap(self.to_str(box)) + @str_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @str_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @str_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @str_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @str_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @str_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + @str_binary_op + def logical_and(self, v1, v2): + return bool(v1) and bool(v2) + + @str_binary_op + def logical_or(self, v1, v2): + return bool(v1) or bool(v2) + + @str_unary_op + def logical_not(self, v): + return not bool(v) + + @str_binary_op + def logical_xor(self, v1, v2): + return bool(v1) ^ bool(v2) + + def bool(self, v): + return bool(self.to_str(v)) + def build_and_convert(self, space, mydtype, box): assert isinstance(box, interp_boxes.W_GenericBox) if box.get_dtype(space).is_str_or_unicode(): @@ -1753,6 +1815,13 @@ arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + class VoidType(BaseType, BaseStringType): T = lltype.Char @@ -1798,12 +1867,6 @@ return W_NDimArray(implementation) NonNativeVoidType = VoidType -NonNativeStringType = StringType - -class UnicodeType(BaseType, BaseStringType): - T = lltype.UniChar - -NonNativeUnicodeType = UnicodeType class RecordType(BaseType): diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -133,10 +133,6 @@ # when trying to dispatch multimethods. 
# XXX build these lists a bit more automatically later - if config.objspace.usemodules.micronumpy: - from pypy.module.micronumpy.stdobjspace import register_delegates - register_delegates(self.typeorder) - self.typeorder[boolobject.W_BoolObject] += [ (intobject.W_IntObject, boolobject.delegate_Bool2IntObject), (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), From noreply at buildbot.pypy.org Fri Aug 2 07:53:09 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Aug 2013 07:53:09 +0200 (CEST) Subject: [pypy-commit] pypy default: uncomment one passing test, add a commented failing one Message-ID: <20130802055309.B15971C00F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65891:3530f88a209f Date: 2013-08-02 08:52 +0300 http://bitbucket.org/pypy/pypy/changeset/3530f88a209f/ Log: uncomment one passing test, add a commented failing one diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2758,6 +2758,10 @@ b = array(['abcdefg', 'ab', 'cd']) assert a[2] == b[1] assert bool(a[1]) + c = array(['ab','cdefg','hi','jk']) + # not implemented yet + #c[0] += c[3] + #assert c[0] == 'abjk' def test_to_str(self): from numpypy import array @@ -2775,8 +2779,7 @@ assert str(a.dtype) == '|S1' a = array('x', dtype='c') assert str(a.dtype) == '|S1' - # XXX can sort flexible types, why not comparison? - #assert a == 'x' + assert a == 'x' def test_flexible_repr(self): from numpypy import array From noreply at buildbot.pypy.org Fri Aug 2 11:47:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 11:47:48 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: List some optimization opportunities outside the JIT. Message-ID: <20130802094748.3D3BB1C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65892:3c7b8932bffc Date: 2013-08-02 11:45 +0200 http://bitbucket.org/pypy/pypy/changeset/3c7b8932bffc/ Log: List some optimization opportunities outside the JIT. diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -26,3 +26,34 @@ ------------------------------------------------------------ optimize the static placement of the STM_XxxBARRIERs + +------------------------------------------------------------ + + + +Current optimization opportunities (outside the JIT) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +tweak translator/stm/ to improve placement of barriers, at least at +whole-function level, but maybe cross-function; and reintroduce tweaks +to the PyFrame object (make sure it's always written and don't put more +barriers) + +in parallel, tweak the API of stmgc: think about adding +stm_repeat_read_barrier, and support "tentative" write_barrier calls +that are not actually followed by a write (checked by comparing the +object contents) + +in the interpreter, e.g. BINARY_ADD calls space.add() which possibly +(but rarely) can cause a transaction break, thus requiring that the +frame be write-barrier()-ed again. I'm thinking about alternatives for +this case: e.g. have a separate stack of objects, and the top-most +object on this stack is always in write mode. so just after a +transaction break, we force a write barrier on the top object of the +stack. 
this would be needed to avoid the usually-pointless write +barriers on the PyFrame everywhere in the interpreter + +running valgrind we can see X% of the time in the read or write +barriers, but it would be interesting to know also the time spent in the +fast-path, as well as splitting it based e.g. on the RPython type of +object. From noreply at buildbot.pypy.org Fri Aug 2 11:48:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 11:48:54 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: The nursery allocs don't have a fast-path right now Message-ID: <20130802094854.2A08A1C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65893:382899f1930a Date: 2013-08-02 11:48 +0200 http://bitbucket.org/pypy/pypy/changeset/382899f1930a/ Log: The nursery allocs don't have a fast-path right now diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -57,3 +57,5 @@ barriers, but it would be interesting to know also the time spent in the fast-path, as well as splitting it based e.g. on the RPython type of object. + +reimplement the fast-path of the nursery allocations in the GC From noreply at buildbot.pypy.org Fri Aug 2 11:52:04 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 2 Aug 2013 11:52:04 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130802095204.2B7A41C00F4@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65894:6d17d2b8ae2b Date: 2013-08-02 11:38 +0200 http://bitbucket.org/pypy/pypy/changeset/6d17d2b8ae2b/ Log: hg merge default diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.0' +version = '2.1' # The full version, including alpha/beta/rc tags. -release = '2.0.2' +release = '2.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -104,8 +104,8 @@ executable. The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.7.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``RPython magically makes you rich and famous (says so on the tin)'' @@ -171,7 +171,7 @@ the ``bin/pypy`` executable. To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. in ``/opt/pypy2.0``) and put a symlink to the +whole hierarchy alone (e.g. 
in ``/opt/pypy2.1``) and put a symlink to the ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` If the executable fails to find suitable libraries, it will report diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,10 +53,10 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-2.0.tar.bz2 - $ ./pypy-2.0/bin/pypy - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + $ tar xf pypy-2.1.tar.bz2 + $ ./pypy-2.1/bin/pypy + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``PyPy is an exciting technology that lets you to write fast, portable, multi-platform interpreters with less @@ -75,14 +75,14 @@ $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.0/bin/pypy distribute_setup.py + $ ./pypy-2.1/bin/pypy distribute_setup.py - $ ./pypy-2.0/bin/pypy get-pip.py + $ ./pypy-2.1/bin/pypy get-pip.py - $ ./pypy-2.0/bin/pip install pygments # for example + $ ./pypy-2.1/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-2.0/site-packages``, and -the scripts in ``pypy-2.0/bin``. +3rd party libraries will be installed in ``pypy-2.1/site-packages``, and +the scripts in ``pypy-2.1/bin``. Installing using virtualenv --------------------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0.2`_: the latest official release +* `Release 2.1.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0.2`: http://pypy.org/download.html +.. _`Release 2.1.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.1.0.rst b/pypy/doc/release-2.1.0.rst --- a/pypy/doc/release-2.1.0.rst +++ b/pypy/doc/release-2.1.0.rst @@ -15,7 +15,7 @@ .. _`Raspberry Pi Foundation`: http://www.raspberrypi.org -The first beta of PyPy3 2.1, targetting version 3 of the Python language, was +The first beta of PyPy3 2.1, targeting version 3 of the Python language, was just released, more details can be found `here`_. .. _`here`: http://morepypy.blogspot.com/2013/07/pypy3-21-beta-1.html diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -58,3 +58,7 @@ .. branch: foldable-getarrayitem-indexerror Constant-fold reading out of constant tuples in PyPy. +.. branch: mro-reorder-numpypy-str +No longer delegate numpy string_ methods to space.StringObject, in numpy +this works by kind of by accident. 
Support for merging the refactor-str-types +branch diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-alpha0" +#define PYPY_VERSION "2.2.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -379,12 +379,14 @@ return self class W_CharacterBox(W_FlexibleBox): - pass + def convert_to(self, dtype): + # XXX assert dtype is str type + return self + class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype - arg = space.str_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -684,12 +686,12 @@ __module__ = "numpypy", ) -W_StringBox.typedef = TypeDef("string_", (W_BytesObject.typedef, W_CharacterBox.typedef), +W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, W_BytesObject.typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (W_UnicodeObject.typedef, W_CharacterBox.typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), __module__ = "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -234,6 +234,9 @@ def is_record_type(self): return self.fields is not None + def is_str_type(self): + return self.num == 18 + def is_str_or_unicode(self): return (self.num == 18 or self.num == 19) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -328,14 +328,19 @@ w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - if (w_lhs.get_dtype().is_flexible_type() or \ - w_rhs.get_dtype().is_flexible_type()): + w_ldtype = w_lhs.get_dtype() + w_rdtype = w_rhs.get_dtype() + if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + self.comparison_func: + pass + elif (w_ldtype.is_flexible_type() or \ + w_rdtype.is_flexible_type()): raise OperationError(space.w_TypeError, space.wrap( 'unsupported operand dtypes %s and %s for "%s"' % \ - (w_rhs.get_dtype().get_name(), w_lhs.get_dtype().get_name(), + (w_rdtype.get_name(), w_ldtype.get_name(), self.name))) calc_dtype = find_binop_result_dtype(space, - w_lhs.get_dtype(), w_rhs.get_dtype(), + w_ldtype, w_rdtype, int_only=self.int_only, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools, diff --git a/pypy/module/micronumpy/stdobjspace.py b/pypy/module/micronumpy/stdobjspace.py deleted file mode 100644 --- a/pypy/module/micronumpy/stdobjspace.py +++ /dev/null @@ -1,11 +0,0 @@ - -from pypy.objspace.std import bytesobject -from pypy.module.micronumpy import interp_boxes - -def delegate_stringbox2stringobj(space, w_box): - return space.wrap(w_box.dtype.itemtype.to_str(w_box)) - -def 
register_delegates(typeorder): - typeorder[interp_boxes.W_StringBox] = [ - (bytesobject.W_BytesObject, delegate_stringbox2stringobj), - ] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -740,6 +740,7 @@ class AppTestStrUnicodeDtypes(BaseNumpyAppTest): def test_str_unicode(self): + skip('numpypy differs from numpy') from numpypy import str_, unicode_, character, flexible, generic assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2755,6 +2755,19 @@ assert a[2] == 'ab' raises(TypeError, a, 'sum') raises(TypeError, 'a+a') + b = array(['abcdefg', 'ab', 'cd']) + assert a[2] == b[1] + assert bool(a[1]) + c = array(['ab','cdefg','hi','jk']) + # not implemented yet + #c[0] += c[3] + #assert c[0] == 'abjk' + + def test_to_str(self): + from numpypy import array + a = array(['abc','abc', 'def', 'ab'], 'S3') + b = array(['mnopqr','abcdef', 'ab', 'cd']) + assert b[1] != a[1] def test_string_scalar(self): from numpypy import array @@ -2766,8 +2779,7 @@ assert str(a.dtype) == '|S1' a = array('x', dtype='c') assert str(a.dtype) == '|S1' - # XXX can sort flexible types, why not comparison? - #assert a == 'x' + assert a == 'x' def test_flexible_repr(self): from numpypy import array diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1689,6 +1689,22 @@ def get_size(self): return self.size +def str_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v1): + return func(self, self.to_str(v1)) + return dispatcher + +def str_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.to_str(v1), + self.to_str(v2) + ) + return dispatcher class StringType(BaseType, BaseStringType): T = lltype.Char @@ -1696,6 +1712,8 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.interp_dtype import new_string_dtype + if isinstance(w_item, interp_boxes.W_StringBox): + return w_item arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1705,6 +1723,7 @@ @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) + # XXX simplify to range(box.dtype.get_size()) ? 
for k in range(min(self.size, box.arr.size-offset)): arr.storage[k + i] = box.arr.storage[k + offset] @@ -1718,7 +1737,7 @@ builder = StringBuilder() assert isinstance(item, interp_boxes.W_StringBox) i = item.ofs - end = i+self.size + end = i + item.dtype.get_size() while i < end: assert isinstance(item.arr.storage[i], str) if item.arr.storage[i] == '\x00': @@ -1734,10 +1753,53 @@ builder.append("'") return builder.build() - # XXX move to base class when UnicodeType is supported + # XXX move the rest of this to base class when UnicodeType is supported def to_builtin_type(self, space, box): return space.wrap(self.to_str(box)) + @str_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @str_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @str_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @str_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @str_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @str_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + @str_binary_op + def logical_and(self, v1, v2): + return bool(v1) and bool(v2) + + @str_binary_op + def logical_or(self, v1, v2): + return bool(v1) or bool(v2) + + @str_unary_op + def logical_not(self, v): + return not bool(v) + + @str_binary_op + def logical_xor(self, v1, v2): + return bool(v1) ^ bool(v2) + + def bool(self, v): + return bool(self.to_str(v)) + def build_and_convert(self, space, mydtype, box): assert isinstance(box, interp_boxes.W_GenericBox) if box.get_dtype(space).is_str_or_unicode(): @@ -1753,6 +1815,13 @@ arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + class VoidType(BaseType, BaseStringType): T = lltype.Char @@ -1798,12 +1867,6 @@ return W_NDimArray(implementation) NonNativeVoidType = VoidType -NonNativeStringType = StringType - -class UnicodeType(BaseType, BaseStringType): - T = lltype.UniChar - -NonNativeUnicodeType = UnicodeType class RecordType(BaseType): diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 2, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -131,10 +131,6 @@ # when trying to dispatch multimethods. 
# XXX build these lists a bit more automatically later - if config.objspace.usemodules.micronumpy: - from pypy.module.micronumpy.stdobjspace import register_delegates - register_delegates(self.typeorder) - self.typeorder[boolobject.W_BoolObject] += [ (intobject.W_IntObject, boolobject.delegate_Bool2IntObject), (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -8,7 +8,7 @@ # ====> test_rstr.py -class BaseTestRUnicode(AbstractTestRstr, BaseRtypingTest): +class TestRUnicode(AbstractTestRstr, BaseRtypingTest): const = unicode constchar = unichr From noreply at buildbot.pypy.org Fri Aug 2 12:19:11 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Aug 2013 12:19:11 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: extend cpyext to support PyArray Message-ID: <20130802101911.2E3E21C1352@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: pypy-pyarray Changeset: r65895:f2c984556c1d Date: 2013-08-02 12:44 +0300 http://bitbucket.org/pypy/pypy/changeset/f2c984556c1d/ Log: extend cpyext to support PyArray From noreply at buildbot.pypy.org Fri Aug 2 12:19:12 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Fri, 2 Aug 2013 12:19:12 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - Add cpyext implementation of Numpy PyArray_* C-API Message-ID: <20130802101912.48A4D1C1352@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r65896:afe17c743cf7 Date: 2013-07-28 15:12 +0200 http://bitbucket.org/pypy/pypy/changeset/afe17c743cf7/ Log: - Add cpyext implementation of Numpy PyArray_* C-API * pypy/module/cpyext/include/numpy/arrayobject.h * pypy/module/cpyext/ndarrayobject.py * pypy/module/cpyext/test/test_ndarrayobject.py - pypy/module/cpyext/api.py: copy_header_files() now copies the numpy subdirectory as well. - pypy/module/micronumpy/interp_dtype.py: DtypeCache.dtypes_by_num: * Keep in dictionary form, since otherwise not all dtypes can be reached. - lib_pypy/numpy.py, lib_pypy/numpypy/__init__.py: * "import numpy" now displays a warning but falls back to "import numpypy as numpy" *without* raising an ImportError. - pypy/module/cpyext/include/boolobject.h and complexobject.h: * Add #define's for PyIntObject and PyComplexObject. diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py --- a/lib_pypy/numpy.py +++ b/lib_pypy/numpy.py @@ -1,5 +1,14 @@ -raise ImportError( +import warnings + +warnings.warn( "The 'numpy' module of PyPy is in-development and not complete. " - "To try it out anyway, you can either import from 'numpypy', " - "or just write 'import numpypy' first in your program and then " - "import from 'numpy' as usual.") + "To avoid this warning, write 'import numpypy as numpy'. 
") + +from numpypy import * + +import os + +def get_include(): + head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) + return os.path.join(head, 'include') + diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py --- a/lib_pypy/numpypy/__init__.py +++ b/lib_pypy/numpypy/__init__.py @@ -10,5 +10,5 @@ __all__ += core.__all__ __all__ += lib.__all__ -import sys -sys.modules.setdefault('numpy', sys.modules['numpypy']) +#import sys +#sys.modules.setdefault('numpy', sys.modules['numpypy']) diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -36,6 +36,7 @@ import pypy.module.cpyext.object import pypy.module.cpyext.stringobject import pypy.module.cpyext.tupleobject +import pypy.module.cpyext.ndarrayobject import pypy.module.cpyext.setobject import pypy.module.cpyext.dictobject import pypy.module.cpyext.intobject diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -130,11 +130,7 @@ udir.join('pypy_macros.h').write("/* Will be filled later */\n") globals().update(rffi_platform.configure(CConfig_constants)) -def copy_header_files(dstdir): - assert dstdir.check(dir=True) - headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h"): - headers.append(udir.join(name)) +def _copy_header_files(headers, dstdir): for header in headers: target = dstdir.join(header.basename) try: @@ -145,6 +141,25 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake +def copy_header_files(dstdir): + # XXX: 20 lines of code to recursively copy a directory, really?? + assert dstdir.check(dir=True) + headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') + for name in ("pypy_decl.h", "pypy_macros.h"): + headers.append(udir.join(name)) + _copy_header_files(headers, dstdir) + + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) + + class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() diff --git a/pypy/module/cpyext/include/boolobject.h b/pypy/module/cpyext/include/boolobject.h --- a/pypy/module/cpyext/include/boolobject.h +++ b/pypy/module/cpyext/include/boolobject.h @@ -7,6 +7,8 @@ extern "C" { #endif +#define PyBoolObject PyIntObject + #define Py_False ((PyObject *) &_Py_ZeroStruct) #define Py_True ((PyObject *) &_Py_TrueStruct) diff --git a/pypy/module/cpyext/include/complexobject.h b/pypy/module/cpyext/include/complexobject.h --- a/pypy/module/cpyext/include/complexobject.h +++ b/pypy/module/cpyext/include/complexobject.h @@ -6,6 +6,9 @@ extern "C" { #endif +/* fake PyComplexObject so that code that doesn't do direct field access works */ +#define PyComplexObject PyObject + typedef struct Py_complex_t { double real; double imag; diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -0,0 +1,52 @@ + +/* NDArray object interface - S. H. 
Muller, 2013/07/26 */ + +#ifndef Py_NDARRAYOBJECT_H +#define Py_NDARRAYOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* fake PyArrayObject so that code that doesn't do direct field access works */ +#define PyArrayObject PyObject + +#ifndef npy_intp +#define npy_intp long +#endif +#ifndef import_array +#define import_array() +#endif + +/* copied from numpy/ndarraytypes.h + * keep numbers in sync with micronumpy.interp_dtype.DTypeCache + */ +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. + */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_NDARRAYOBJECT_H */ diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/ndarrayobject.py @@ -0,0 +1,106 @@ +""" +Numpy C-API for PyPy - S. H. Muller, 2013/07/26 +""" + +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL +from pypy.module.cpyext.pyobject import PyObject +from pypy.module.micronumpy.interp_numarray import W_NDimArray, convert_to_array +from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.arrayimpl.scalar import Scalar +from rpython.rlib.rawstorage import RAW_STORAGE_PTR + +# the asserts are needed, otherwise the translation fails + + at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) +def PyArray_NDIM(space, w_array): + assert isinstance(w_array, W_NDimArray) + return len(w_array.get_shape()) + + at cpython_api([PyObject, Py_ssize_t], Py_ssize_t, error=CANNOT_FAIL) +def PyArray_DIM(space, w_array, n): + assert isinstance(w_array, W_NDimArray) + return w_array.get_shape()[n] + + at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) +def PyArray_SIZE(space, w_array): + assert isinstance(w_array, W_NDimArray) + return w_array.get_size() + + at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) +def PyArray_ITEMSIZE(space, w_array): + assert isinstance(w_array, W_NDimArray) + return w_array.get_dtype().get_size() + + at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) +def PyArray_NBYTES(space, w_array): + assert isinstance(w_array, W_NDimArray) + return w_array.get_size() * w_array.get_dtype().get_size() + + at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) +def PyArray_TYPE(space, w_array): + assert isinstance(w_array, W_NDimArray) + return w_array.get_dtype().num + + + at cpython_api([PyObject], rffi.VOIDP, error=CANNOT_FAIL) +def PyArray_DATA(space, w_array): + # fails on scalars - see PyArray_FromAny() + assert isinstance(w_array, W_NDimArray) + return rffi.cast(rffi.VOIDP, w_array.implementation.storage) + + + at cpython_api([PyObject, rffi.VOIDP, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.VOIDP], + PyObject) +def PyArray_FromAny(space, w_obj, dtype, min_depth, max_depth, requirements, context): + # ignore all additional arguments for now + w_array = convert_to_array(space, w_obj) + if w_array.is_scalar(): + # since PyArray_DATA() fails 
on scalars, create a 1D array and set empty + # shape. So the following combination works for *reading* scalars: + # PyObject *arr = PyArray_FromAny(obj); + # int nd = PyArray_NDIM(arr); + # void *data = PyArray_DATA(arr); + impl = w_array.implementation + w_array = W_NDimArray.from_shape(space, [1], impl.dtype) + w_array.implementation.setitem(0, impl.value) + w_array.implementation.shape = [] + return w_array + + + at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) +def PyArray_SimpleNew(space, nd, dims, typenum): + dtype = get_dtype_cache(space).dtypes_by_num[typenum] + shape = [] + for i in range(nd): + # back-and-forth wrapping needed to translate + shape.append(space.int_w(space.wrap(dims[i]))) + + return W_NDimArray.from_shape(space, shape, dtype) + + +def simple_new_from_data(space, nd, dims, typenum, data, owning): + dtype = get_dtype_cache(space).dtypes_by_num[typenum] + storage = rffi.cast(RAW_STORAGE_PTR, data) + if nd == 0: + w_val = dtype.itemtype.box_raw_data(storage) + return W_NDimArray(Scalar(dtype, w_val)) + else: + shape = [] + for i in range(nd): + # back-and-forth wrapping needed to translate + shape.append(space.int_w(space.wrap(dims[i]))) + + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, owning=owning) + + at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject) +def PyArray_SimpleNewFromData(space, nd, dims, typenum, data): + return simple_new_from_data(space, nd, dims, typenum, data, owning=False) + + at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject) +def PyArray_SimpleNewFromDataOwning(space, nd, dims, typenum, data): + # Variant to take over ownership of the memory, equivalent to: + # PyObject *arr = PyArray_SimpleNewFromData(nd, dims, typenum, data); + # ((PyArrayObject*)arr)->flags |= NPY_OWNDATA; + return simple_new_from_data(space, nd, dims, typenum, data, owning=True) + diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -0,0 +1,179 @@ +import py + +from pypy.module.cpyext.pyobject import PyObject +from pypy.module.cpyext.test.test_api import BaseApiTest +from rpython.rtyper.lltypesystem import rffi, lltype + +from pypy.module.micronumpy.interp_numarray import W_NDimArray +from pypy.module.micronumpy.interp_dtype import get_dtype_cache + +def scalar(space): + dtype = get_dtype_cache(space).w_float64dtype + return W_NDimArray.new_scalar(space, dtype, space.wrap(10.)) + +def array(space, shape): + dtype = get_dtype_cache(space).w_float64dtype + return W_NDimArray.from_shape(space, shape, dtype, order='C') + +def iarray(space, shape): + dtype = get_dtype_cache(space).w_int64dtype + return W_NDimArray.from_shape(space, shape, dtype, order='C') + + +NULL = lltype.nullptr(rffi.VOIDP.TO) + +class TestNDArrayObject(BaseApiTest): + + def test_NDIM(self, space, api): + a = array(space, [10, 5, 3]) + assert api.PyArray_NDIM(a) == 3 + + def test_DIM(self, space, api): + a = array(space, [10, 5, 3]) + assert api.PyArray_DIM(a, 1) == 5 + + def test_SIZE(self, space, api): + a = array(space, [10, 5, 3]) + assert api.PyArray_SIZE(a) == 150 + + def test_ITEMSIZE(self, space, api): + a = array(space, [10, 5, 3]) + assert api.PyArray_ITEMSIZE(a) == 8 + + def test_NBYTES(self, space, api): + a = array(space, [10, 5, 3]) + assert api.PyArray_NBYTES(a) == 1200 + + def test_TYPE(self, space, api): + a = array(space, [10, 5, 3]) + assert api.PyArray_TYPE(a) 
== 12 + + def test_DATA(self, space, api): + a = array(space, [10, 5, 3]) + addr = api.PyArray_DATA(a) + addr2 = rffi.cast(rffi.VOIDP, a.implementation.storage) + assert addr == addr2 + + def test_FromAny_scalar(self, space, api): + a0 = scalar(space) + assert a0.implementation.get_scalar_value().value == 10. + + a = api.PyArray_FromAny(a0, NULL, 0, 0, 0, NULL) + assert api.PyArray_NDIM(a) == 0 + + ptr = rffi.cast(rffi.DOUBLEP, api.PyArray_DATA(a)) + assert ptr[0] == 10. + + def test_FromAny(self, space, api): + a = array(space, [10, 5, 3]) + assert api.PyArray_FromAny(a, NULL, 0, 0, 0, NULL) is a + + def test_list_from_fixedptr(self, space, api): + A = lltype.GcArray(lltype.Float) + ptr = lltype.malloc(A, 3) + assert isinstance(ptr, lltype._ptr) + ptr[0] = 10. + ptr[1] = 5. + ptr[2] = 3. + l = list(ptr) + assert l == [10., 5., 3.] + + def test_list_from_openptr(self, space, api): + nd = 3 + a = array(space, [nd]) + ptr = rffi.cast(rffi.DOUBLEP, api.PyArray_DATA(a)) + ptr[0] = 10. + ptr[1] = 5. + ptr[2] = 3. + l = [] + for i in range(nd): + l.append(ptr[i]) + assert l == [10., 5., 3.] + + def test_SimpleNew_scalar(self, space, api): + ptr_s = lltype.nullptr(rffi.LONGP.TO) + a = api.PyArray_SimpleNew(0, ptr_s, 12) + + dtype = get_dtype_cache(space).w_float64dtype + + a.set_scalar_value(dtype.itemtype.box(10.)) + assert a.get_scalar_value().value == 10. + + def test_SimpleNewFromData_scalar(self, space, api): + a = array(space, [1]) + num = api.PyArray_TYPE(a) + ptr_a = api.PyArray_DATA(a) + + x = rffi.cast(rffi.DOUBLEP, ptr_a) + x[0] = float(10.) + + ptr_s = lltype.nullptr(rffi.LONGP.TO) + + res = api.PyArray_SimpleNewFromData(0, ptr_s, num, ptr_a) + assert res.is_scalar() + assert res.get_scalar_value().value == 10. + + def test_SimpleNew(self, space, api): + shape = [10, 5, 3] + nd = len(shape) + + s = iarray(space, [nd]) + ptr_s = rffi.cast(rffi.LONGP, api.PyArray_DATA(s)) + ptr_s[0] = 10 + ptr_s[1] = 5 + ptr_s[2] = 3 + + a = api.PyArray_SimpleNew(nd, ptr_s, 12) + + #assert list(api.PyArray_DIMS(a))[:3] == shape + + ptr_a = api.PyArray_DATA(a) + + x = rffi.cast(rffi.DOUBLEP, ptr_a) + for i in range(150): + x[i] = float(i) + + for i in range(150): + assert x[i] == float(i) + + def test_SimpleNewFromData(self, space, api): + shape = [10, 5, 3] + nd = len(shape) + + s = iarray(space, [nd]) + ptr_s = rffi.cast(rffi.LONGP, api.PyArray_DATA(s)) + ptr_s[0] = 10 + ptr_s[1] = 5 + ptr_s[2] = 3 + + a = array(space, shape) + num = api.PyArray_TYPE(a) + ptr_a = api.PyArray_DATA(a) + + x = rffi.cast(rffi.DOUBLEP, ptr_a) + for i in range(150): + x[i] = float(i) + + res = api.PyArray_SimpleNewFromData(nd, ptr_s, num, ptr_a) + assert api.PyArray_TYPE(res) == num + assert api.PyArray_DATA(res) == ptr_a + for i in range(nd): + assert api.PyArray_DIM(res, i) == shape[i] + ptr_r = rffi.cast(rffi.DOUBLEP, api.PyArray_DATA(res)) + for i in range(150): + assert ptr_r[i] == float(i) + + def test_SimpleNewFromData_complex(self, space, api): + a = array(space, [2]) + ptr_a = api.PyArray_DATA(a) + + x = rffi.cast(rffi.DOUBLEP, ptr_a) + x[0] = 3. + x[1] = 4. + + ptr_s = lltype.nullptr(rffi.LONGP.TO) + + res = api.PyArray_SimpleNewFromData(0, ptr_s, 15, ptr_a) + assert res.get_scalar_value().real == 3. + assert res.get_scalar_value().imag == 4. 
+ diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -804,10 +804,12 @@ self.dtypes_by_name[alias] = dtype self.dtypes_by_name[dtype.char] = dtype - self.dtypes_by_num = [dtype for dtype in - sorted(self.dtypes_by_num.values(), key=lambda dtype: dtype.num) - if dtype.num <= self.w_float64dtype.num] - assert len(self.dtypes_by_num) == self.w_float64dtype.num + 1 + # shmuller 2013/07/22: Cannot find complex data types after conversion to + # list! Solution: Keep in dictionary form. + #self.dtypes_by_num = [dtype for dtype in + # sorted(self.dtypes_by_num.values(), key=lambda dtype: dtype.num) + # if dtype.num <= self.w_float64dtype.num] + #assert len(self.dtypes_by_num) == self.w_float64dtype.num + 1 typeinfo_full = { 'LONGLONG': self.w_int64dtype, From noreply at buildbot.pypy.org Fri Aug 2 12:19:13 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Fri, 2 Aug 2013 12:19:13 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/ndarrayobject.py: Rename PyArray_* routines to _PyArray_* to Message-ID: <20130802101913.3B71A1C1352@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r65897:bf28f2841084 Date: 2013-07-28 21:49 +0200 http://bitbucket.org/pypy/pypy/changeset/bf28f2841084/ Log: - cpyext/ndarrayobject.py: Rename PyArray_* routines to _PyArray_* to avoid name clashes with other implementations of numpy in the future. diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -17,6 +17,23 @@ #define import_array() #endif +#ifndef PyArray_NDIM + +#define PyArray_NDIM _PyArray_NDIM +#define PyArray_DIM _PyArray_DIM +#define PyArray_SIZE _PyArray_SIZE +#define PyArray_ITEMSIZE _PyArray_ITEMSIZE +#define PyArray_NBYTES _PyArray_NBYTES +#define PyArray_TYPE _PyArray_TYPE +#define PyArray_DATA _PyArray_DATA +#define PyArray_FromAny _PyArray_FromAny + +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData +#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning + +#endif + /* copied from numpy/ndarraytypes.h * keep numbers in sync with micronumpy.interp_dtype.DTypeCache */ diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -13,38 +13,38 @@ # the asserts are needed, otherwise the translation fails @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) -def PyArray_NDIM(space, w_array): +def _PyArray_NDIM(space, w_array): assert isinstance(w_array, W_NDimArray) return len(w_array.get_shape()) @cpython_api([PyObject, Py_ssize_t], Py_ssize_t, error=CANNOT_FAIL) -def PyArray_DIM(space, w_array, n): +def _PyArray_DIM(space, w_array, n): assert isinstance(w_array, W_NDimArray) return w_array.get_shape()[n] @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) -def PyArray_SIZE(space, w_array): +def _PyArray_SIZE(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_size() @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) -def PyArray_ITEMSIZE(space, w_array): +def _PyArray_ITEMSIZE(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_dtype().get_size() @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) -def 
PyArray_NBYTES(space, w_array): +def _PyArray_NBYTES(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_size() * w_array.get_dtype().get_size() @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) -def PyArray_TYPE(space, w_array): +def _PyArray_TYPE(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_dtype().num @cpython_api([PyObject], rffi.VOIDP, error=CANNOT_FAIL) -def PyArray_DATA(space, w_array): +def _PyArray_DATA(space, w_array): # fails on scalars - see PyArray_FromAny() assert isinstance(w_array, W_NDimArray) return rffi.cast(rffi.VOIDP, w_array.implementation.storage) @@ -52,7 +52,7 @@ @cpython_api([PyObject, rffi.VOIDP, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.VOIDP], PyObject) -def PyArray_FromAny(space, w_obj, dtype, min_depth, max_depth, requirements, context): +def _PyArray_FromAny(space, w_obj, dtype, min_depth, max_depth, requirements, context): # ignore all additional arguments for now w_array = convert_to_array(space, w_obj) if w_array.is_scalar(): @@ -69,7 +69,7 @@ @cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) -def PyArray_SimpleNew(space, nd, dims, typenum): +def _PyArray_SimpleNew(space, nd, dims, typenum): dtype = get_dtype_cache(space).dtypes_by_num[typenum] shape = [] for i in range(nd): @@ -94,11 +94,11 @@ return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, owning=owning) @cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject) -def PyArray_SimpleNewFromData(space, nd, dims, typenum, data): +def _PyArray_SimpleNewFromData(space, nd, dims, typenum, data): return simple_new_from_data(space, nd, dims, typenum, data, owning=False) @cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject) -def PyArray_SimpleNewFromDataOwning(space, nd, dims, typenum, data): +def _PyArray_SimpleNewFromDataOwning(space, nd, dims, typenum, data): # Variant to take over ownership of the memory, equivalent to: # PyObject *arr = PyArray_SimpleNewFromData(nd, dims, typenum, data); # ((PyArrayObject*)arr)->flags |= NPY_OWNDATA; diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -26,31 +26,31 @@ def test_NDIM(self, space, api): a = array(space, [10, 5, 3]) - assert api.PyArray_NDIM(a) == 3 + assert api._PyArray_NDIM(a) == 3 def test_DIM(self, space, api): a = array(space, [10, 5, 3]) - assert api.PyArray_DIM(a, 1) == 5 + assert api._PyArray_DIM(a, 1) == 5 def test_SIZE(self, space, api): a = array(space, [10, 5, 3]) - assert api.PyArray_SIZE(a) == 150 + assert api._PyArray_SIZE(a) == 150 def test_ITEMSIZE(self, space, api): a = array(space, [10, 5, 3]) - assert api.PyArray_ITEMSIZE(a) == 8 + assert api._PyArray_ITEMSIZE(a) == 8 def test_NBYTES(self, space, api): a = array(space, [10, 5, 3]) - assert api.PyArray_NBYTES(a) == 1200 + assert api._PyArray_NBYTES(a) == 1200 def test_TYPE(self, space, api): a = array(space, [10, 5, 3]) - assert api.PyArray_TYPE(a) == 12 + assert api._PyArray_TYPE(a) == 12 def test_DATA(self, space, api): a = array(space, [10, 5, 3]) - addr = api.PyArray_DATA(a) + addr = api._PyArray_DATA(a) addr2 = rffi.cast(rffi.VOIDP, a.implementation.storage) assert addr == addr2 @@ -58,15 +58,15 @@ a0 = scalar(space) assert a0.implementation.get_scalar_value().value == 10. 
- a = api.PyArray_FromAny(a0, NULL, 0, 0, 0, NULL) - assert api.PyArray_NDIM(a) == 0 + a = api._PyArray_FromAny(a0, NULL, 0, 0, 0, NULL) + assert api._PyArray_NDIM(a) == 0 - ptr = rffi.cast(rffi.DOUBLEP, api.PyArray_DATA(a)) + ptr = rffi.cast(rffi.DOUBLEP, api._PyArray_DATA(a)) assert ptr[0] == 10. def test_FromAny(self, space, api): a = array(space, [10, 5, 3]) - assert api.PyArray_FromAny(a, NULL, 0, 0, 0, NULL) is a + assert api._PyArray_FromAny(a, NULL, 0, 0, 0, NULL) is a def test_list_from_fixedptr(self, space, api): A = lltype.GcArray(lltype.Float) @@ -81,7 +81,7 @@ def test_list_from_openptr(self, space, api): nd = 3 a = array(space, [nd]) - ptr = rffi.cast(rffi.DOUBLEP, api.PyArray_DATA(a)) + ptr = rffi.cast(rffi.DOUBLEP, api._PyArray_DATA(a)) ptr[0] = 10. ptr[1] = 5. ptr[2] = 3. @@ -92,7 +92,7 @@ def test_SimpleNew_scalar(self, space, api): ptr_s = lltype.nullptr(rffi.LONGP.TO) - a = api.PyArray_SimpleNew(0, ptr_s, 12) + a = api._PyArray_SimpleNew(0, ptr_s, 12) dtype = get_dtype_cache(space).w_float64dtype @@ -101,15 +101,15 @@ def test_SimpleNewFromData_scalar(self, space, api): a = array(space, [1]) - num = api.PyArray_TYPE(a) - ptr_a = api.PyArray_DATA(a) + num = api._PyArray_TYPE(a) + ptr_a = api._PyArray_DATA(a) x = rffi.cast(rffi.DOUBLEP, ptr_a) x[0] = float(10.) ptr_s = lltype.nullptr(rffi.LONGP.TO) - res = api.PyArray_SimpleNewFromData(0, ptr_s, num, ptr_a) + res = api._PyArray_SimpleNewFromData(0, ptr_s, num, ptr_a) assert res.is_scalar() assert res.get_scalar_value().value == 10. @@ -118,16 +118,16 @@ nd = len(shape) s = iarray(space, [nd]) - ptr_s = rffi.cast(rffi.LONGP, api.PyArray_DATA(s)) + ptr_s = rffi.cast(rffi.LONGP, api._PyArray_DATA(s)) ptr_s[0] = 10 ptr_s[1] = 5 ptr_s[2] = 3 - a = api.PyArray_SimpleNew(nd, ptr_s, 12) + a = api._PyArray_SimpleNew(nd, ptr_s, 12) - #assert list(api.PyArray_DIMS(a))[:3] == shape + #assert list(api._PyArray_DIMS(a))[:3] == shape - ptr_a = api.PyArray_DATA(a) + ptr_a = api._PyArray_DATA(a) x = rffi.cast(rffi.DOUBLEP, ptr_a) for i in range(150): @@ -141,31 +141,31 @@ nd = len(shape) s = iarray(space, [nd]) - ptr_s = rffi.cast(rffi.LONGP, api.PyArray_DATA(s)) + ptr_s = rffi.cast(rffi.LONGP, api._PyArray_DATA(s)) ptr_s[0] = 10 ptr_s[1] = 5 ptr_s[2] = 3 a = array(space, shape) - num = api.PyArray_TYPE(a) - ptr_a = api.PyArray_DATA(a) + num = api._PyArray_TYPE(a) + ptr_a = api._PyArray_DATA(a) x = rffi.cast(rffi.DOUBLEP, ptr_a) for i in range(150): x[i] = float(i) - res = api.PyArray_SimpleNewFromData(nd, ptr_s, num, ptr_a) - assert api.PyArray_TYPE(res) == num - assert api.PyArray_DATA(res) == ptr_a + res = api._PyArray_SimpleNewFromData(nd, ptr_s, num, ptr_a) + assert api._PyArray_TYPE(res) == num + assert api._PyArray_DATA(res) == ptr_a for i in range(nd): - assert api.PyArray_DIM(res, i) == shape[i] - ptr_r = rffi.cast(rffi.DOUBLEP, api.PyArray_DATA(res)) + assert api._PyArray_DIM(res, i) == shape[i] + ptr_r = rffi.cast(rffi.DOUBLEP, api._PyArray_DATA(res)) for i in range(150): assert ptr_r[i] == float(i) def test_SimpleNewFromData_complex(self, space, api): a = array(space, [2]) - ptr_a = api.PyArray_DATA(a) + ptr_a = api._PyArray_DATA(a) x = rffi.cast(rffi.DOUBLEP, ptr_a) x[0] = 3. @@ -173,7 +173,7 @@ ptr_s = lltype.nullptr(rffi.LONGP.TO) - res = api.PyArray_SimpleNewFromData(0, ptr_s, 15, ptr_a) + res = api._PyArray_SimpleNewFromData(0, ptr_s, 15, ptr_a) assert res.get_scalar_value().real == 3. assert res.get_scalar_value().imag == 4. 
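The changesets above give PyPy's cpyext a small numpy-compatible C shim: arrayobject.h fakes PyArrayObject as a plain PyObject and forwards the usual PyArray_* names to the _PyArray_* functions implemented in ndarrayobject.py, so existing extension sources keep compiling against the supported subset. A minimal consumer of that shim might look like the sketch below; this is illustrative only (not part of any changeset), the function name "sum_doubles" and its error handling are made up for the example, and only calls that appear in the diffs above are used:

    /* sketch.c -- illustrative only; assumes compilation against PyPy's
     * include/numpy/arrayobject.h shim described in the changesets above. */
    #include <Python.h>
    #include <numpy/arrayobject.h>

    static PyObject *
    sum_doubles(PyObject *self, PyObject *obj)
    {
        /* The dtype/depth/requirements arguments are ignored by the shim,
         * so pass NULL/0 as the tests above do. */
        PyObject *arr = PyArray_FromAny(obj, NULL, 0, 0, 0, NULL);
        if (arr == NULL)
            return NULL;
        if (PyArray_TYPE(arr) != NPY_DOUBLE) {
            Py_DECREF(arr);
            PyErr_SetString(PyExc_TypeError, "expected a float64 array");
            return NULL;
        }
        /* PyArray_FromAny special-cases scalars precisely so that
         * PyArray_DATA() on its result is always readable. */
        double *data = (double *)PyArray_DATA(arr);
        Py_ssize_t i, n = PyArray_SIZE(arr);
        double total = 0.0;
        for (i = 0; i < n; i++)
            total += data[i];
        Py_DECREF(arr);
        return PyFloat_FromDouble(total);
    }

As with real numpy, import_array() would still be invoked from the module init function; in the shim header it simply expands to a no-op.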
From noreply at buildbot.pypy.org Fri Aug 2 12:19:14 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Fri, 2 Aug 2013 12:19:14 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/ndarrayobject.py: Add support for PyArray_STRIDE() and Message-ID: <20130802101914.263B11C1352@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r65898:e5e7c8d419cc Date: 2013-07-30 11:59 +0200 http://bitbucket.org/pypy/pypy/changeset/e5e7c8d419cc/ Log: - cpyext/ndarrayobject.py: Add support for PyArray_STRIDE() and PyArray_FromObject(). - cpyext/include/numpy: Add constants needed by matplotlib. - cpyext/include/complexobject.h: Replace macro with function for const correctness. - lib_pypy/numpy.py: Add __version__. I felt like it's 1.6.2. diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py --- a/lib_pypy/numpy.py +++ b/lib_pypy/numpy.py @@ -8,6 +8,8 @@ import os +__version__ = '1.6.2' + def get_include(): head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) return os.path.join(head, 'include') diff --git a/pypy/module/cpyext/include/complexobject.h b/pypy/module/cpyext/include/complexobject.h --- a/pypy/module/cpyext/include/complexobject.h +++ b/pypy/module/cpyext/include/complexobject.h @@ -16,6 +16,7 @@ /* generated function */ PyAPI_FUNC(void) _PyComplex_AsCComplex(PyObject *, Py_complex *); +PyAPI_FUNC(PyObject *) _PyComplex_FromCComplex(Py_complex *); Py_LOCAL_INLINE(Py_complex) PyComplex_AsCComplex(PyObject *obj) { @@ -24,7 +25,12 @@ return result; } -#define PyComplex_FromCComplex(c) _PyComplex_FromCComplex(&c) +// shmuller 2013/07/30: Make a function, since macro will fail in C++ due to +// const correctness if called with "const Py_complex" +//#define PyComplex_FromCComplex(c) _PyComplex_FromCComplex(&c) +PyObject *PyComplex_FromCComplex(Py_complex c) { + return _PyComplex_FromCComplex(&c); +} #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -7,6 +7,10 @@ extern "C" { #endif +#include "old_defines.h" + +#define NPY_INLINE + /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -19,14 +23,20 @@ #ifndef PyArray_NDIM +#define PyArray_ISCONTIGUOUS(arr) (1) + #define PyArray_NDIM _PyArray_NDIM #define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE #define PyArray_SIZE _PyArray_SIZE #define PyArray_ITEMSIZE _PyArray_ITEMSIZE #define PyArray_NBYTES _PyArray_NBYTES #define PyArray_TYPE _PyArray_TYPE #define PyArray_DATA _PyArray_DATA -#define PyArray_FromAny _PyArray_FromAny + +#define PyArray_FromAny _PyArray_FromAny +#define PyArray_FromObject _PyArray_FromObject +#define PyArray_ContiguousFromObject PyArray_FromObject #define PyArray_SimpleNew _PyArray_SimpleNew #define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData @@ -63,6 +73,19 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/numpy/npy_3kcompat.h b/pypy/module/cpyext/include/numpy/npy_3kcompat.h new 
file mode 100644 diff --git a/pypy/module/cpyext/include/numpy/old_defines.h b/pypy/module/cpyext/include/numpy/old_defines.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/old_defines.h @@ -0,0 +1,187 @@ +/* This header is deprecated as of NumPy 1.7 */ +#ifndef OLD_DEFINES_H +#define OLD_DEFINES_H + +#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION +#error The header "old_defines.h" is deprecated as of NumPy 1.7. +#endif + +#define NDARRAY_VERSION NPY_VERSION + +#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE +#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE +#define PyArray_BUFSIZE NPY_BUFSIZE + +#define PyArray_PRIORITY NPY_PRIORITY +#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY +#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE + +#define NPY_MAX PyArray_MAX +#define NPY_MIN PyArray_MIN + +#define PyArray_TYPES NPY_TYPES +#define PyArray_BOOL NPY_BOOL +#define PyArray_BYTE NPY_BYTE +#define PyArray_UBYTE NPY_UBYTE +#define PyArray_SHORT NPY_SHORT +#define PyArray_USHORT NPY_USHORT +#define PyArray_INT NPY_INT +#define PyArray_UINT NPY_UINT +#define PyArray_LONG NPY_LONG +#define PyArray_ULONG NPY_ULONG +#define PyArray_LONGLONG NPY_LONGLONG +#define PyArray_ULONGLONG NPY_ULONGLONG +#define PyArray_HALF NPY_HALF +#define PyArray_FLOAT NPY_FLOAT +#define PyArray_DOUBLE NPY_DOUBLE +#define PyArray_LONGDOUBLE NPY_LONGDOUBLE +#define PyArray_CFLOAT NPY_CFLOAT +#define PyArray_CDOUBLE NPY_CDOUBLE +#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE +#define PyArray_OBJECT NPY_OBJECT +#define PyArray_STRING NPY_STRING +#define PyArray_UNICODE NPY_UNICODE +#define PyArray_VOID NPY_VOID +#define PyArray_DATETIME NPY_DATETIME +#define PyArray_TIMEDELTA NPY_TIMEDELTA +#define PyArray_NTYPES NPY_NTYPES +#define PyArray_NOTYPE NPY_NOTYPE +#define PyArray_CHAR NPY_CHAR +#define PyArray_USERDEF NPY_USERDEF +#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES + +#define PyArray_INTP NPY_INTP +#define PyArray_UINTP NPY_UINTP + +#define PyArray_INT8 NPY_INT8 +#define PyArray_UINT8 NPY_UINT8 +#define PyArray_INT16 NPY_INT16 +#define PyArray_UINT16 NPY_UINT16 +#define PyArray_INT32 NPY_INT32 +#define PyArray_UINT32 NPY_UINT32 + +#ifdef NPY_INT64 +#define PyArray_INT64 NPY_INT64 +#define PyArray_UINT64 NPY_UINT64 +#endif + +#ifdef NPY_INT128 +#define PyArray_INT128 NPY_INT128 +#define PyArray_UINT128 NPY_UINT128 +#endif + +#ifdef NPY_FLOAT16 +#define PyArray_FLOAT16 NPY_FLOAT16 +#define PyArray_COMPLEX32 NPY_COMPLEX32 +#endif + +#ifdef NPY_FLOAT80 +#define PyArray_FLOAT80 NPY_FLOAT80 +#define PyArray_COMPLEX160 NPY_COMPLEX160 +#endif + +#ifdef NPY_FLOAT96 +#define PyArray_FLOAT96 NPY_FLOAT96 +#define PyArray_COMPLEX192 NPY_COMPLEX192 +#endif + +#ifdef NPY_FLOAT128 +#define PyArray_FLOAT128 NPY_FLOAT128 +#define PyArray_COMPLEX256 NPY_COMPLEX256 +#endif + +#define PyArray_FLOAT32 NPY_FLOAT32 +#define PyArray_COMPLEX64 NPY_COMPLEX64 +#define PyArray_FLOAT64 NPY_FLOAT64 +#define PyArray_COMPLEX128 NPY_COMPLEX128 + + +#define PyArray_TYPECHAR NPY_TYPECHAR +#define PyArray_BOOLLTR NPY_BOOLLTR +#define PyArray_BYTELTR NPY_BYTELTR +#define PyArray_UBYTELTR NPY_UBYTELTR +#define PyArray_SHORTLTR NPY_SHORTLTR +#define PyArray_USHORTLTR NPY_USHORTLTR +#define PyArray_INTLTR NPY_INTLTR +#define PyArray_UINTLTR NPY_UINTLTR +#define PyArray_LONGLTR NPY_LONGLTR +#define PyArray_ULONGLTR NPY_ULONGLTR +#define PyArray_LONGLONGLTR NPY_LONGLONGLTR +#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR +#define PyArray_HALFLTR NPY_HALFLTR +#define PyArray_FLOATLTR NPY_FLOATLTR +#define 
PyArray_DOUBLELTR NPY_DOUBLELTR +#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR +#define PyArray_CFLOATLTR NPY_CFLOATLTR +#define PyArray_CDOUBLELTR NPY_CDOUBLELTR +#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR +#define PyArray_OBJECTLTR NPY_OBJECTLTR +#define PyArray_STRINGLTR NPY_STRINGLTR +#define PyArray_STRINGLTR2 NPY_STRINGLTR2 +#define PyArray_UNICODELTR NPY_UNICODELTR +#define PyArray_VOIDLTR NPY_VOIDLTR +#define PyArray_DATETIMELTR NPY_DATETIMELTR +#define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR +#define PyArray_CHARLTR NPY_CHARLTR +#define PyArray_INTPLTR NPY_INTPLTR +#define PyArray_UINTPLTR NPY_UINTPLTR +#define PyArray_GENBOOLLTR NPY_GENBOOLLTR +#define PyArray_SIGNEDLTR NPY_SIGNEDLTR +#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR +#define PyArray_FLOATINGLTR NPY_FLOATINGLTR +#define PyArray_COMPLEXLTR NPY_COMPLEXLTR + +#define PyArray_QUICKSORT NPY_QUICKSORT +#define PyArray_HEAPSORT NPY_HEAPSORT +#define PyArray_MERGESORT NPY_MERGESORT +#define PyArray_SORTKIND NPY_SORTKIND +#define PyArray_NSORTS NPY_NSORTS + +#define PyArray_NOSCALAR NPY_NOSCALAR +#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR +#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR +#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR +#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR +#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR +#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR +#define PyArray_SCALARKIND NPY_SCALARKIND +#define PyArray_NSCALARKINDS NPY_NSCALARKINDS + +#define PyArray_ANYORDER NPY_ANYORDER +#define PyArray_CORDER NPY_CORDER +#define PyArray_FORTRANORDER NPY_FORTRANORDER +#define PyArray_ORDER NPY_ORDER + +#define PyDescr_ISBOOL PyDataType_ISBOOL +#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED +#define PyDescr_ISSIGNED PyDataType_ISSIGNED +#define PyDescr_ISINTEGER PyDataType_ISINTEGER +#define PyDescr_ISFLOAT PyDataType_ISFLOAT +#define PyDescr_ISNUMBER PyDataType_ISNUMBER +#define PyDescr_ISSTRING PyDataType_ISSTRING +#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX +#define PyDescr_ISPYTHON PyDataType_ISPYTHON +#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE +#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF +#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED +#define PyDescr_ISOBJECT PyDataType_ISOBJECT +#define PyDescr_HASFIELDS PyDataType_HASFIELDS + +#define PyArray_LITTLE NPY_LITTLE +#define PyArray_BIG NPY_BIG +#define PyArray_NATIVE NPY_NATIVE +#define PyArray_SWAP NPY_SWAP +#define PyArray_IGNORE NPY_IGNORE + +#define PyArray_NATBYTE NPY_NATBYTE +#define PyArray_OPPBYTE NPY_OPPBYTE + +#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE + +#define PyArray_USE_PYMEM NPY_USE_PYMEM + +#define PyArray_RemoveLargest PyArray_RemoveSmallest + +#define PyArray_UCS4 npy_ucs4 + +#endif diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -5,7 +5,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL from pypy.module.cpyext.pyobject import PyObject -from pypy.module.micronumpy.interp_numarray import W_NDimArray, convert_to_array +from pypy.module.micronumpy.interp_numarray import W_NDimArray, convert_to_array, wrap_impl from pypy.module.micronumpy.interp_dtype import get_dtype_cache from pypy.module.micronumpy.arrayimpl.scalar import Scalar from rpython.rlib.rawstorage import RAW_STORAGE_PTR @@ -22,6 +22,11 @@ assert isinstance(w_array, W_NDimArray) return w_array.get_shape()[n] + at cpython_api([PyObject, Py_ssize_t], 
Py_ssize_t, error=CANNOT_FAIL) +def _PyArray_STRIDE(space, w_array, n): + assert isinstance(w_array, W_NDimArray) + return w_array.implementation.get_strides()[n] + @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def _PyArray_SIZE(space, w_array): assert isinstance(w_array, W_NDimArray) @@ -67,6 +72,18 @@ w_array.implementation.shape = [] return w_array + at cpython_api([PyObject, Py_ssize_t, Py_ssize_t, Py_ssize_t], PyObject) +def _PyArray_FromObject(space, w_obj, typenum, min_depth, max_depth): + # ignore min_depth and max_depth for now + dtype = get_dtype_cache(space).dtypes_by_num[typenum] + w_array = convert_to_array(space, w_obj) + impl = w_array.implementation + if w_array.is_scalar(): + return W_NDimArray.new_scalar(space, dtype, impl.value) + else: + new_impl = impl.astype(space, dtype) + return wrap_impl(space, space.type(w_array), w_array, new_impl) + @cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) def _PyArray_SimpleNew(space, nd, dims, typenum): From noreply at buildbot.pypy.org Fri Aug 2 12:19:15 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 Aug 2013 12:19:15 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: numpypy version Message-ID: <20130802101915.0BE781C1352@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: pypy-pyarray Changeset: r65899:f14e4bc3790f Date: 2013-08-02 13:16 +0300 http://bitbucket.org/pypy/pypy/changeset/f14e4bc3790f/ Log: numpypy version diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py --- a/lib_pypy/numpy.py +++ b/lib_pypy/numpy.py @@ -8,7 +8,7 @@ import os -__version__ = '1.6.2' +__version__ = '1.7' def get_include(): head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) From noreply at buildbot.pypy.org Fri Aug 2 13:38:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 13:38:39 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix annotation error in test_ztranslation Message-ID: <20130802113839.EA1401C34E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: refactor-str-types Changeset: r65900:1253eceb9564 Date: 2013-08-02 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/1253eceb9564/ Log: Fix annotation error in test_ztranslation diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floattype import float_typedef -from pypy.objspace.std.unicodeobject import W_UnicodeObject, unicode_from_object +from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.inttype import int_typedef from pypy.objspace.std.complextype import complex_typedef from rpython.rlib.rarithmetic import LONG_BIT @@ -398,7 +398,7 @@ def descr__new__unicode_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_unicode_dtype - arg = space.unicode_w(unicode_from_object(space, w_arg)) + arg = space.unicode_w(space.unicode_from_object(w_arg)) # XXX size computations, we need tests anyway arr = VoidBoxStorage(len(arg), new_unicode_dtype(space, len(arg))) # XXX not this way, we need store diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -290,6 +290,9 @@ ec._py_repr = None return ec + def unicode_from_object(self, w_obj): + return w_some_obj() + # 
---------- def translates(self, func=None, argtypes=None, seeobj_w=[], **kwds): diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -586,6 +586,10 @@ return w_obj.intval return ObjSpace.getindex_w(self, w_obj, w_exception, objdescr) + def unicode_from_object(self, w_obj): + from pypy.objspace.std.unicodeobject import unicode_from_object + return unicode_from_object(self, w_obj) + def call_method(self, w_obj, methname, *arg_w): if self.config.objspace.opcodes.CALL_METHOD: return callmethod.call_method_opt(self, w_obj, methname, *arg_w) From noreply at buildbot.pypy.org Fri Aug 2 14:35:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 14:35:03 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Mention vtune Message-ID: <20130802123503.4C16F1C1352@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65901:e78bff019f55 Date: 2013-08-02 14:34 +0200 http://bitbucket.org/pypy/pypy/changeset/e78bff019f55/ Log: Mention vtune diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -56,6 +56,6 @@ running valgrind we can see X% of the time in the read or write barriers, but it would be interesting to know also the time spent in the fast-path, as well as splitting it based e.g. on the RPython type of -object. +object. See also vtune. reimplement the fast-path of the nursery allocations in the GC From noreply at buildbot.pypy.org Fri Aug 2 14:40:41 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 2 Aug 2013 14:40:41 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: merge lltypesystem.rbuiltin into rtyper.rbuiltin Message-ID: <20130802124041.BBD2F1C0793@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65902:e2f9c9c033b3 Date: 2013-08-02 03:52 +0100 http://bitbucket.org/pypy/pypy/changeset/e2f9c9c033b3/ Log: merge lltypesystem.rbuiltin into rtyper.rbuiltin diff --git a/rpython/rtyper/lltypesystem/rbuiltin.py b/rpython/rtyper/lltypesystem/rbuiltin.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rbuiltin.py +++ /dev/null @@ -1,90 +0,0 @@ -from rpython.annotator import model as annmodel -from rpython.rlib import objectmodel -from rpython.rtyper.lltypesystem import lltype, rclass -from rpython.rtyper.lltypesystem.rdict import rtype_r_dict -from rpython.rtyper.rmodel import TyperError - - -def rtype_builtin_isinstance(hop): - hop.exception_cannot_occur() - if hop.s_result.is_constant(): - return hop.inputconst(lltype.Bool, hop.s_result.const) - - if hop.args_s[1].is_constant() and hop.args_s[1].const == list: - if hop.args_s[0].knowntype != list: - raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None") - rlist = hop.args_r[0] - vlist = hop.inputarg(rlist, arg=0) - cnone = hop.inputconst(rlist, None) - return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool) - - assert isinstance(hop.args_r[0], rclass.InstanceRepr) - return hop.args_r[0].rtype_isinstance(hop) - -def ll_instantiate(typeptr): # NB. 
used by rpbc.ClassesPBCRepr as well - my_instantiate = typeptr.instantiate - return my_instantiate() - -def rtype_instantiate(hop): - hop.exception_cannot_occur() - s_class = hop.args_s[0] - assert isinstance(s_class, annmodel.SomePBC) - if len(s_class.descriptions) != 1: - # instantiate() on a variable class - vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) - v_inst = hop.gendirectcall(ll_instantiate, vtypeptr) - return hop.genop('cast_pointer', [v_inst], # v_type implicit in r_result - resulttype = hop.r_result.lowleveltype) - - classdef = s_class.any_description().getuniqueclassdef() - return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) - -def rtype_builtin_hasattr(hop): - hop.exception_cannot_occur() - if hop.s_result.is_constant(): - return hop.inputconst(lltype.Bool, hop.s_result.const) - - raise TyperError("hasattr is only suported on a constant") - -BUILTIN_TYPER = {} -BUILTIN_TYPER[objectmodel.instantiate] = rtype_instantiate -BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance -BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr -BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict - -# _________________________________________________________________ -# weakrefs - -import weakref -from rpython.rtyper.lltypesystem import llmemory - -def rtype_weakref_create(hop): - # Note: this code also works for the RPython-level calls 'weakref.ref(x)'. - vlist = hop.inputargs(hop.args_r[0]) - hop.exception_cannot_occur() - return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr) - -def rtype_weakref_deref(hop): - c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) - assert v_wref.concretetype == llmemory.WeakRefPtr - hop.exception_cannot_occur() - return hop.genop('weakref_deref', [v_wref], resulttype=c_ptrtype.value) - -def rtype_cast_ptr_to_weakrefptr(hop): - vlist = hop.inputargs(hop.args_r[0]) - hop.exception_cannot_occur() - return hop.genop('cast_ptr_to_weakrefptr', vlist, - resulttype=llmemory.WeakRefPtr) - -def rtype_cast_weakrefptr_to_ptr(hop): - c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) - assert v_wref.concretetype == llmemory.WeakRefPtr - hop.exception_cannot_occur() - return hop.genop('cast_weakrefptr_to_ptr', [v_wref], - resulttype=c_ptrtype.value) - -BUILTIN_TYPER[weakref.ref] = rtype_weakref_create -BUILTIN_TYPER[llmemory.weakref_create] = rtype_weakref_create -BUILTIN_TYPER[llmemory.weakref_deref] = rtype_weakref_deref -BUILTIN_TYPER[llmemory.cast_ptr_to_weakrefptr] = rtype_cast_ptr_to_weakrefptr -BUILTIN_TYPER[llmemory.cast_weakrefptr_to_ptr] = rtype_cast_weakrefptr_to_ptr diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -3,7 +3,8 @@ from rpython.rlib import rarithmetic, objectmodel from rpython.rtyper import raddress, rptr, extregistry, rrange from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass +from rpython.rtyper.lltypesystem.rdict import rtype_r_dict from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype @@ -92,11 +93,6 @@ return BUILTIN_TYPER[self.builtinfunc] except (KeyError, TypeError): pass - try: - from rpython.rtyper.lltypesystem.rbuiltin import BUILTIN_TYPER as ll_BUILTIN_TYPER - return ll_BUILTIN_TYPER[self.builtinfunc] - except (KeyError, TypeError): - pass if extregistry.is_registered(self.builtinfunc): entry = extregistry.lookup(self.builtinfunc) return 
entry.specialize_call @@ -692,3 +688,86 @@ BUILTIN_TYPER[llmemory.cast_adr_to_ptr] = rtype_cast_adr_to_ptr BUILTIN_TYPER[llmemory.cast_adr_to_int] = rtype_cast_adr_to_int BUILTIN_TYPER[llmemory.cast_int_to_adr] = rtype_cast_int_to_adr + +def rtype_builtin_isinstance(hop): + hop.exception_cannot_occur() + if hop.s_result.is_constant(): + return hop.inputconst(lltype.Bool, hop.s_result.const) + + if hop.args_s[1].is_constant() and hop.args_s[1].const == list: + if hop.args_s[0].knowntype != list: + raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None") + rlist = hop.args_r[0] + vlist = hop.inputarg(rlist, arg=0) + cnone = hop.inputconst(rlist, None) + return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool) + + assert isinstance(hop.args_r[0], rclass.InstanceRepr) + return hop.args_r[0].rtype_isinstance(hop) + +def ll_instantiate(typeptr): # NB. used by rpbc.ClassesPBCRepr as well + my_instantiate = typeptr.instantiate + return my_instantiate() + +def rtype_instantiate(hop): + hop.exception_cannot_occur() + s_class = hop.args_s[0] + assert isinstance(s_class, annmodel.SomePBC) + if len(s_class.descriptions) != 1: + # instantiate() on a variable class + vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) + v_inst = hop.gendirectcall(ll_instantiate, vtypeptr) + return hop.genop('cast_pointer', [v_inst], # v_type implicit in r_result + resulttype = hop.r_result.lowleveltype) + + classdef = s_class.any_description().getuniqueclassdef() + return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) + +def rtype_builtin_hasattr(hop): + hop.exception_cannot_occur() + if hop.s_result.is_constant(): + return hop.inputconst(lltype.Bool, hop.s_result.const) + + raise TyperError("hasattr is only suported on a constant") + +BUILTIN_TYPER[objectmodel.instantiate] = rtype_instantiate +BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance +BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr +BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict + +# _________________________________________________________________ +# weakrefs + +import weakref +from rpython.rtyper.lltypesystem import llmemory + +def rtype_weakref_create(hop): + # Note: this code also works for the RPython-level calls 'weakref.ref(x)'. 
+ vlist = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr) + +def rtype_weakref_deref(hop): + c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) + assert v_wref.concretetype == llmemory.WeakRefPtr + hop.exception_cannot_occur() + return hop.genop('weakref_deref', [v_wref], resulttype=c_ptrtype.value) + +def rtype_cast_ptr_to_weakrefptr(hop): + vlist = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('cast_ptr_to_weakrefptr', vlist, + resulttype=llmemory.WeakRefPtr) + +def rtype_cast_weakrefptr_to_ptr(hop): + c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) + assert v_wref.concretetype == llmemory.WeakRefPtr + hop.exception_cannot_occur() + return hop.genop('cast_weakrefptr_to_ptr', [v_wref], + resulttype=c_ptrtype.value) + +BUILTIN_TYPER[weakref.ref] = rtype_weakref_create +BUILTIN_TYPER[llmemory.weakref_create] = rtype_weakref_create +BUILTIN_TYPER[llmemory.weakref_deref] = rtype_weakref_deref +BUILTIN_TYPER[llmemory.cast_ptr_to_weakrefptr] = rtype_cast_ptr_to_weakrefptr +BUILTIN_TYPER[llmemory.cast_weakrefptr_to_ptr] = rtype_cast_weakrefptr_to_ptr From noreply at buildbot.pypy.org Fri Aug 2 15:05:13 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 Aug 2013 15:05:13 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: invalidate all 'R' variables after a write barrier. Can be improved with aliasing info and other tricks Message-ID: <20130802130513.1AED31C3259@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65903:008566532c4f Date: 2013-08-02 11:35 +0200 http://bitbucket.org/pypy/pypy/changeset/008566532c4f/ Log: invalidate all 'R' variables after a write barrier. Can be improved with aliasing info and other tricks diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -115,6 +115,14 @@ for v, c in self.known_category.items(): if c == 'W': self.known_category[v] = 'R' + + def clear_readable_statuses(self, reason): + # XXX: needs aliasing info to be better + # XXX: move to optimizeopt to only invalidate same typed vars? 
+ for v, c in self.known_category.items(): + if c == 'R': + self.known_category[v] = 'P' + def gen_write_barrier(self, v): raise NotImplementedError @@ -123,6 +131,10 @@ v_base = self.unconstifyptr(v_base) assert isinstance(v_base, BoxPtr) source_category = self.known_category.get(v_base, 'P') + if target_category == 'W': + # if *any* of the readable vars is the same object, + # it must repeat the read_barrier now + self.clear_readable_statuses(v_base) mpcat = self.more_precise_categories[source_category] try: write_barrier_descr = mpcat[target_category] diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -126,6 +126,15 @@ newoperations.append(newop) v_holder[0] = w category[w] = to + if to == 'W': + # if any of the other vars in the same path + # points to the same object, they must lose + # their read-status now + for u in block.getvariables(): + if get_category(u) == 'R' \ + and u.concretetype == v.concretetype: + category[u] = 'P' + # newop = SpaceOperation(op.opname, [renamings_get(v) for v in op.args], From noreply at buildbot.pypy.org Fri Aug 2 15:05:15 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 Aug 2013 15:05:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Merge Message-ID: <20130802130515.20E7E1C3333@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65905:d14443d77df9 Date: 2013-08-02 15:04 +0200 http://bitbucket.org/pypy/pypy/changeset/d14443d77df9/ Log: Merge diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -26,3 +26,36 @@ ------------------------------------------------------------ optimize the static placement of the STM_XxxBARRIERs + +------------------------------------------------------------ + + + +Current optimization opportunities (outside the JIT) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +tweak translator/stm/ to improve placement of barriers, at least at +whole-function level, but maybe cross-function; and reintroduce tweaks +to the PyFrame object (make sure it's always written and don't put more +barriers) + +in parallel, tweak the API of stmgc: think about adding +stm_repeat_read_barrier, and support "tentative" write_barrier calls +that are not actually followed by a write (checked by comparing the +object contents) + +in the interpreter, e.g. BINARY_ADD calls space.add() which possibly +(but rarely) can cause a transaction break, thus requiring that the +frame be write-barrier()-ed again. I'm thinking about alternatives for +this case: e.g. have a separate stack of objects, and the top-most +object on this stack is always in write mode. so just after a +transaction break, we force a write barrier on the top object of the +stack. this would be needed to avoid the usually-pointless write +barriers on the PyFrame everywhere in the interpreter + +running valgrind we can see X% of the time in the read or write +barriers, but it would be interesting to know also the time spent in the +fast-path, as well as splitting it based e.g. on the RPython type of +object. See also vtune. 
+ +reimplement the fast-path of the nursery allocations in the GC From noreply at buildbot.pypy.org Fri Aug 2 15:05:14 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 Aug 2013 15:05:14 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add resop for incrementing debug counters Message-ID: <20130802130514.37A351C32DE@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65904:2f66010fc1df Date: 2013-08-02 15:02 +0200 http://bitbucket.org/pypy/pypy/changeset/2f66010fc1df/ Log: add resop for incrementing debug counters diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -961,6 +961,9 @@ def execute_cond_call_stm_b(self, descr, a): py.test.skip("cond_call_stm_b not supported") + def execute_increment_debug_counter(self, descr, a): + pass + def execute_keepalive(self, descr, x): pass diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -269,23 +269,16 @@ if op.getopnum() == rop.LABEL: self._append_debugging_code(newoperations, 'l', number, op.getdescr()) - if not self.cpu.gc_ll_descr.stm: - # XXX: find a workaround to ignore inserting $INEV for - # raw accesses here - operations = newoperations + operations = newoperations return operations def _append_debugging_code(self, operations, tp, number, token): counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - operations.extend(ops) + operations.append( + ResOperation(rop.INCREMENT_DEBUG_COUNTER, + [c_adr], None, descr=self.debug_counter_descr)) + def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -45,6 +45,9 @@ for op in operations: if op.getopnum() == rop.DEBUG_MERGE_POINT: continue + if op.getopnum() == rop.INCREMENT_DEBUG_COUNTER: + self.newops.append(op) + continue # ---------- ptr_eq ---------- if op.getopnum() in (rop.PTR_EQ, rop.INSTANCE_PTR_EQ, rop.PTR_NE, rop.INSTANCE_PTR_NE): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1609,6 +1609,15 @@ ofs_loc) self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) + def genop_discard_increment_debug_counter(self, op, arglocs): + assert IS_X86_64 + # I'm getting lazy. mem_reg_plus_const does not support + # ebp as a register, but that is what we get from the regalloc + # (mostly?) 
-> change to SCRATCH_REG + base_loc, ofs_loc, size_loc = arglocs + self.mc.MOV(X86_64_SCRATCH_REG, base_loc) + self.mc.INC_m((X86_64_SCRATCH_REG.value, ofs_loc.getint())) + def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs assert isinstance(size_loc, ImmedLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1008,6 +1008,14 @@ consider_getfield_raw_pure = consider_getfield_gc consider_getfield_gc_pure = consider_getfield_gc + def consider_increment_debug_counter(self, op): + ofs, size, _ = unpack_fielddescr(op.getdescr()) + ofs_loc = imm(ofs) + size_loc = imm(size) + base_loc = self.loc(op.getarg(0)) + self.perform_discard(op, [base_loc, ofs_loc, size_loc]) + + def consider_getarrayitem_gc(self, op): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) args = op.getarglist() diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -472,6 +472,8 @@ # ------------------------------ Arithmetic ------------------------------ + INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1)) + ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,_,ADD_rj,_,_ = common_modes(0) OR_ri, OR_rr, OR_rb, _,_,OR_rm, _,OR_rj, _,_ = common_modes(1) AND_ri,AND_rr,AND_rb,_,_,AND_rm,_,AND_rj,_,_ = common_modes(4) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -344,6 +344,7 @@ continue if value in (rop.FORCE_TOKEN, rop.CALL_ASSEMBLER, + rop.INCREMENT_DEBUG_COUNTER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, rop.COND_CALL_STM_B, diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -490,6 +490,7 @@ 'MARK_OPAQUE_PTR/1b', '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- + 'INCREMENT_DEBUG_COUNTER/1d', 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', From noreply at buildbot.pypy.org Fri Aug 2 15:22:51 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 2 Aug 2013 15:22:51 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: merge lltypesystem.rtupletype into lltypesystem.rtuple Message-ID: <20130802132251.A981D1C35A6@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65906:6cc0c7449118 Date: 2013-08-02 14:05 +0100 http://bitbucket.org/pypy/pypy/changeset/6cc0c7449118/ Log: merge lltypesystem.rtupletype into lltypesystem.rtuple diff --git a/rpython/rtyper/lltypesystem/rtuple.py b/rpython/rtyper/lltypesystem/rtuple.py --- a/rpython/rtyper/lltypesystem/rtuple.py +++ b/rpython/rtyper/lltypesystem/rtuple.py @@ -3,7 +3,6 @@ from rpython.rtyper.rtuple import AbstractTupleRepr, AbstractTupleIteratorRepr from rpython.rtyper.lltypesystem.lltype import \ Ptr, GcStruct, Void, Signed, malloc, typeOf, nullptr -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE from rpython.rtyper.lltypesystem import rstr # ____________________________________________________________ @@ -17,6 +16,16 @@ # ... 
# } +def TUPLE_TYPE(field_lltypes): + if len(field_lltypes) == 0: + return Void # empty tuple + else: + fields = [('item%d' % i, TYPE) for i, TYPE in enumerate(field_lltypes)] + kwds = {'hints': {'immutable': True, + 'noidentity': True}} + return Ptr(GcStruct('tuple%d' % len(field_lltypes), *fields, **kwds)) + + class TupleRepr(AbstractTupleRepr): rstr_ll = rstr.LLHelpers diff --git a/rpython/rtyper/lltypesystem/rtupletype.py b/rpython/rtyper/lltypesystem/rtupletype.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rtupletype.py +++ /dev/null @@ -1,15 +0,0 @@ -# Helper to build the lowleveltype corresponding to an RPython tuple. -# This is not in rtuple.py so that it can be imported without bringing -# the whole rtyper in. - -from rpython.rtyper.lltypesystem.lltype import Void, Ptr, GcStruct - - -def TUPLE_TYPE(field_lltypes): - if len(field_lltypes) == 0: - return Void # empty tuple - else: - fields = [('item%d' % i, TYPE) for i, TYPE in enumerate(field_lltypes)] - kwds = {'hints': {'immutable': True, - 'noidentity': True}} - return Ptr(GcStruct('tuple%d' % len(field_lltypes), *fields, **kwds)) diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -13,7 +13,7 @@ from rpython.rtyper.annlowlevel import hlstr from rpython.rtyper.extfunc import extdef from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE +from rpython.rtyper.lltypesystem.rtuple import TUPLE_TYPE from rpython.rtyper.tool import rffi_platform as platform from rpython.tool.pairtype import pairtype from rpython.tool.sourcetools import func_renamer diff --git a/rpython/rtyper/test/test_rtuple.py b/rpython/rtyper/test/test_rtuple.py --- a/rpython/rtyper/test/test_rtuple.py +++ b/rpython/rtyper/test/test_rtuple.py @@ -1,15 +1,15 @@ -from rpython.rtyper.lltypesystem import rtupletype +from rpython.rtyper.lltypesystem.rtuple import TUPLE_TYPE, TupleRepr from rpython.rtyper.lltypesystem.lltype import Signed, Bool from rpython.rtyper.rbool import bool_repr from rpython.rtyper.rint import signed_repr from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rlib.objectmodel import compute_hash from rpython.translator.translator import TranslationContext def test_rtuple(): - from rpython.rtyper.lltypesystem.rtuple import TupleRepr rtuple = TupleRepr(None, [signed_repr, bool_repr]) - assert rtuple.lowleveltype == rtupletype.TUPLE_TYPE([Signed, Bool]) + assert rtuple.lowleveltype == TUPLE_TYPE([Signed, Bool]) # ____________________________________________________________ @@ -171,7 +171,6 @@ assert r_AB_tup.lowleveltype == r_BA_tup.lowleveltype def test_tuple_hash(self): - from rpython.rlib.objectmodel import compute_hash def f(i, j): return compute_hash((i, j)) @@ -180,7 +179,6 @@ assert res1 != res2 def test_constant_tuple_hash_str(self): - from rpython.rlib.objectmodel import compute_hash def f(i): if i: t = (None, "abc") @@ -312,7 +310,6 @@ assert res is True def test_tuple_hash_2(self): - from rpython.rlib.objectmodel import compute_hash def f(n): return compute_hash((n, 6)) == compute_hash((3, n*2)) res = self.interpret(f, [3]) From noreply at buildbot.pypy.org Fri Aug 2 15:22:52 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 2 Aug 2013 15:22:52 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: fix Message-ID: <20130802132252.BB0A71C35A6@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem 
Changeset: r65907:790238d43b27 Date: 2013-08-02 14:22 +0100 http://bitbucket.org/pypy/pypy/changeset/790238d43b27/ Log: fix diff --git a/rpython/translator/backendopt/mallocv.py b/rpython/translator/backendopt/mallocv.py --- a/rpython/translator/backendopt/mallocv.py +++ b/rpython/translator/backendopt/mallocv.py @@ -252,7 +252,7 @@ def __init__(self, graphs, rtyper, verbose=False): self.graphs = graphs self.rtyper = rtyper - self.excdata = rtyper.getexceptiondata() + self.excdata = rtyper.exceptiondata self.graphbuilders = {} self.specialized_graphs = {} self.specgraphorigin = {} From noreply at buildbot.pypy.org Fri Aug 2 16:26:51 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 2 Aug 2013 16:26:51 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: merge lltypesystem.rtuple into rtyper.rtuple Message-ID: <20130802142651.EA2971C32DE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65908:2d755cc6e1da Date: 2013-08-02 15:26 +0100 http://bitbucket.org/pypy/pypy/changeset/2d755cc6e1da/ Log: merge lltypesystem.rtuple into rtyper.rtuple diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -137,7 +137,7 @@ return self.holders def _emit(self, repr, hop): - assert isinstance(repr, rtuple.AbstractTupleRepr) + assert isinstance(repr, rtuple.TupleRepr) tupleitems_v = [] for h in self.holders: v = h.emit(repr.items_r[len(tupleitems_v)], hop) diff --git a/rpython/rtyper/lltypesystem/rtuple.py b/rpython/rtyper/lltypesystem/rtuple.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rtuple.py +++ /dev/null @@ -1,122 +0,0 @@ -from rpython.tool.pairtype import pairtype -from rpython.rtyper.rmodel import inputconst -from rpython.rtyper.rtuple import AbstractTupleRepr, AbstractTupleIteratorRepr -from rpython.rtyper.lltypesystem.lltype import \ - Ptr, GcStruct, Void, Signed, malloc, typeOf, nullptr -from rpython.rtyper.lltypesystem import rstr - -# ____________________________________________________________ -# -# Concrete implementation of RPython tuples: -# -# struct tuple { -# type0 item0; -# type1 item1; -# type2 item2; -# ... 
-# } - -def TUPLE_TYPE(field_lltypes): - if len(field_lltypes) == 0: - return Void # empty tuple - else: - fields = [('item%d' % i, TYPE) for i, TYPE in enumerate(field_lltypes)] - kwds = {'hints': {'immutable': True, - 'noidentity': True}} - return Ptr(GcStruct('tuple%d' % len(field_lltypes), *fields, **kwds)) - - -class TupleRepr(AbstractTupleRepr): - rstr_ll = rstr.LLHelpers - - def __init__(self, rtyper, items_r): - AbstractTupleRepr.__init__(self, rtyper, items_r) - self.lowleveltype = TUPLE_TYPE(self.lltypes) - - def newtuple(cls, llops, r_tuple, items_v): - # items_v should have the lowleveltype of the internal reprs - assert len(r_tuple.items_r) == len(items_v) - for r_item, v_item in zip(r_tuple.items_r, items_v): - assert r_item.lowleveltype == v_item.concretetype - # - if len(r_tuple.items_r) == 0: - return inputconst(Void, ()) # a Void empty tuple - c1 = inputconst(Void, r_tuple.lowleveltype.TO) - cflags = inputconst(Void, {'flavor': 'gc'}) - v_result = llops.genop('malloc', [c1, cflags], - resulttype = r_tuple.lowleveltype) - for i in range(len(r_tuple.items_r)): - cname = inputconst(Void, r_tuple.fieldnames[i]) - llops.genop('setfield', [v_result, cname, items_v[i]]) - return v_result - newtuple = classmethod(newtuple) - - def instantiate(self): - if len(self.items_r) == 0: - return dum_empty_tuple # PBC placeholder for an empty tuple - else: - return malloc(self.lowleveltype.TO) - - def rtype_bltn_list(self, hop): - from rpython.rtyper.lltypesystem import rlist - nitems = len(self.items_r) - vtup = hop.inputarg(self, 0) - LIST = hop.r_result.lowleveltype.TO - cno = inputconst(Signed, nitems) - hop.exception_is_here() - vlist = hop.gendirectcall(LIST.ll_newlist, cno) - v_func = hop.inputconst(Void, rlist.dum_nocheck) - for index in range(nitems): - name = self.fieldnames[index] - ritem = self.items_r[index] - cname = hop.inputconst(Void, name) - vitem = hop.genop('getfield', [vtup, cname], resulttype = ritem) - vitem = hop.llops.convertvar(vitem, ritem, hop.r_result.item_repr) - cindex = inputconst(Signed, index) - hop.gendirectcall(rlist.ll_setitem_nonneg, v_func, vlist, cindex, vitem) - return vlist - - def getitem_internal(self, llops, v_tuple, index): - """Return the index'th item, in internal repr.""" - name = self.fieldnames[index] - llresult = self.lltypes[index] - cname = inputconst(Void, name) - return llops.genop('getfield', [v_tuple, cname], resulttype = llresult) - - -def rtype_newtuple(hop): - return TupleRepr._rtype_newtuple(hop) - -newtuple = TupleRepr.newtuple - -def dum_empty_tuple(): pass - - -# ____________________________________________________________ -# -# Iteration. - -class Length1TupleIteratorRepr(AbstractTupleIteratorRepr): - - def __init__(self, r_tuple): - self.r_tuple = r_tuple - self.lowleveltype = Ptr(GcStruct('tuple1iter', - ('tuple', r_tuple.lowleveltype))) - self.ll_tupleiter = ll_tupleiter - self.ll_tuplenext = ll_tuplenext - -TupleRepr.IteratorRepr = Length1TupleIteratorRepr - -def ll_tupleiter(ITERPTR, tuple): - iter = malloc(ITERPTR.TO) - iter.tuple = tuple - return iter - -def ll_tuplenext(iter): - # for iterating over length 1 tuples only! 
- t = iter.tuple - if t: - iter.tuple = nullptr(typeOf(t).TO) - return t.item0 - else: - raise StopIteration diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -13,7 +13,7 @@ from rpython.rtyper.annlowlevel import hlstr from rpython.rtyper.extfunc import extdef from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.lltypesystem.rtuple import TUPLE_TYPE +from rpython.rtyper.rtuple import TUPLE_TYPE from rpython.rtyper.tool import rffi_platform as platform from rpython.tool.pairtype import pairtype from rpython.tool.sourcetools import func_renamer diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -53,14 +53,14 @@ raise TyperError("**kwds call not implemented") if arguments.w_stararg is not None: # expand the *arg in-place -- it must be a tuple - from rpython.rtyper.rtuple import AbstractTupleRepr + from rpython.rtyper.rtuple import TupleRepr if arguments.w_stararg != hop.nb_args - 3: raise TyperError("call pattern too complex") hop.nb_args -= 1 v_tuple = hop.args_v.pop() s_tuple = hop.args_s.pop() r_tuple = hop.args_r.pop() - if not isinstance(r_tuple, AbstractTupleRepr): + if not isinstance(r_tuple, TupleRepr): raise TyperError("*arg must be a tuple") for i in range(len(r_tuple.items_r)): v_item = r_tuple.getitem_internal(hop.llops, v_tuple, i) diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -4,7 +4,6 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Bool, Void, UniChar from rpython.rtyper.rmodel import IntegerRepr, IteratorRepr, inputconst, Repr -from rpython.rtyper.rtuple import AbstractTupleRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name from rpython.tool.staticmethods import StaticMethods @@ -564,18 +563,6 @@ hop.exception_cannot_occur() return hop.gendirectcall(r_str.ll.ll_contains, v_str, v_chr) -class __extend__(pairtype(AbstractStringRepr, AbstractTupleRepr)): - def rtype_mod((r_str, r_tuple), hop): - r_tuple = hop.args_r[1] - v_tuple = hop.args_v[1] - - sourcevars = [] - for i, r_arg in enumerate(r_tuple.external_items_r): - v_item = r_tuple.getitem(hop.llops, v_tuple, i) - sourcevars.append((v_item, r_arg)) - - return r_str.ll.do_stringformat(hop, sourcevars) - class __extend__(AbstractCharRepr): def ll_str(self, ch): diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -5,7 +5,10 @@ from rpython.rlib.rarithmetic import intmask from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import Void, Signed, Bool +from rpython.rtyper.lltypesystem.lltype import ( + Void, Signed, Bool, Ptr, GcStruct, malloc, typeOf, nullptr) +from rpython.rtyper.lltypesystem.rstr import LLHelpers +from rpython.rtyper.rstr import AbstractStringRepr from rpython.rtyper.rmodel import (Repr, IntegerRepr, inputconst, IteratorRepr, externalvsinternal) from rpython.tool.pairtype import pairtype @@ -13,7 +16,6 @@ class __extend__(annmodel.SomeTuple): def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rtuple import TupleRepr return TupleRepr(rtyper, [rtyper.getrepr(s_item) for s_item in self.items]) def rtyper_makekey_ex(self, 
rtyper): @@ -71,17 +73,16 @@ def gen_str_function(tuplerepr): items_r = tuplerepr.items_r - str_funcs = [r_item.ll_str for r_item in items_r] - key = tuplerepr.rstr_ll, tuple(str_funcs) + key = tuple([r_item.ll_str for r_item in items_r]) try: return _gen_str_function_cache[key] except KeyError: - autounrolling_funclist = unrolling_iterable(enumerate(str_funcs)) + autounrolling_funclist = unrolling_iterable(enumerate(key)) - constant = tuplerepr.rstr_ll.ll_constant - start = tuplerepr.rstr_ll.ll_build_start - push = tuplerepr.rstr_ll.ll_build_push - finish = tuplerepr.rstr_ll.ll_build_finish + constant = LLHelpers.ll_constant + start = LLHelpers.ll_build_start + push = LLHelpers.ll_build_push + finish = LLHelpers.ll_build_finish length = len(items_r) def ll_str(t): @@ -105,7 +106,28 @@ return ll_str -class AbstractTupleRepr(Repr): +# ____________________________________________________________ +# +# Concrete implementation of RPython tuples: +# +# struct tuple { +# type0 item0; +# type1 item1; +# type2 item2; +# ... +# } + +def TUPLE_TYPE(field_lltypes): + if len(field_lltypes) == 0: + return Void # empty tuple + else: + fields = [('item%d' % i, TYPE) for i, TYPE in enumerate(field_lltypes)] + kwds = {'hints': {'immutable': True, + 'noidentity': True}} + return Ptr(GcStruct('tuple%d' % len(field_lltypes), *fields, **kwds)) + + +class TupleRepr(Repr): def __init__(self, rtyper, items_r): self.items_r = [] @@ -118,6 +140,7 @@ self.fieldnames = ['item%d' % i for i in range(len(items_r))] self.lltypes = [r.lowleveltype for r in items_r] self.tuple_cache = {} + self.lowleveltype = TUPLE_TYPE(self.lltypes) def getitem(self, llops, v_tuple, index): """Generate the operations to get the index'th item of v_tuple, @@ -127,19 +150,37 @@ r_external_item = self.external_items_r[index] return llops.convertvar(v, r_item, r_external_item) + @classmethod + def newtuple(cls, llops, r_tuple, items_v): + # items_v should have the lowleveltype of the internal reprs + assert len(r_tuple.items_r) == len(items_v) + for r_item, v_item in zip(r_tuple.items_r, items_v): + assert r_item.lowleveltype == v_item.concretetype + # + if len(r_tuple.items_r) == 0: + return inputconst(Void, ()) # a Void empty tuple + c1 = inputconst(Void, r_tuple.lowleveltype.TO) + cflags = inputconst(Void, {'flavor': 'gc'}) + v_result = llops.genop('malloc', [c1, cflags], + resulttype = r_tuple.lowleveltype) + for i in range(len(r_tuple.items_r)): + cname = inputconst(Void, r_tuple.fieldnames[i]) + llops.genop('setfield', [v_result, cname, items_v[i]]) + return v_result + + @classmethod def newtuple_cached(cls, hop, items_v): r_tuple = hop.r_result if hop.s_result.is_constant(): return inputconst(r_tuple, hop.s_result.const) else: return cls.newtuple(hop.llops, r_tuple, items_v) - newtuple_cached = classmethod(newtuple_cached) + @classmethod def _rtype_newtuple(cls, hop): r_tuple = hop.r_result vlist = hop.inputargs(*r_tuple.items_r) return cls.newtuple_cached(hop, vlist) - _rtype_newtuple = classmethod(_rtype_newtuple) def convert_const(self, value): assert isinstance(value, tuple) and len(value) == len(self.items_r) @@ -174,8 +215,48 @@ return self.IteratorRepr(self) raise TyperError("can only iterate over tuples of length 1 for now") + def instantiate(self): + if len(self.items_r) == 0: + return dum_empty_tuple # PBC placeholder for an empty tuple + else: + return malloc(self.lowleveltype.TO) -class __extend__(pairtype(AbstractTupleRepr, IntegerRepr)): + def rtype_bltn_list(self, hop): + from rpython.rtyper.lltypesystem import rlist + 
nitems = len(self.items_r) + vtup = hop.inputarg(self, 0) + LIST = hop.r_result.lowleveltype.TO + cno = inputconst(Signed, nitems) + hop.exception_is_here() + vlist = hop.gendirectcall(LIST.ll_newlist, cno) + v_func = hop.inputconst(Void, rlist.dum_nocheck) + for index in range(nitems): + name = self.fieldnames[index] + ritem = self.items_r[index] + cname = hop.inputconst(Void, name) + vitem = hop.genop('getfield', [vtup, cname], resulttype = ritem) + vitem = hop.llops.convertvar(vitem, ritem, hop.r_result.item_repr) + cindex = inputconst(Signed, index) + hop.gendirectcall(rlist.ll_setitem_nonneg, v_func, vlist, cindex, vitem) + return vlist + + def getitem_internal(self, llops, v_tuple, index): + """Return the index'th item, in internal repr.""" + name = self.fieldnames[index] + llresult = self.lltypes[index] + cname = inputconst(Void, name) + return llops.genop('getfield', [v_tuple, cname], resulttype = llresult) + + +def rtype_newtuple(hop): + return TupleRepr._rtype_newtuple(hop) + +newtuple = TupleRepr.newtuple + +def dum_empty_tuple(): pass + + +class __extend__(pairtype(TupleRepr, IntegerRepr)): def rtype_getitem((r_tup, r_int), hop): v_tuple, v_index = hop.inputargs(r_tup, Signed) @@ -186,7 +267,7 @@ index = v_index.value return r_tup.getitem(hop.llops, v_tuple, index) -class __extend__(AbstractTupleRepr): +class __extend__(TupleRepr): def rtype_getslice(r_tup, hop): s_start = hop.args_s[1] @@ -203,7 +284,7 @@ for i in indices] return hop.r_result.newtuple(hop.llops, hop.r_result, items_v) -class __extend__(pairtype(AbstractTupleRepr, Repr)): +class __extend__(pairtype(TupleRepr, Repr)): def rtype_contains((r_tup, r_item), hop): s_tup = hop.args_s[0] if not s_tup.is_constant(): @@ -224,7 +305,7 @@ hop2.v_s_insertfirstarg(v_dict, s_dict) return hop2.dispatch() -class __extend__(pairtype(AbstractTupleRepr, AbstractTupleRepr)): +class __extend__(pairtype(TupleRepr, TupleRepr)): def rtype_add((r_tup1, r_tup2), hop): v_tuple1, v_tuple2 = hop.inputargs(r_tup1, r_tup2) @@ -265,6 +346,21 @@ def rtype_is_((robj1, robj2), hop): raise TyperError("cannot compare tuples with 'is'") +class __extend__(pairtype(AbstractStringRepr, TupleRepr)): + def rtype_mod((r_str, r_tuple), hop): + r_tuple = hop.args_r[1] + v_tuple = hop.args_v[1] + + sourcevars = [] + for i, r_arg in enumerate(r_tuple.external_items_r): + v_item = r_tuple.getitem(hop.llops, v_tuple, i) + sourcevars.append((v_item, r_arg)) + + return r_str.ll.do_stringformat(hop, sourcevars) + +# ____________________________________________________________ +# +# Iteration. class AbstractTupleIteratorRepr(IteratorRepr): @@ -279,3 +375,28 @@ hop.exception_is_here() v = hop.gendirectcall(self.ll_tuplenext, v_iter) return hop.llops.convertvar(v, self.r_tuple.items_r[0], self.r_tuple.external_items_r[0]) + +class Length1TupleIteratorRepr(AbstractTupleIteratorRepr): + + def __init__(self, r_tuple): + self.r_tuple = r_tuple + self.lowleveltype = Ptr(GcStruct('tuple1iter', + ('tuple', r_tuple.lowleveltype))) + self.ll_tupleiter = ll_tupleiter + self.ll_tuplenext = ll_tuplenext + +TupleRepr.IteratorRepr = Length1TupleIteratorRepr + +def ll_tupleiter(ITERPTR, tuple): + iter = malloc(ITERPTR.TO) + iter.tuple = tuple + return iter + +def ll_tuplenext(iter): + # for iterating over length 1 tuples only! 
+ t = iter.tuple + if t: + iter.tuple = nullptr(typeOf(t).TO) + return t.item0 + else: + raise StopIteration diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -578,7 +578,7 @@ return pair(r_arg1, r_arg2).rtype_extend_with_char_count(hop) def translate_op_newtuple(self, hop): - from rpython.rtyper.lltypesystem.rtuple import rtype_newtuple + from rpython.rtyper.rtuple import rtype_newtuple return rtype_newtuple(hop) def translate_op_instantiate1(self, hop): diff --git a/rpython/rtyper/test/test_rtuple.py b/rpython/rtyper/test/test_rtuple.py --- a/rpython/rtyper/test/test_rtuple.py +++ b/rpython/rtyper/test/test_rtuple.py @@ -1,4 +1,4 @@ -from rpython.rtyper.lltypesystem.rtuple import TUPLE_TYPE, TupleRepr +from rpython.rtyper.rtuple import TUPLE_TYPE, TupleRepr from rpython.rtyper.lltypesystem.lltype import Signed, Bool from rpython.rtyper.rbool import bool_repr from rpython.rtyper.rint import signed_repr From noreply at buildbot.pypy.org Fri Aug 2 16:34:27 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 2 Aug 2013 16:34:27 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: fix _get_jitcodes() Message-ID: <20130802143427.335C51C32DE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65909:ff68739a1fef Date: 2013-08-02 15:33 +0100 http://bitbucket.org/pypy/pypy/changeset/ff68739a1fef/ Log: fix _get_jitcodes() diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -12,7 +12,7 @@ from rpython.translator.backendopt.all import backend_optimizations -def _get_jitcodes(testself, CPUClass, func, values, type_system, +def _get_jitcodes(testself, CPUClass, func, values, supports_floats=True, supports_longlong=False, supports_singlefloats=False, From noreply at buildbot.pypy.org Fri Aug 2 17:37:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 17:37:31 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: hg merge default Message-ID: <20130802153731.411C51C3333@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1342:a1ca6e16db03 Date: 2013-08-02 17:32 +0200 http://bitbucket.org/cffi/cffi/changeset/a1ca6e16db03/ Log: hg merge default diff too long, truncating to 2000 out of 3220 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,7 +1,14 @@ +3691a2e644c98fc8753ffb96c4ff2d5d3e57bd17 release-0.4.2 +0000000000000000000000000000000000000000 release-0.4.2 +037096d1bdaa213c2adebf3a4124ad56dba8ba82 release-0.4.1 +0000000000000000000000000000000000000000 release-0.4.1 +bd4b6090aea035a6093e684858aa7bd54a6270ec release-0.4 +0000000000000000000000000000000000000000 release-0.4 +5f31908df6c97a1f70f3fcd4d489d98dc2b30f04 release-0.3 +0000000000000000000000000000000000000000 release-0.3 +6a0f0a476101210a76f4bc4d33c5bbb0f8f979fd release-0.2.1 +0000000000000000000000000000000000000000 release-0.2.1 +a8636625e33b0f84c3744f80d49e84b175a0a215 release-0.2 +0000000000000000000000000000000000000000 release-0.2 ca6e81df7f1ea58d891129ad016a8888c08f238b release-0.1 -a8636625e33b0f84c3744f80d49e84b175a0a215 release-0.2 -6a0f0a476101210a76f4bc4d33c5bbb0f8f979fd release-0.2.1 -5f31908df6c97a1f70f3fcd4d489d98dc2b30f04 release-0.3 -bd4b6090aea035a6093e684858aa7bd54a6270ec release-0.4 -037096d1bdaa213c2adebf3a4124ad56dba8ba82 release-0.4.1 -3691a2e644c98fc8753ffb96c4ff2d5d3e57bd17 release-0.4.2 
+0000000000000000000000000000000000000000 release-0.1 diff --git a/AUTHORS b/AUTHORS --- a/AUTHORS +++ b/AUTHORS @@ -1,2 +1,3 @@ This package has been mostly done by Armin Rigo with help from -Maciej Fijałkowski. \ No newline at end of file +Maciej Fijałkowski. The idea is heavily based (although not directly +copied) from LuaJIT ffi by Mike Pall. diff --git a/README.md b/README.md --- a/README.md +++ b/README.md @@ -14,3 +14,7 @@ ------- [Mailing list](https://groups.google.com/forum/#!forum/python-cffi) + +To run tests under CPython, run: + +python setup.py build_ext -i diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -91,6 +91,7 @@ #define CT_IS_LONGDOUBLE 65536 #define CT_IS_BOOL 131072 #define _CT_IS_FILE 262144 +#define CT_IS_VOID_PTR 524288 #define CT_PRIMITIVE_ANY (CT_PRIMITIVE_SIGNED | \ CT_PRIMITIVE_UNSIGNED | \ CT_PRIMITIVE_CHAR | \ @@ -143,11 +144,14 @@ static PyTypeObject CField_Type; static PyTypeObject CData_Type; static PyTypeObject CDataOwning_Type; +static PyTypeObject CDataOwningGC_Type; #define CTypeDescr_Check(ob) (Py_TYPE(ob) == &CTypeDescr_Type) #define CData_Check(ob) (Py_TYPE(ob) == &CData_Type || \ - Py_TYPE(ob) == &CDataOwning_Type) -#define CDataOwn_Check(ob) (Py_TYPE(ob) == &CDataOwning_Type) + Py_TYPE(ob) == &CDataOwning_Type || \ + Py_TYPE(ob) == &CDataOwningGC_Type) +#define CDataOwn_Check(ob) (Py_TYPE(ob) == &CDataOwning_Type || \ + Py_TYPE(ob) == &CDataOwningGC_Type) typedef union { unsigned char m_char; @@ -556,13 +560,6 @@ PyObject_Del(cf); } -static int -cfield_traverse(CFieldObject *cf, visitproc visit, void *arg) -{ - Py_VISIT(cf->cf_type); - return 0; -} - #undef OFF #define OFF(x) offsetof(CFieldObject, x) @@ -597,7 +594,7 @@ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ 0, /* tp_doc */ - (traverseproc)cfield_traverse, /* tp_traverse */ + 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ @@ -1378,16 +1375,46 @@ PyObject_ClearWeakRefs((PyObject *) cd); Py_DECREF(cd->c_type); - PyObject_Del(cd); +#ifndef CFFI_MEM_LEAK /* never release anything, tests only */ + Py_TYPE(cd)->tp_free((PyObject *)cd); +#endif } static void cdataowning_dealloc(CDataObject *cd) { + assert(!(cd->c_type->ct_flags & (CT_IS_VOID_PTR | CT_FUNCTIONPTR))); + if (cd->c_type->ct_flags & CT_IS_PTR_TO_OWNED) { Py_DECREF(((CDataObject_own_structptr *)cd)->structobj); } - else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { - /* a callback */ +#if defined(CFFI_MEM_DEBUG) || defined(CFFI_MEM_LEAK) + if (cd->c_type->ct_flags & (CT_PRIMITIVE_ANY | CT_STRUCT | CT_UNION)) { + assert(cd->c_type->ct_size >= 0); + memset(cd->c_data, 0xDD, cd->c_type->ct_size); + } + else if (cd->c_type->ct_flags & CT_ARRAY) { + Py_ssize_t x = get_array_length(cd); + assert(x >= 0); + x *= cd->c_type->ct_itemdescr->ct_size; + assert(x >= 0); + memset(cd->c_data, 0xDD, x); + } +#endif + cdata_dealloc(cd); +} + +static void cdataowninggc_dealloc(CDataObject *cd) +{ + assert(!(cd->c_type->ct_flags & (CT_IS_PTR_TO_OWNED | + CT_PRIMITIVE_ANY | + CT_STRUCT | CT_UNION))); + PyObject_GC_UnTrack(cd); + + if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { /* a handle */ + PyObject *x = (PyObject *)(cd->c_data + 42); + Py_DECREF(x); + } + else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ ffi_closure *closure = (ffi_closure *)cd->c_data; PyObject *args = (PyObject *)(closure->user_data); Py_XDECREF(args); @@ -1396,9 +1423,34 @@ cdata_dealloc(cd); } -static int cdata_traverse(CDataObject *cd, visitproc visit, void 
*arg) +static int cdataowninggc_traverse(CDataObject *cd, visitproc visit, void *arg) { - Py_VISIT(cd->c_type); + if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { /* a handle */ + PyObject *x = (PyObject *)(cd->c_data + 42); + Py_VISIT(x); + } + else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ + ffi_closure *closure = (ffi_closure *)cd->c_data; + PyObject *args = (PyObject *)(closure->user_data); + Py_VISIT(args); + } + return 0; +} + +static int cdataowninggc_clear(CDataObject *cd) +{ + if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { /* a handle */ + PyObject *x = (PyObject *)(cd->c_data + 42); + Py_INCREF(Py_None); + cd->c_data = ((char *)Py_None) - 42; + Py_DECREF(x); + } + else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ + ffi_closure *closure = (ffi_closure *)cd->c_data; + PyObject *args = (PyObject *)(closure->user_data); + closure->user_data = NULL; + Py_XDECREF(args); + } return 0; } @@ -1487,11 +1539,25 @@ return result; } +static PyObject *_cdata_repr2(CDataObject *cd, char *text, PyObject *x) +{ + PyObject *res, *s = PyObject_Repr(x); + if (s == NULL) + return NULL; + res = PyText_FromFormat("", + cd->c_type->ct_name, text, PyText_AsUTF8(s)); + Py_DECREF(s); + return res; +} + static PyObject *cdataowning_repr(CDataObject *cd) { Py_ssize_t size; - if (cd->c_type->ct_flags & CT_POINTER) + if (cd->c_type->ct_flags & CT_POINTER) { + if (cd->c_type->ct_flags & CT_IS_VOID_PTR) + goto handle_repr; size = cd->c_type->ct_itemdescr->ct_size; + } else if (cd->c_type->ct_flags & CT_ARRAY) size = get_array_length(cd) * cd->c_type->ct_itemdescr->ct_size; else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) @@ -1504,18 +1570,17 @@ callback_repr: { - PyObject *s, *res; PyObject *args = (PyObject *)((ffi_closure *)cd->c_data)->user_data; if (args == NULL) return cdata_repr(cd); - - s = PyObject_Repr(PyTuple_GET_ITEM(args, 1)); - if (s == NULL) - return NULL; - res = PyText_FromFormat("", - cd->c_type->ct_name, PyText_AsUTF8(s)); - Py_DECREF(s); - return res; + else + return _cdata_repr2(cd, "calling", PyTuple_GET_ITEM(args, 1)); + } + + handle_repr: + { + PyObject *x = (PyObject *)(cd->c_data + 42); + return _cdata_repr2(cd, "handle to", x); } } @@ -1635,10 +1700,7 @@ static long cdata_hash(CDataObject *cd) { - long h = _Py_HashPointer(cd->c_type) ^ _Py_HashPointer(cd->c_data); - if (h == -1) - h = -2; - return h; + return _Py_HashPointer(cd->c_data); } static Py_ssize_t @@ -2049,7 +2111,7 @@ return 0; PyErr_SetString(PyExc_TypeError, "bad argument type for 'FILE' type (note that you cannot " - "pass Python files directly any more since CFFI 0.6; see " + "pass Python files directly any more since CFFI 1.0; see " "demo/file1.py)"); return -1; } @@ -2080,8 +2142,12 @@ if ((ctptr->ct_flags & CT_CAST_ANYTHING) || ((ctitem->ct_flags & (CT_PRIMITIVE_SIGNED|CT_PRIMITIVE_UNSIGNED)) && (ctitem->ct_size == sizeof(char)))) { +#if defined(CFFI_MEM_DEBUG) || defined(CFFI_MEM_LEAK) + length = PyBytes_GET_SIZE(init) + 1; +#else *output_data = PyBytes_AS_STRING(init); return 0; +#endif } else goto convert_default; @@ -2367,7 +2433,7 @@ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES, /* tp_flags */ 0, /* tp_doc */ - (traverseproc)cdata_traverse, /* tp_traverse */ + 0, /* tp_traverse */ 0, /* tp_clear */ cdata_richcompare, /* tp_richcompare */ offsetof(CDataObject, c_weakreflist), /* tp_weaklistoffset */ @@ -2409,6 +2475,41 @@ &CData_Type, /* tp_base */ }; +static PyTypeObject CDataOwningGC_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "_cffi_backend.CDataOwnGC", + 
sizeof(CDataObject), + 0, + (destructor)cdataowninggc_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES /* tp_flags */ + | Py_TPFLAGS_HAVE_GC, + 0, /* tp_doc */ + (traverseproc)cdataowninggc_traverse, /* tp_traverse */ + (inquiry)cdataowninggc_clear, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + &CDataOwning_Type, /* tp_base */ +}; + /************************************************************/ typedef struct { @@ -3228,6 +3329,8 @@ td->ct_flags = CT_POINTER; if (ctitem->ct_flags & (CT_STRUCT|CT_UNION)) td->ct_flags |= CT_IS_PTR_TO_OWNED; + if (ctitem->ct_flags & CT_VOID) + td->ct_flags |= CT_IS_VOID_PTR; if ((ctitem->ct_flags & CT_VOID) || ((ctitem->ct_flags & CT_PRIMITIVE_CHAR) && ctitem->ct_size == sizeof(char))) @@ -3311,22 +3414,18 @@ return (PyObject *)td; } -static PyObject *_b_struct_or_union_type(const char *kind, const char *name, - int flag) +static PyObject *_b_struct_or_union_type(const char *name, int flag) { - int kindlen = strlen(kind); int namelen = strlen(name); - CTypeDescrObject *td = ctypedescr_new(kindlen + 1 + namelen + 1); + CTypeDescrObject *td = ctypedescr_new(namelen + 1); if (td == NULL) return NULL; td->ct_size = -1; td->ct_length = -1; td->ct_flags = flag | CT_IS_OPAQUE; - memcpy(td->ct_name, kind, kindlen); - td->ct_name[kindlen] = ' '; - memcpy(td->ct_name + kindlen + 1, name, namelen + 1); - td->ct_name_position = kindlen + 1 + namelen; + memcpy(td->ct_name, name, namelen + 1); + td->ct_name_position = namelen; return (PyObject *)td; } @@ -3338,9 +3437,9 @@ return NULL; flag = CT_STRUCT; - if (strcmp(name, "_IO_FILE") == 0 || strcmp(name, "$FILE") == 0) + if (strcmp(name, "struct _IO_FILE") == 0 || strcmp(name, "FILE") == 0) flag |= _CT_IS_FILE; - return _b_struct_or_union_type("struct", name, flag); + return _b_struct_or_union_type(name, flag); } static PyObject *b_new_union_type(PyObject *self, PyObject *args) @@ -3348,7 +3447,7 @@ char *name; if (!PyArg_ParseTuple(args, "s:new_union_type", &name)) return NULL; - return _b_struct_or_union_type("union", name, CT_UNION); + return _b_struct_or_union_type(name, CT_UNION); } static CFieldObject * @@ -3384,20 +3483,33 @@ return cf; /* borrowed reference */ } +#define SF_MSVC_BITFIELDS 1 +#define SF_GCC_ARM_BITFIELDS 2 + static PyObject *b_complete_struct_or_union(PyObject *self, PyObject *args) { CTypeDescrObject *ct; PyObject *fields, *interned_fields, *ignored; int is_union, alignment; - Py_ssize_t offset, i, nb_fields, maxsize, prev_bit_position; + Py_ssize_t boffset, i, nb_fields, boffsetmax; Py_ssize_t totalsize = -1; int totalalignment = -1; - CFieldObject **previous, *prev_field; - - if (!PyArg_ParseTuple(args, "O!O!|Oni:complete_struct_or_union", + CFieldObject **previous; + int prev_bitfield_size, prev_bitfield_free; +#ifdef MS_WIN32 + int sflags = SF_MSVC_BITFIELDS; +#else +# ifdef __arm__ + int sflags = SF_GCC_ARM_BITFIELDS; +# else + int sflags = 0; +# endif +#endif + + if (!PyArg_ParseTuple(args, "O!O!|Onii:complete_struct_or_union", &CTypeDescr_Type, &ct, &PyList_Type, &fields, - &ignored, &totalsize, &totalalignment)) + &ignored, &totalsize, 
&totalalignment, &sflags)) return NULL; if ((ct->ct_flags & (CT_STRUCT|CT_IS_OPAQUE)) == @@ -3414,22 +3526,22 @@ return NULL; } - maxsize = 0; alignment = 1; - offset = 0; + boffset = 0; /* this number is in *bits*, not bytes! */ + boffsetmax = 0; /* the maximum value of boffset, in bits too */ + prev_bitfield_size = 0; + prev_bitfield_free = 0; nb_fields = PyList_GET_SIZE(fields); interned_fields = PyDict_New(); if (interned_fields == NULL) return NULL; previous = (CFieldObject **)&ct->ct_extra; - prev_bit_position = 0; - prev_field = NULL; for (i=0; i= 0) { + if (!(sflags & SF_MSVC_BITFIELDS)) { + /* GCC: anonymous bitfields (of any size) don't cause alignment */ + do_align = PyText_GetSize(fname) > 0; + } + else { + /* MSVC: zero-sized bitfields don't cause alignment */ + do_align = fbitsize > 0; + } + } + if (alignment < falign && do_align) alignment = falign; - /* align this field to its own 'falign' by inserting padding */ - offset = (offset + falign - 1) & ~(falign-1); - - if (foffset >= 0) { - /* a forced field position: ignore the offset just computed, - except to know if we must set CT_CUSTOM_FIELD_POS */ - if (offset != foffset) + if (fbitsize < 0) { + /* not a bitfield: common case */ + int bs_flag; + + if (ftype->ct_flags & CT_ARRAY && ftype->ct_length == 0) + bs_flag = BS_EMPTY_ARRAY; + else + bs_flag = BS_REGULAR; + + /* align this field to its own 'falign' by inserting padding */ + boffset = (boffset + falign*8-1) & ~(falign*8-1); /* bits! */ + + if (foffset >= 0) { + /* a forced field position: ignore the offset just computed, + except to know if we must set CT_CUSTOM_FIELD_POS */ + if (boffset != foffset * 8) + ct->ct_flags |= CT_CUSTOM_FIELD_POS; + boffset = foffset * 8; + } + + if (PyText_GetSize(fname) == 0 && + ftype->ct_flags & (CT_STRUCT|CT_UNION)) { + /* a nested anonymous struct or union */ + CFieldObject *cfsrc = (CFieldObject *)ftype->ct_extra; + for (; cfsrc != NULL; cfsrc = cfsrc->cf_next) { + /* broken complexity in the call to get_field_name(), + but we'll assume you never do that with nested + anonymous structures with thousand of fields */ + *previous = _add_field(interned_fields, + get_field_name(ftype, cfsrc), + cfsrc->cf_type, + boffset / 8 + cfsrc->cf_offset, + cfsrc->cf_bitshift, + cfsrc->cf_bitsize); + if (*previous == NULL) + goto error; + previous = &(*previous)->cf_next; + } + /* always forbid such structures from being passed by value */ ct->ct_flags |= CT_CUSTOM_FIELD_POS; - offset = foffset; - } - - if (fbitsize < 0 || (fbitsize == 8 * ftype->ct_size && - !(ftype->ct_flags & CT_PRIMITIVE_CHAR))) { - fbitsize = -1; - if (ftype->ct_flags & CT_ARRAY && ftype->ct_length == 0) - bitshift = BS_EMPTY_ARRAY; - else - bitshift = BS_REGULAR; - prev_bit_position = 0; - } - else { - if (!(ftype->ct_flags & (CT_PRIMITIVE_SIGNED | - CT_PRIMITIVE_UNSIGNED | - CT_PRIMITIVE_CHAR)) || -#ifdef HAVE_WCHAR_H - ((ftype->ct_flags & CT_PRIMITIVE_CHAR) - && ftype->ct_size == sizeof(wchar_t)) || -#endif - fbitsize == 0 || - fbitsize > 8 * ftype->ct_size) { - PyErr_Format(PyExc_TypeError, "invalid bit field '%s'", - PyText_AS_UTF8(fname)); - goto error; } - if (prev_bit_position > 0) { - assert(prev_field && prev_field->cf_bitshift >= 0); - if (prev_field->cf_type->ct_size != ftype->ct_size) { - PyErr_SetString(PyExc_NotImplementedError, - "consecutive bit fields should be " - "declared with a same-sized type"); - goto error; - } - else if (prev_bit_position + fbitsize > 8 * ftype->ct_size) { - prev_bit_position = 0; - } - else { - /* we can share the same field as 
'prev_field' */ - offset = prev_field->cf_offset; - } - } - bitshift = prev_bit_position; - if (!is_union) - prev_bit_position += fbitsize; - } - - if (PyText_GetSize(fname) == 0 && - ftype->ct_flags & (CT_STRUCT|CT_UNION)) { - /* a nested anonymous struct or union */ - CFieldObject *cfsrc = (CFieldObject *)ftype->ct_extra; - for (; cfsrc != NULL; cfsrc = cfsrc->cf_next) { - /* broken complexity in the call to get_field_name(), - but we'll assume you never do that with nested - anonymous structures with thousand of fields */ - *previous = _add_field(interned_fields, - get_field_name(ftype, cfsrc), - cfsrc->cf_type, - offset + cfsrc->cf_offset, - cfsrc->cf_bitshift, - cfsrc->cf_bitsize); + else { + *previous = _add_field(interned_fields, fname, ftype, + boffset / 8, bs_flag, -1); if (*previous == NULL) goto error; previous = &(*previous)->cf_next; } - /* always forbid such structures from being passed by value */ - ct->ct_flags |= CT_CUSTOM_FIELD_POS; - prev_field = NULL; + boffset += ftype->ct_size * 8; + prev_bitfield_size = 0; } else { - prev_field = _add_field(interned_fields, fname, ftype, - offset, bitshift, fbitsize); - if (prev_field == NULL) + /* this is the case of a bitfield */ + Py_ssize_t field_offset_bytes; + int bits_already_occupied, bitshift; + + if (foffset >= 0) { + PyErr_Format(PyExc_TypeError, + "field '%s.%s' is a bitfield, " + "but a fixed offset is specified", + ct->ct_name, PyText_AS_UTF8(fname)); goto error; - *previous = prev_field; - previous = &prev_field->cf_next; + } + + if (!(ftype->ct_flags & (CT_PRIMITIVE_SIGNED | + CT_PRIMITIVE_UNSIGNED | + CT_PRIMITIVE_CHAR))) { + PyErr_Format(PyExc_TypeError, + "field '%s.%s' declared as '%s' cannot be a bit field", + ct->ct_name, PyText_AS_UTF8(fname), + ftype->ct_name); + goto error; + } + if (fbitsize > 8 * ftype->ct_size) { + PyErr_Format(PyExc_TypeError, + "bit field '%s.%s' is declared '%s:%d', which " + "exceeds the width of the type", + ct->ct_name, PyText_AS_UTF8(fname), + ftype->ct_name, fbitsize); + goto error; + } + + /* compute the starting position of the theoretical field + that covers a complete 'ftype', inside of which we will + locate the real bitfield */ + field_offset_bytes = boffset / 8; + field_offset_bytes &= ~(falign - 1); + + if (fbitsize == 0) { + if (PyText_GetSize(fname) > 0) { + PyErr_Format(PyExc_TypeError, + "field '%s.%s' is declared with :0", + ct->ct_name, PyText_AS_UTF8(fname)); + } + if (!(sflags & SF_MSVC_BITFIELDS)) { + /* GCC's notion of "ftype :0;" */ + + /* pad boffset to a value aligned for "ftype" */ + if (boffset > field_offset_bytes * 8) { + field_offset_bytes += falign; + assert(boffset < field_offset_bytes * 8); + } + boffset = field_offset_bytes * 8; + } + else { + /* MSVC's notion of "ftype :0;" */ + + /* Mostly ignored. It seems they only serve as + separator between other bitfields, to force them + into separate words. */ + } + prev_bitfield_size = 0; + } + else { + if (!(sflags & SF_MSVC_BITFIELDS)) { + /* GCC's algorithm */ + + /* Can the field start at the offset given by 'boffset'? It + can if it would entirely fit into an aligned ftype field. 
*/ + bits_already_occupied = boffset - (field_offset_bytes * 8); + + if (bits_already_occupied + fbitsize > 8 * ftype->ct_size) { + /* it would not fit, we need to start at the next + allowed position */ + field_offset_bytes += falign; + assert(boffset < field_offset_bytes * 8); + boffset = field_offset_bytes * 8; + bitshift = 0; + } + else { + bitshift = bits_already_occupied; + assert(bitshift >= 0); + } + boffset += fbitsize; + } + else { + /* MSVC's algorithm */ + + /* A bitfield is considered as taking the full width + of their declared type. It can share some bits + with the previous field only if it was also a + bitfield and used a type of the same size. */ + if (prev_bitfield_size == ftype->ct_size && + prev_bitfield_free >= fbitsize) { + /* yes: reuse */ + bitshift = 8 * prev_bitfield_size - prev_bitfield_free; + } + else { + /* no: start a new full field */ + boffset = (boffset + falign*8-1) & ~(falign*8-1); /*align*/ + boffset += ftype->ct_size * 8; + bitshift = 0; + prev_bitfield_size = ftype->ct_size; + prev_bitfield_free = 8 * prev_bitfield_size; + } + prev_bitfield_free -= fbitsize; + field_offset_bytes = boffset / 8 - ftype->ct_size; + } + + *previous = _add_field(interned_fields, fname, ftype, + field_offset_bytes, bitshift, fbitsize); + if (*previous == NULL) + goto error; + previous = &(*previous)->cf_next; + } } - if (maxsize < ftype->ct_size) - maxsize = ftype->ct_size; - if (!is_union) - offset += ftype->ct_size; + if (boffset > boffsetmax) + boffsetmax = boffset; } *previous = NULL; - if (is_union) { - assert(offset == 0); - offset = maxsize; - } - /* Like C, if the size of this structure would be zero, we compute it as 1 instead. But for ctypes support, we allow the manually- specified totalsize to be zero in this case. */ + boffsetmax = (boffsetmax + 7) / 8; /* bits -> bytes */ if (totalsize < 0) { - offset = (offset + alignment - 1) & ~(alignment-1); - totalsize = (offset == 0 ? 1 : offset); - } - else if (totalsize < offset) { + totalsize = (boffsetmax + alignment - 1) & ~(alignment-1); + if (totalsize == 0) + totalsize = 1; + } + else if (totalsize < boffsetmax) { PyErr_Format(PyExc_TypeError, "%s cannot be of size %zd: there are fields at least " - "up to %zd", ct->ct_name, totalsize, offset); + "up to %zd", ct->ct_name, totalsize, boffsetmax); goto error; } ct->ct_size = totalsize; @@ -3676,19 +3881,6 @@ } assert(cf == NULL); -#ifdef USE_C_LIBFFI_MSVC - /* MSVC returns small structures in registers. Pretend int32 or - int64 return type. This is needed as a workaround for what - is really a bug of libffi_msvc seen as an independent library - (ctypes has a similar workaround). 
*/ - if (is_result_type) { - if (ct->ct_size <= 4) - return &ffi_type_sint32; - if (ct->ct_size <= 8) - return &ffi_type_sint64; - } -#endif - /* next, allocate and fill the flattened list */ elements = fb_alloc(fb, (nflat + 1) * sizeof(ffi_type*)); nflat = 0; @@ -4204,13 +4396,14 @@ closure = cffi_closure_alloc(); - cd = PyObject_New(CDataObject, &CDataOwning_Type); + cd = PyObject_GC_New(CDataObject, &CDataOwningGC_Type); if (cd == NULL) goto error; Py_INCREF(ct); cd->c_type = ct; cd->c_data = (char *)closure; cd->c_weakreflist = NULL; + PyObject_GC_Track(cd); cif_descr = (cif_description_t *)ct->ct_extra; if (cif_descr == NULL) { @@ -4317,13 +4510,12 @@ Py_CLEAR(dict2); Py_CLEAR(dict1); - name_size = strlen("enum ") + strlen(ename) + 1; + name_size = strlen(ename) + 1; td = ctypedescr_new(name_size); if (td == NULL) goto error; - memcpy(td->ct_name, "enum ", strlen("enum ")); - memcpy(td->ct_name + strlen("enum "), ename, name_size - strlen("enum ")); + memcpy(td->ct_name, ename, name_size); td->ct_stuff = combined; td->ct_size = basetd->ct_size; td->ct_length = basetd->ct_length; /* alignment */ @@ -4627,6 +4819,58 @@ return Py_None; } +static PyObject *b_newp_handle(PyObject *self, PyObject *args) +{ + CTypeDescrObject *ct; + CDataObject *cd; + PyObject *x; + if (!PyArg_ParseTuple(args, "O!O", &CTypeDescr_Type, &ct, &x)) + return NULL; + + if (!(ct->ct_flags & CT_IS_VOID_PTR)) { + PyErr_Format(PyExc_TypeError, "needs 'void *', got '%s'", ct->ct_name); + return NULL; + } + + cd = (CDataObject *)PyObject_GC_New(CDataObject, &CDataOwningGC_Type); + if (cd == NULL) + return NULL; + Py_INCREF(ct); + cd->c_type = ct; + Py_INCREF(x); + cd->c_data = ((char *)x) - 42; + cd->c_weakreflist = NULL; + PyObject_GC_Track(cd); + return (PyObject *)cd; +} + +static PyObject *b_from_handle(PyObject *self, PyObject *arg) +{ + CTypeDescrObject *ct; + char *raw; + PyObject *x; + if (!CData_Check(arg)) { + PyErr_SetString(PyExc_TypeError, "expected a 'cdata' object"); + return NULL; + } + ct = ((CDataObject *)arg)->c_type; + raw = ((CDataObject *)arg)->c_data; + if (!(ct->ct_flags & CT_CAST_ANYTHING)) { + PyErr_Format(PyExc_TypeError, + "expected a 'cdata' object with a 'void *' out of " + "new_handle(), got '%s'", ct->ct_name); + return NULL; + } + if (!raw) { + PyErr_SetString(PyExc_RuntimeError, + "cannot use from_handle() on NULL pointer"); + return NULL; + } + x = (PyObject *)(raw + 42); + Py_INCREF(x); + return x; +} + static PyObject *b__get_types(PyObject *self, PyObject *noarg) { return PyTuple_Pack(2, (PyObject *)&CData_Type, @@ -4760,11 +5004,12 @@ return ptr->a1 + (int)ptr->a2; } -static long double _testfunc19(long double x) +static long double _testfunc19(long double x, int count) { int i; - for (i=0; i<28; i++) - x += x; + for (i=0; i"); + if (v == NULL || PyDict_SetItemString(CData_Type.tp_dict, + "__name__", v) < 0) + INITERROR; + v = PyCapsule_New((void *)cffi_exports, "cffi", NULL); if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.6"); + v = PyText_FromString("0.7"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -46,7 +46,7 @@ register ffi_type **p_arg; argp = stack; - if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + if (ecif->cif->flags == FFI_TYPE_STRUCT) { *(void **) argp = ecif->rvalue; argp += sizeof(void *); @@ -102,6 +102,15 @@ FFI_ASSERT(0); } } +#ifdef _WIN64 + else if (z > 8) + { + /* On Win64, if 
a single argument takes more than 8 bytes, + then it is always passed by reference. */ + *(void **)argp = *p_argv; + z = 8; + } +#endif else { memcpy(argp, *p_argv, z); @@ -124,7 +133,6 @@ switch (cif->rtype->type) { case FFI_TYPE_VOID: - case FFI_TYPE_STRUCT: case FFI_TYPE_SINT64: case FFI_TYPE_FLOAT: case FFI_TYPE_DOUBLE: @@ -132,6 +140,18 @@ cif->flags = (unsigned) cif->rtype->type; break; + case FFI_TYPE_STRUCT: + /* MSVC returns small structures in registers. Put in cif->flags + the value FFI_TYPE_STRUCT only if the structure is big enough; + otherwise, put the 4- or 8-bytes integer type. */ + if (cif->rtype->size <= 4) + cif->flags = FFI_TYPE_INT; + else if (cif->rtype->size <= 8) + cif->flags = FFI_TYPE_SINT64; + else + cif->flags = FFI_TYPE_STRUCT; + break; + case FFI_TYPE_UINT64: #ifdef _WIN64 case FFI_TYPE_POINTER: @@ -180,7 +200,7 @@ /* value address then we need to make one */ if ((rvalue == NULL) && - (cif->rtype->type == FFI_TYPE_STRUCT)) + (cif->flags == FFI_TYPE_STRUCT)) { /*@-sysunrecog@*/ ecif.rvalue = alloca(cif->rtype->size); @@ -201,8 +221,7 @@ #else case FFI_SYSV: /*@-usedef@*/ - /* Function call needs at least 40 bytes stack size, on win64 AMD64 */ - return ffi_call_AMD64(ffi_prep_args, &ecif, cif->bytes ? cif->bytes : 40, + return ffi_call_AMD64(ffi_prep_args, &ecif, cif->bytes, cif->flags, ecif.rvalue, fn); /*@=usedef@*/ break; @@ -227,7 +246,7 @@ #else static void __fastcall #endif -ffi_closure_SYSV (ffi_closure *closure, int *argp) +ffi_closure_SYSV (ffi_closure *closure, char *argp) { // this is our return value storage long double res; @@ -237,7 +256,7 @@ void **arg_area; unsigned short rtype; void *resp = (void*)&res; - void *args = &argp[1]; + void *args = argp + sizeof(void *); cif = closure->cif; arg_area = (void**) alloca (cif->nargs * sizeof (void*)); @@ -338,7 +357,7 @@ argp = stack; - if ( cif->rtype->type == FFI_TYPE_STRUCT ) { + if ( cif->flags == FFI_TYPE_STRUCT ) { *rvalue = *(void **) argp; argp += 4; } @@ -358,6 +377,16 @@ /* because we're little endian, this is what it turns into. */ +#ifdef _WIN64 + if (z > 8) + { + /* On Win64, if a single argument takes more than 8 bytes, + then it is always passed by reference. */ + *p_argv = *((void**) argp); + z = 8; + } + else +#endif *p_argv = (void*) argp; p_argv++; diff --git a/c/libffi_msvc/prep_cif.c b/c/libffi_msvc/prep_cif.c --- a/c/libffi_msvc/prep_cif.c +++ b/c/libffi_msvc/prep_cif.c @@ -116,9 +116,9 @@ #if !defined M68K && !defined __x86_64__ && !defined S390 /* Make space for the return structure pointer */ if (cif->rtype->type == FFI_TYPE_STRUCT - /* MSVC returns small structures in registers. But we have a different - workaround: pretend int32 or int64 return type, and converting to - structure afterwards. 
*/ +#ifdef _WIN32 + && (cif->rtype->size > 8) /* MSVC returns small structs in registers */ +#endif #ifdef SPARC && (cif->abi != FFI_V9 || cif->rtype->size > 32) #endif @@ -168,6 +168,12 @@ #endif } +#ifdef _WIN64 + /* Function call needs at least 40 bytes stack size, on win64 AMD64 */ + if (bytes < 40) + bytes = 40; +#endif + cif->bytes = bytes; /* Perform machine dependent cif processing */ diff --git a/c/libffi_msvc/types.c b/c/libffi_msvc/types.c --- a/c/libffi_msvc/types.c +++ b/c/libffi_msvc/types.c @@ -43,7 +43,7 @@ FFI_INTEGRAL_TYPEDEF(float, 4, 4, FFI_TYPE_FLOAT); #if defined ALPHA || defined SPARC64 || defined X86_64 || defined S390X \ - || defined IA64 + || defined IA64 || defined _WIN64 FFI_INTEGRAL_TYPEDEF(pointer, 8, 8, FFI_TYPE_POINTER); diff --git a/c/libffi_msvc/win64.obj b/c/libffi_msvc/win64.obj new file mode 100644 index 0000000000000000000000000000000000000000..38d3cd166b0ecad62ea4d9aab86cc574de0c9fe7 GIT binary patch [cut] diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -376,8 +376,9 @@ BInt = new_primitive_type("int") BFloat = new_primitive_type("float") for i in range(1, 20): - if (hash(cast(BChar, chr(i))) != - hash(cast(BInt, i))): + x1 = cast(BChar, chr(i)) + x2 = cast(BInt, i) + if hash(x1) != hash(x2): break else: raise AssertionError("hashes are equal") @@ -651,6 +652,8 @@ def test_new_struct_type(): BStruct = new_struct_type("foo") + assert repr(BStruct) == "" + BStruct = new_struct_type("struct foo") assert repr(BStruct) == "" BPtr = new_pointer_type(BStruct) assert repr(BPtr) == "" @@ -658,7 +661,7 @@ py.test.raises(ValueError, alignof, BStruct) def test_new_union_type(): - BUnion = new_union_type("foo") + BUnion = new_union_type("union foo") assert repr(BUnion) == "" BPtr = new_pointer_type(BUnion) assert repr(BPtr) == "" @@ -667,7 +670,7 @@ BLong = new_primitive_type("long") BChar = new_primitive_type("char") BShort = new_primitive_type("short") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") assert BStruct.kind == "struct" assert BStruct.cname == "struct foo" assert BStruct.fields is None @@ -699,7 +702,7 @@ def test_complete_union(): BLong = new_primitive_type("long") BChar = new_primitive_type("char") - BUnion = new_union_type("foo") + BUnion = new_union_type("union foo") assert BUnion.kind == "union" assert BUnion.cname == "union foo" assert BUnion.fields is None @@ -718,7 +721,7 @@ def test_struct_instance(): BInt = new_primitive_type("int") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) p = cast(BStructPtr, 0) py.test.raises(AttributeError, "p.a1") # opaque @@ -738,7 +741,7 @@ def test_union_instance(): BInt = new_primitive_type("int") BUInt = new_primitive_type("unsigned int") - BUnion = new_union_type("bar") + BUnion = new_union_type("union bar") complete_struct_or_union(BUnion, [('a1', BInt, -1), ('a2', BUInt, -1)]) p = newp(new_pointer_type(BUnion), [-42]) bigval = -42 + (1 << (8*size_of_int())) @@ -754,7 +757,7 @@ def test_struct_pointer(): BInt = new_primitive_type("int") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', BInt, -1), ('a2', BInt, -1)]) @@ -768,7 +771,7 @@ BVoidP = new_pointer_type(new_void_type()) BInt = new_primitive_type("int") BIntPtr = new_pointer_type(BInt) - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) 
complete_struct_or_union(BStruct, [('a1', BInt, -1), ('a2', BInt, -1), @@ -802,7 +805,7 @@ def test_array_in_struct(): BInt = new_primitive_type("int") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BArrayInt5 = new_array_type(new_pointer_type(BInt), 5) complete_struct_or_union(BStruct, [('a1', BArrayInt5, -1)]) s = newp(new_pointer_type(BStruct), [[20, 24, 27, 29, 30]]) @@ -813,7 +816,7 @@ def offsetof(BType, fieldname): return typeoffsetof(BType, fieldname)[1] BInt = new_primitive_type("int") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") py.test.raises(TypeError, offsetof, BInt, "abc") py.test.raises(TypeError, offsetof, BStruct, "abc") complete_struct_or_union(BStruct, [('abc', BInt, -1), ('def', BInt, -1)]) @@ -842,7 +845,7 @@ def test_function_type_taking_struct(): BChar = new_primitive_type("char") BShort = new_primitive_type("short") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, [('a1', BChar, -1), ('a2', BShort, -1)]) BFunc = new_function_type((BStruct,), BShort, False) @@ -947,7 +950,7 @@ def test_call_function_7(): BChar = new_primitive_type("char") BShort = new_primitive_type("short") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', BChar, -1), ('a2', BShort, -1)]) @@ -963,7 +966,7 @@ def test_call_function_20(): BChar = new_primitive_type("char") BShort = new_primitive_type("short") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', BChar, -1), ('a2', BShort, -1)]) @@ -975,7 +978,7 @@ def test_call_function_21(): BInt = new_primitive_type("int") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, [('a', BInt, -1), ('b', BInt, -1), ('c', BInt, -1), @@ -995,7 +998,7 @@ def test_call_function_22(): BInt = new_primitive_type("int") BArray10 = new_array_type(new_pointer_type(BInt), 10) - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructP = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BArray10, -1)]) BFunc22 = new_function_type((BStruct, BStruct), BStruct, False) @@ -1033,7 +1036,7 @@ def test_cannot_pass_struct_with_array_of_length_0(): BInt = new_primitive_type("int") BArray0 = new_array_type(new_pointer_type(BInt), 0) - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, [('a', BArray0)]) py.test.raises(NotImplementedError, new_function_type, (BStruct,), BInt, False) @@ -1058,7 +1061,7 @@ def test_cannot_call_with_a_autocompleted_struct(): BSChar = new_primitive_type("signed char") BDouble = new_primitive_type("double") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('c', BDouble, -1, 8), ('a', BSChar, -1, 2), @@ -1227,11 +1230,59 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) 
+ p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") BDouble = new_primitive_type("double") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BSChar, -1), ('b', BDouble, -1)]) @@ -1246,9 +1297,33 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a', BInt, -1), ('b', BInt, -1), @@ -1285,26 +1360,27 @@ def test_enum_type(): BUInt = new_primitive_type("unsigned int") BEnum = new_enum_type("foo", (), (), BUInt) - assert repr(BEnum) == "" + assert repr(BEnum) == "" assert BEnum.kind == "enum" - assert BEnum.cname == "enum foo" + assert BEnum.cname == "foo" assert BEnum.elements == {} # BInt = new_primitive_type("int") - BEnum = new_enum_type("foo", ('def', 'c', 'ab'), (0, 1, -20), BInt) + BEnum = new_enum_type("enum foo", ('def', 'c', 'ab'), (0, 1, -20), BInt) assert BEnum.kind == "enum" + assert BEnum.cname == "enum foo" assert BEnum.elements == {-20: 'ab', 0: 'def', 1: 'c'} # 'elements' is not the real dict, but merely a copy BEnum.elements[2] = '??' 
assert BEnum.elements == {-20: 'ab', 0: 'def', 1: 'c'} # - BEnum = new_enum_type("bar", ('ab', 'cd'), (5, 5), BUInt) + BEnum = new_enum_type("enum bar", ('ab', 'cd'), (5, 5), BUInt) assert BEnum.elements == {5: 'ab'} assert BEnum.relements == {'ab': 5, 'cd': 5} def test_cast_to_enum(): BInt = new_primitive_type("int") - BEnum = new_enum_type("foo", ('def', 'c', 'ab'), (0, 1, -20), BInt) + BEnum = new_enum_type("enum foo", ('def', 'c', 'ab'), (0, 1, -20), BInt) assert sizeof(BEnum) == sizeof(BInt) e = cast(BEnum, 0) assert repr(e) == "" @@ -1318,27 +1394,27 @@ assert string(cast(BEnum, -242 + 2**128)) == '-242' # BUInt = new_primitive_type("unsigned int") - BEnum = new_enum_type("bar", ('def', 'c', 'ab'), (0, 1, 20), BUInt) + BEnum = new_enum_type("enum bar", ('def', 'c', 'ab'), (0, 1, 20), BUInt) e = cast(BEnum, -1) assert repr(e) == "" # unsigned int # BLong = new_primitive_type("long") - BEnum = new_enum_type("baz", (), (), BLong) + BEnum = new_enum_type("enum baz", (), (), BLong) assert sizeof(BEnum) == sizeof(BLong) e = cast(BEnum, -1) assert repr(e) == "" def test_enum_with_non_injective_mapping(): BInt = new_primitive_type("int") - BEnum = new_enum_type("foo", ('ab', 'cd'), (7, 7), BInt) + BEnum = new_enum_type("enum foo", ('ab', 'cd'), (7, 7), BInt) e = cast(BEnum, 7) assert repr(e) == "" assert string(e) == 'ab' def test_enum_in_struct(): BInt = new_primitive_type("int") - BEnum = new_enum_type("foo", ('def', 'c', 'ab'), (0, 1, -20), BInt) - BStruct = new_struct_type("bar") + BEnum = new_enum_type("enum foo", ('def', 'c', 'ab'), (0, 1, -20), BInt) + BStruct = new_struct_type("struct bar") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', BEnum, -1)]) p = newp(BStructPtr, [-20]) @@ -1442,7 +1518,7 @@ def test_struct_with_bitfields(): BLong = new_primitive_type("long") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") LONGBITS = 8 * sizeof(BLong) complete_struct_or_union(BStruct, [('a1', BLong, 1), ('a2', BLong, 2), @@ -1465,7 +1541,7 @@ def test_bitfield_instance(): BInt = new_primitive_type("int") BUnsignedInt = new_primitive_type("unsigned int") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, [('a1', BInt, 1), ('a2', BUnsignedInt, 2), ('a3', BInt, 3)]) @@ -1495,14 +1571,14 @@ def test_bitfield_instance_init(): BInt = new_primitive_type("int") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, [('a1', BInt, 1)]) p = newp(new_pointer_type(BStruct), [-1]) assert p.a1 == -1 p = newp(new_pointer_type(BStruct), {'a1': -1}) assert p.a1 == -1 # - BUnion = new_union_type("bar") + BUnion = new_union_type("union bar") complete_struct_or_union(BUnion, [('a1', BInt, 1)]) p = newp(new_pointer_type(BUnion), [-1]) assert p.a1 == -1 @@ -1571,7 +1647,7 @@ py.test.raises(IndexError, newp, BArray, tuple(b'123456')) py.test.raises(IndexError, newp, BArray, list(b'123456')) py.test.raises(IndexError, newp, BArray, b'123456') - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, []) py.test.raises(TypeError, newp, new_pointer_type(BStruct), b'') py.test.raises(ValueError, newp, new_pointer_type(BStruct), [b'1']) @@ -1611,7 +1687,7 @@ p = newp(new_pointer_type(BFloat), cast(BFloat, 12.25)) assert p[0] == 12.25 # - BStruct = new_struct_type("foo_s") + BStruct = new_struct_type("struct foo_s") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', 
BInt, -1)]) s1 = newp(BStructPtr, [42]) @@ -1629,7 +1705,7 @@ s2 = newp(BStructPtr, s1[0]) assert s2.a1 == 42 # - BUnion = new_union_type("foo_u") + BUnion = new_union_type("union foo_u") BUnionPtr = new_pointer_type(BUnion) complete_struct_or_union(BUnion, [('a1', BInt, -1)]) u1 = newp(BUnionPtr, [42]) @@ -1707,7 +1783,7 @@ BChar = new_primitive_type("char") BCharP = new_pointer_type(BChar) BCharArray10 = new_array_type(BCharP, 10) - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', BCharArray10, -1)]) p = newp(BStructPtr, None) @@ -1724,7 +1800,7 @@ new_function_type((), BFunc) # works new_function_type((), new_primitive_type("int")) new_function_type((), new_pointer_type(BFunc)) - BUnion = new_union_type("foo_u") + BUnion = new_union_type("union foo_u") complete_struct_or_union(BUnion, []) py.test.raises(NotImplementedError, new_function_type, (), BUnion) py.test.raises(TypeError, new_function_type, (), BArray) @@ -1735,7 +1811,7 @@ BFloat = new_primitive_type("float") BDouble = new_primitive_type("double") BInt = new_primitive_type("int") - BStruct = new_struct_type("foo_s") + BStruct = new_struct_type("struct foo_s") complete_struct_or_union(BStruct, [('a1', BChar, -1), ('a2', BShort, -1)]) BFunc10 = new_function_type((BInt,), BStruct) @@ -1745,7 +1821,7 @@ assert s.a1 == bytechr(40) assert s.a2 == 40 * 40 # - BStruct11 = new_struct_type("test11") + BStruct11 = new_struct_type("struct test11") complete_struct_or_union(BStruct11, [('a1', BInt, -1), ('a2', BInt, -1)]) BFunc11 = new_function_type((BInt,), BStruct11) @@ -1755,7 +1831,7 @@ assert s.a1 == 40 assert s.a2 == 40 * 40 # - BStruct12 = new_struct_type("test12") + BStruct12 = new_struct_type("struct test12") complete_struct_or_union(BStruct12, [('a1', BDouble, -1), ]) BFunc12 = new_function_type((BInt,), BStruct12) @@ -1764,7 +1840,7 @@ assert repr(s) == "" assert s.a1 == 40.0 # - BStruct13 = new_struct_type("test13") + BStruct13 = new_struct_type("struct test13") complete_struct_or_union(BStruct13, [('a1', BInt, -1), ('a2', BInt, -1), ('a3', BInt, -1)]) @@ -1776,7 +1852,7 @@ assert s.a2 == 40 * 40 assert s.a3 == 40 * 40 * 40 # - BStruct14 = new_struct_type("test14") + BStruct14 = new_struct_type("struct test14") complete_struct_or_union(BStruct14, [('a1', BFloat, -1), ]) BFunc14 = new_function_type((BInt,), BStruct14) @@ -1785,7 +1861,7 @@ assert repr(s) == "" assert s.a1 == 40.0 # - BStruct15 = new_struct_type("test15") + BStruct15 = new_struct_type("struct test15") complete_struct_or_union(BStruct15, [('a1', BFloat, -1), ('a2', BInt, -1)]) BFunc15 = new_function_type((BInt,), BStruct15) @@ -1795,7 +1871,7 @@ assert s.a1 == 40.0 assert s.a2 == 40 * 40 # - BStruct16 = new_struct_type("test16") + BStruct16 = new_struct_type("struct test16") complete_struct_or_union(BStruct16, [('a1', BFloat, -1), ('a2', BFloat, -1)]) BFunc16 = new_function_type((BInt,), BStruct16) @@ -1805,7 +1881,7 @@ assert s.a1 == 40.0 assert s.a2 == -40.0 # - BStruct17 = new_struct_type("test17") + BStruct17 = new_struct_type("struct test17") complete_struct_or_union(BStruct17, [('a1', BInt, -1), ('a2', BFloat, -1)]) BFunc17 = new_function_type((BInt,), BStruct17) @@ -1828,7 +1904,7 @@ BFunc2 = new_function_type((), new_primitive_type("short")) BCharP = new_pointer_type(new_primitive_type("char")) BIntP = new_pointer_type(new_primitive_type("int")) - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = 
new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', BFunc, -1)]) newp(BStructPtr, [cast(BFunc, 0)]) @@ -1855,7 +1931,7 @@ assert not pyuni4 # BWCharP = new_pointer_type(BWChar) - BStruct = new_struct_type("foo_s") + BStruct = new_struct_type("struct foo_s") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', BWChar, -1), ('a2', BWCharP, -1)]) @@ -1967,7 +2043,7 @@ # exception to the no-keepalive rule: p=newp(BStructPtr) returns a # pointer owning the memory, and p[0] returns a pointer to the # struct that *also* owns the memory - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', new_primitive_type("int"), -1), ('a2', new_primitive_type("int"), -1), @@ -1987,7 +2063,7 @@ assert q.a1 == 123456 def test_nokeepalive_struct(): - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) BStructPtrPtr = new_pointer_type(BStructPtr) complete_struct_or_union(BStruct, [('a1', new_primitive_type("int"), -1)]) @@ -2115,6 +2191,9 @@ py.test.raises(ValueError, 'buf[:] = b"this is much too long!"') buf[4:2] = b"" # no effect, but should work assert buf[:] == b"hi there\x00" + buf[:2] = b"HI" + assert buf[:] == b"HI there\x00" + buf[:2] = b"hi" expected = list(map(bitem2bchr, b"hi there\x00")) x = 0 for i in range(-12, 12): @@ -2147,6 +2226,7 @@ def test_errno_callback(): if globals().get('PY_DOT_PY') == '2.5': py.test.skip("cannot run this test on py.py with Python 2.5") + set_errno(95) def cb(): e = get_errno() set_errno(e - 6) @@ -2170,7 +2250,7 @@ assert repr(x) == "" def test_cast_invalid(): - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, []) p = cast(new_pointer_type(BStruct), 123456) s = p[0] @@ -2189,7 +2269,7 @@ def test_bug_delattr(): BLong = new_primitive_type("long") - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, [('a1', BLong, -1)]) x = newp(new_pointer_type(BStruct)) py.test.raises(AttributeError, "del x.a1") @@ -2198,7 +2278,7 @@ py.test.skip("later") BLong = new_primitive_type("long") BArray = new_array_type(new_pointer_type(BLong), None) - BStruct = new_struct_type("foo") + BStruct = new_struct_type("struct foo") BStructP = new_pointer_type(BStruct) complete_struct_or_union(BStruct, [('a1', BLong, -1), ('a2', BArray, -1)]) @@ -2273,6 +2353,7 @@ def test_longdouble(): py_py = 'PY_DOT_PY' in globals() + BInt = new_primitive_type("int") BLongDouble = new_primitive_type("long double") BLongDoublePtr = new_pointer_type(BLongDouble) BLongDoubleArray = new_array_type(BLongDoublePtr, None) @@ -2290,28 +2371,30 @@ assert float(x) == 1.23 assert int(x) == 1 # - BFunc19 = new_function_type((BLongDouble,), BLongDouble) + BFunc19 = new_function_type((BLongDouble, BInt), BLongDouble) f = cast(BFunc19, _testfunc(19)) - start = 8 + start = lstart = 1.5 for i in range(107): - start = f(start) - if sizeof(BLongDouble) > sizeof(new_primitive_type("double")): - if not py_py: - assert repr(start).startswith("") - # - c = newp(BLongDoubleArray, [start]) - x = c[0] - if not py_py: - assert repr(x).endswith("E+902>") - assert float(x) == float("inf") + start = 4 * start - start * start + lstart = f(lstart, 1) + lother = f(1.5, 107) + if not py_py: + assert float(lstart) == float(lother) + assert repr(lstart) == repr(lother) + if sizeof(BLongDouble) > 
sizeof(new_primitive_type("double")): + assert float(lstart) != start + assert repr(lstart).startswith("' + assert hasattr(x, '__doc__') + +def test_different_types_of_ptr_equality(): + BVoidP = new_pointer_type(new_void_type()) + BIntP = new_pointer_type(new_primitive_type("int")) + x = cast(BVoidP, 12345) + assert x == cast(BIntP, 12345) + assert x != cast(BIntP, 12344) + assert hash(x) == hash(cast(BIntP, 12345)) + +def test_new_handle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + BCharP = new_pointer_type(new_primitive_type("char")) + class mylist(list): + pass + o = mylist([2, 3, 4]) + x = newp_handle(BVoidP, o) + assert repr(x) == "" + assert x + assert from_handle(x) is o + assert from_handle(cast(BCharP, x)) is o + wr = _weakref.ref(o) + del o + import gc; gc.collect() + assert wr() is not None + assert from_handle(x) == list((2, 3, 4)) + assert from_handle(cast(BCharP, x)) == list((2, 3, 4)) + del x + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) + +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + +def _test_bitfield_details(flag): + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BInt = new_primitive_type("int") + BUInt = new_primitive_type("unsigned int") + BStruct = new_struct_type("struct foo1") + complete_struct_or_union(BStruct, [('a', BChar, -1), + ('b1', BInt, 9), + ('b2', BUInt, 7), + ('c', BChar, -1)], -1, -1, -1, flag) + if flag % 2 == 0: # gcc and gcc ARM + assert typeoffsetof(BStruct, 'c') == (BChar, 3) + assert sizeof(BStruct) == 4 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 8) + assert sizeof(BStruct) == 12 + assert alignof(BStruct) == 4 + # + BStruct = new_struct_type("struct foo2") + complete_struct_or_union(BStruct, [('a', BChar, -1), + ('', BShort, 9), + ('c', BChar, -1)], -1, -1, -1, flag) + assert typeoffsetof(BStruct, 'c') == (BChar, 4) + if flag == 0: # gcc + assert sizeof(BStruct) == 5 + assert alignof(BStruct) == 1 + elif flag == 1: # msvc + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 + else: # gcc ARM + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 + # + BStruct = new_struct_type("struct foo2") + complete_struct_or_union(BStruct, [('a', BChar, -1), + ('', BInt, 0), + ('', BInt, 0), + ('c', BChar, -1)], -1, -1, -1, flag) + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 4) + assert sizeof(BStruct) == 5 + assert alignof(BStruct) == 1 + elif flag == 1: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 + assert alignof(BStruct) == 1 + else: # gcc ARM + assert typeoffsetof(BStruct, 'c') == (BChar, 4) + assert sizeof(BStruct) == 8 + assert alignof(BStruct) == 4 + + +def test_bitfield_as_gcc(): + _test_bitfield_details(flag=0) + +def test_bitfield_as_msvc(): + _test_bitfield_details(flag=1) + +def test_bitfield_as_arm_gcc(): + _test_bitfield_details(flag=2) + def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.6" + assert __version__ == "0.7" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, 
VerificationMissing -__version__ = "0.6" -__version_info__ = (0, 6) +__version__ = "0.7.2" +__version_info__ = (0, 7, 2) diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -53,6 +53,9 @@ # You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with # _cffi_backend.so compiled. import _cffi_backend as backend + from . import __version__ + assert (backend.__version__ == __version__ or + backend.__version__ == __version__[:3]) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) @@ -71,15 +74,15 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - BVoidP = self._get_cached_btype(model.voidp_type) + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): - FFI.NULL = self.cast(BVoidP, 0) + FFI.NULL = self.cast(self.BVoidP, 0) FFI.CData, FFI.CType = backend._get_types() else: # ctypes backend: attach these constants to the instance - self.NULL = self.cast(BVoidP, 0) + self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() def cdef(self, csource, override=False): @@ -137,8 +140,13 @@ """ if isinstance(cdecl, basestring): return self._typeof(cdecl) - else: + if isinstance(cdecl, self.CData): return self._backend.typeof(cdecl) + if isinstance(cdecl, types.BuiltinFunctionType): + res = _builtin_function_type(cdecl) + if res is not None: + return res + raise TypeError(type(cdecl)) def sizeof(self, cdecl): """Return the size in bytes of the argument. It can be a @@ -216,7 +224,7 @@ it as a string or unicode string. If 'cdata' is an enum, returns the value of the enumerator as a - string, or '#NUMBER' if the value is out of range. + string, or 'NUMBER' if the value is out of range. """ return self._backend.string(cdata, maxlen) @@ -342,6 +350,12 @@ self._cdefsources.extend(ffi_to_include._cdefsources) self._cdefsources.append(']') + def new_handle(self, x): + return self._backend.newp_handle(self.BVoidP, x) + + def from_handle(self, x): + return self._backend.from_handle(x) + def _make_ffi_library(ffi, libname, flags, guess=False): import os @@ -353,23 +367,27 @@ _name = 'c' # on Posix only try: if '.' 
not in _name and '/' not in _name: - raise OSError + raise OSError("library not found: %r" % (_name,)) From noreply at buildbot.pypy.org Fri Aug 2 17:37:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Aug 2013 17:37:32 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Merge fix Message-ID: <20130802153732.5398C1C3333@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1343:d4d857462b18 Date: 2013-08-02 17:36 +0200 http://bitbucket.org/cffi/cffi/changeset/d4d857462b18/ Log: Merge fix diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2739,7 +2739,7 @@ assert list(c) == [0, 40, 50, 30, 0] def test_FILE_forbidden(): - BFILE = new_struct_type("_IO_FILE") + BFILE = new_struct_type("struct _IO_FILE") BFILEP = new_pointer_type(BFILE) BFunc = new_function_type((BFILEP,), BFILEP, False) func = cast(BFunc, 0) From noreply at buildbot.pypy.org Fri Aug 2 21:09:20 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 Aug 2013 21:09:20 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1573: cursor description names should be plain strs Message-ID: <20130802190920.2A3CE1C00F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65910:56e444747761 Date: 2013-08-02 11:51 -0700 http://bitbucket.org/pypy/pypy/changeset/56e444747761/ Log: issue1573: cursor description names should be plain strs diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1305,7 +1305,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Tests for _sqlite3.py""" import pytest, sys @@ -222,3 +223,8 @@ cur.execute("create table test(a)") cur.executemany("insert into test values (?)", [[1], [2], [3]]) assert cur.lastrowid is None + +def test_issue1573(con): + cur = con.cursor() + cur.execute(u'SELECT 1 as méil') + assert cur.description[0][0] == u"méil".encode('utf-8') From noreply at buildbot.pypy.org Fri Aug 2 21:09:22 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 Aug 2013 21:09:22 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130802190922.596271C00F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65911:de2ab686aa9d Date: 2013-08-02 12:03 -0700 http://bitbucket.org/pypy/pypy/changeset/de2ab686aa9d/ Log: merge default diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -966,7 +966,7 @@ r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") if lib.color_content(color, r, g, b) == lib.ERR: raise error("Argument 1 was out of range. 
Check value of COLORS.") - return (r, g, b) + return (r[0], g[0], b[0]) def color_pair(n): @@ -1121,6 +1121,7 @@ term = ffi.NULL err = ffi.new("int *") if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] if err == 0: raise error("setupterm: could not find terminal") elif err == -1: diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1317,7 +1317,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.0' +version = '2.1' # The full version, including alpha/beta/rc tags. -release = '2.0.2' +release = '2.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -104,8 +104,8 @@ executable. The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.7.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``RPython magically makes you rich and famous (says so on the tin)'' @@ -171,7 +171,7 @@ the ``bin/pypy`` executable. To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. in ``/opt/pypy2.0``) and put a symlink to the +whole hierarchy alone (e.g. in ``/opt/pypy2.1``) and put a symlink to the ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` If the executable fails to find suitable libraries, it will report diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,10 +53,10 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-2.0.tar.bz2 - $ ./pypy-2.0/bin/pypy - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + $ tar xf pypy-2.1.tar.bz2 + $ ./pypy-2.1/bin/pypy + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``PyPy is an exciting technology that lets you to write fast, portable, multi-platform interpreters with less @@ -75,14 +75,14 @@ $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.0/bin/pypy distribute_setup.py + $ ./pypy-2.1/bin/pypy distribute_setup.py - $ ./pypy-2.0/bin/pypy get-pip.py + $ ./pypy-2.1/bin/pypy get-pip.py - $ ./pypy-2.0/bin/pip install pygments # for example + $ ./pypy-2.1/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-2.0/site-packages``, and -the scripts in ``pypy-2.0/bin``. 
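
The two _curses fixes above are both about cffi out-parameters: ffi.new("short *") (or "int *") hands the C call a fresh one-slot cdata to fill in, and the Python side must read slot 0 afterwards instead of passing the cdata itself along. A standalone sketch of the pattern, assuming a Unix libc and using sscanf only as a stand-in (not part of the patch):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("int sscanf(const char *s, const char *fmt, ...);")
    libc = ffi.dlopen(None)              # the running process' C library (Unix)

    def parse_int(text):
        box = ffi.new("int *")           # out-parameter, like the short* triple above
        libc.sscanf(text, b"%d", box)
        # 'box' is still a cdata pointer; box[0] is the plain Python integer
        return box[0]

    assert parse_int(b"42") == 42
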
+3rd party libraries will be installed in ``pypy-2.1/site-packages``, and +the scripts in ``pypy-2.1/bin``. Installing using virtualenv --------------------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0.2`_: the latest official release +* `Release 2.1.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0.2`: http://pypy.org/download.html +.. _`Release 2.1.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.1.0.rst b/pypy/doc/release-2.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0.rst @@ -0,0 +1,89 @@ +============================ +PyPy 2.1 - Considered ARMful +============================ + +We're pleased to announce PyPy 2.1, which targets version 2.7.3 of the Python +language. This is the first release with official support for ARM processors in the JIT. +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.1 release here: + + http://pypy.org/download.html + +We would like to thank the `Raspberry Pi Foundation`_ for supporting the work +to finish PyPy's ARM support. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +The first beta of PyPy3 2.1, targeting version 3 of the Python language, was +just released, more details can be found `here`_. + +.. _`here`: http://morepypy.blogspot.com/2013/07/pypy3-21-beta-1.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.1 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. This release also supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like the Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.1 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* JIT support for ARM, architecture versions 6 and 7, hard- and soft-float ABI + +* Stacklet support for ARM + +* Support for os.statvfs and os.fstatvfs on unix systems + +* Improved logging performance + +* Faster sets for objects + +* Interpreter improvements + +* During packaging, compile the CFFI based TK extension + +* Pickling of numpy arrays and dtypes + +* Subarrays for numpy + +* Bugfixes to numpy + +* Bugfixes to cffi and ctypes + +* Bugfixes to the x86 stacklet support + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). 
+ +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in PyPy sometimes failed with a "bad write retry" message. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 + +Cheers, + +David Schneider for the PyPy team. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,13 @@ .. branch: fast-slowpath Added an abstraction for functions with a fast and slow path in the JIT. This speeds up list.append() and list.pop(). + +.. branch: curses_fixes + +.. branch: foldable-getarrayitem-indexerror +Constant-fold reading out of constant tuples in PyPy. + +.. branch: mro-reorder-numpypy-str +No longer delegate numpy string_ methods to space.StringObject, in numpy +this works by kind of by accident. Support for merging the refactor-str-types +branch diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "3.2.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-alpha0" +#define PYPY_VERSION "2.2.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -852,15 +852,10 @@ # Depending on which opcodes are enabled, eg. CALL_METHOD we bump the version # number by some constant # -# * CALL_METHOD +2 -# -# In other words: -# -# default_magic -- used by CPython without the -U option -# default_magic + 1 -- used by CPython with the -U option -# default_magic + 2 -- used by PyPy without any extra opcode -# ... 
-# default_magic + 5 -- used by PyPy with both extra opcodes +# default_magic - 6 -- used by CPython without the -U option +# default_magic - 5 -- used by CPython with the -U option +# default_magic -- used by PyPy without the CALL_METHOD opcode +# default_magic + 2 -- used by PyPy with the CALL_METHOD opcode # from pypy.interpreter.pycode import default_magic MARSHAL_VERSION_FOR_PYC = 2 diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -37,7 +37,8 @@ from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: - impl = scalar.Scalar(dtype.base) + w_val = dtype.base.coerce(space, space.wrap(0)) + impl = scalar.Scalar(dtype.base, w_val) else: strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, @@ -79,6 +80,8 @@ if w_val is not None: w_val = dtype.coerce(space, w_val) + else: + w_val = dtype.coerce(space, space.wrap(0)) return W_NDimArray(scalar.Scalar(dtype, w_val)) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -205,6 +205,7 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") descr_invert = _unaryop_impl("invert") + descr_conjugate = _unaryop_impl("conjugate") def descr_divmod(self, space, w_other): w_quotient = self.descr_div(space, w_other) @@ -378,12 +379,14 @@ return self class W_CharacterBox(W_FlexibleBox): - pass + def convert_to(self, dtype): + # XXX assert dtype is str type + return self + class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype - arg = space.str_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -517,6 +520,7 @@ all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), round = interp2app(W_GenericBox.descr_round), + conjugate = interp2app(W_GenericBox.descr_conjugate), view = interp2app(W_GenericBox.descr_view), ) @@ -682,12 +686,12 @@ __module__ = "numpypy", ) -W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), +W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), __module__ = "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -234,6 +234,9 @@ def is_record_type(self): return self.fields is not None + def is_str_type(self): + return self.num == 18 + def is_str_or_unicode(self): return (self.num == 18 or self.num == 19) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -328,14 +328,19 @@ w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - if (w_lhs.get_dtype().is_flexible_type() or \ - 
w_rhs.get_dtype().is_flexible_type()): + w_ldtype = w_lhs.get_dtype() + w_rdtype = w_rhs.get_dtype() + if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + self.comparison_func: + pass + elif (w_ldtype.is_flexible_type() or \ + w_rdtype.is_flexible_type()): raise OperationError(space.w_TypeError, space.wrap( 'unsupported operand dtypes %s and %s for "%s"' % \ - (w_rhs.get_dtype().get_name(), w_lhs.get_dtype().get_name(), + (w_rdtype.get_name(), w_ldtype.get_name(), self.name))) calc_dtype = find_binop_result_dtype(space, - w_lhs.get_dtype(), w_rhs.get_dtype(), + w_ldtype, w_rdtype, int_only=self.int_only, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools, diff --git a/pypy/module/micronumpy/stdobjspace.py b/pypy/module/micronumpy/stdobjspace.py deleted file mode 100644 --- a/pypy/module/micronumpy/stdobjspace.py +++ /dev/null @@ -1,11 +0,0 @@ - -from pypy.objspace.std import stringobject -from pypy.module.micronumpy import interp_boxes - -def delegate_stringbox2stringobj(space, w_box): - return space.wrap(w_box.dtype.itemtype.to_str(w_box)) - -def register_delegates(typeorder): - typeorder[interp_boxes.W_StringBox] = [ - (stringobject.W_StringObject, delegate_stringbox2stringobj), - ] diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -400,6 +400,7 @@ assert conj is conjugate assert conj(c0) == c0 + assert c0.conjugate() == c0 assert conj(c1) == complex(1, -2) assert conj(1) == 1 assert conj(-3) == -3 @@ -625,6 +626,8 @@ a = array([1 + 2j, 1 - 2j]) assert (a.conj() == [1 - 2j, 1 + 2j]).all() + a = array([1,2,3.4J],dtype=complex) + assert a[2].conjugate() == 0-3.4j def test_math(self): if self.isWindows: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -739,6 +739,7 @@ class AppTestStrUnicodeDtypes(BaseNumpyAppTest): def test_str_unicode(self): + skip('numpypy differs from numpy') from numpypy import str_, unicode_, character, flexible, generic assert str_.mro() == [str_, str, character, flexible, generic, object] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -264,6 +264,8 @@ assert a.dtype is dtype(int) a = ndarray([], dtype=float) assert a.shape == () + # test uninitialized value crash? + assert len(str(a)) > 0 def test_ndmin(self): from numpypy import array @@ -2754,6 +2756,19 @@ assert a[2] == 'ab' raises(TypeError, a, 'sum') raises(TypeError, 'a+a') + b = array(['abcdefg', 'ab', 'cd']) + assert a[2] == b[1] + assert bool(a[1]) + c = array(['ab','cdefg','hi','jk']) + # not implemented yet + #c[0] += c[3] + #assert c[0] == 'abjk' + + def test_to_str(self): + from numpypy import array + a = array(['abc','abc', 'def', 'ab'], 'S3') + b = array(['mnopqr','abcdef', 'ab', 'cd']) + assert b[1] != a[1] def test_string_scalar(self): from numpypy import array @@ -2765,8 +2780,7 @@ assert str(a.dtype) == '|S1' a = array('x', dtype='c') assert str(a.dtype) == '|S1' - # XXX can sort flexible types, why not comparison? 
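
Seen from application level, the interp_ufuncs change above means comparison ufuncs now accept string dtypes (roughly, by comparing the stored string values), while every other binary ufunc still rejects flexible types. A rough usage sketch on a PyPy build with the micronumpy module, not taken from the test suite:

    from numpypy import array     # PyPy-only module; plain numpy behaves similarly

    a = array(['abcdefg', 'ab', 'cd'])
    b = array(['abc', 'def', 'ab'])
    assert a[1] == b[2]           # string scalars of different widths compare equal
    assert a[0] != b[0]
    assert bool(a[1])             # non-empty string scalar is true
    try:
        a + a                     # arithmetic is still refused for string dtypes
    except TypeError:
        pass
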
- #assert a == 'x' + assert a == 'x' def test_flexible_repr(self): from numpypy import array diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -78,6 +78,11 @@ items.add(Item(name, kind, subitems)) return items +def get_version_str(python): + args = [python, '-c', 'import sys; print sys.version'] + lines = subprocess.check_output(args).splitlines() + return lines[0] + def split(lst): SPLIT = 5 lgt = len(lst) // SPLIT + 1 @@ -93,6 +98,7 @@ def main(argv): cpy_items = find_numpy_items("/usr/bin/python") pypy_items = find_numpy_items(argv[1], "numpypy") + ver = get_version_str(argv[1]) all_items = [] msg = "{:d}/{:d} names".format(len(pypy_items), len(cpy_items)) + " " @@ -113,7 +119,8 @@ env = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)) ) - html = env.get_template("page.html").render(all_items=split(sorted(all_items)), msg=msg) + html = env.get_template("page.html").render(all_items=split(sorted(all_items)), + msg=msg, ver=ver) if len(argv) > 2: with open(argv[2], 'w') as f: f.write(html.encode("utf-8")) diff --git a/pypy/module/micronumpy/tool/numready/page.html b/pypy/module/micronumpy/tool/numready/page.html --- a/pypy/module/micronumpy/tool/numready/page.html +++ b/pypy/module/micronumpy/tool/numready/page.html @@ -34,6 +34,7 @@

NumPyPy Status

+Version: {{ ver }}

Overall: {{ msg }}
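
Taken together, the two numready hunks above mean the generated status page now also says which interpreter it was built against. A self-contained sketch of the mechanism, assuming a system python at /usr/bin/python and an inlined template (not the tool's real entry point):

    import subprocess
    import jinja2

    def probe_version(python):
        # same idea as get_version_str() above: ask the interpreter itself
        out = subprocess.check_output([python, '-c', 'import sys; print(sys.version)'])
        return out.splitlines()[0].decode()

    page = jinja2.Template(u"NumPyPy Status\nVersion: {{ ver }}\nOverall: {{ msg }}\n")
    print(page.render(ver=probe_version('/usr/bin/python'), msg='123/456 names'))
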

diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1689,6 +1689,22 @@ def get_size(self): return self.size +def str_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v1): + return func(self, self.to_str(v1)) + return dispatcher + +def str_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.to_str(v1), + self.to_str(v2) + ) + return dispatcher class StringType(BaseType, BaseStringType): T = lltype.Char @@ -1696,6 +1712,8 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.interp_dtype import new_string_dtype + if isinstance(w_item, interp_boxes.W_StringBox): + return w_item arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1705,6 +1723,7 @@ @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) + # XXX simplify to range(box.dtype.get_size()) ? for k in range(min(self.size, box.arr.size-offset)): arr.storage[k + i] = box.arr.storage[k + offset] @@ -1718,7 +1737,7 @@ builder = StringBuilder() assert isinstance(item, interp_boxes.W_StringBox) i = item.ofs - end = i+self.size + end = i + item.dtype.get_size() while i < end: assert isinstance(item.arr.storage[i], str) if item.arr.storage[i] == '\x00': @@ -1734,10 +1753,53 @@ builder.append("'") return builder.build() - # XXX move to base class when UnicodeType is supported + # XXX move the rest of this to base class when UnicodeType is supported def to_builtin_type(self, space, box): return space.wrap(self.to_str(box)) + @str_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @str_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @str_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @str_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @str_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @str_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + @str_binary_op + def logical_and(self, v1, v2): + return bool(v1) and bool(v2) + + @str_binary_op + def logical_or(self, v1, v2): + return bool(v1) or bool(v2) + + @str_unary_op + def logical_not(self, v): + return not bool(v) + + @str_binary_op + def logical_xor(self, v1, v2): + return bool(v1) ^ bool(v2) + + def bool(self, v): + return bool(self.to_str(v)) + def build_and_convert(self, space, mydtype, box): assert isinstance(box, interp_boxes.W_GenericBox) if box.get_dtype(space).is_str_or_unicode(): @@ -1753,6 +1815,13 @@ arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + class VoidType(BaseType, BaseStringType): T = lltype.Char @@ -1798,12 +1867,6 @@ return W_NDimArray(implementation) NonNativeVoidType = VoidType -NonNativeStringType = StringType - -class UnicodeType(BaseType, BaseStringType): - T = lltype.UniChar - -NonNativeUnicodeType = UnicodeType class RecordType(BaseType): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -209,6 +209,22 @@ opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + 
def test_constfold_tuple(self): + code = """if 1: + tup = tuple(range(10000)) + l = [1, 2, 3, 4, 5, 6, "a"] + def main(n): + while n > 0: + sub = tup[1] # ID: getitem + l[1] = n # kill cache of tup[1] + n -= sub + """ + log = self.run(code, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) + assert log.opnames(ops) == [] + + def test_specialised_tuple(self): def main(n): import pypyjit diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 2, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -0,0 +1,48 @@ +from lib_pypy import _curses + +import pytest + +lib = _curses.lib + + +def test_color_content(monkeypatch): + def lib_color_content(color, r, g, b): + r[0], g[0], b[0] = 42, 43, 44 + return lib.OK + + monkeypatch.setattr(_curses, '_ensure_initialised_color', lambda: None) + monkeypatch.setattr(lib, 'color_content', lib_color_content) + + assert _curses.color_content(None) == (42, 43, 44) + + +def test_setupterm(monkeypatch): + def make_setupterm(err_no): + def lib_setupterm(term, fd, err): + err[0] = err_no + + return lib.ERR + + return lib_setupterm + + monkeypatch.setattr(_curses, '_initialised_setupterm', False) + monkeypatch.setattr(lib, 'setupterm', make_setupterm(0)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "could not find terminal" in exc_info.value.args[0] + + monkeypatch.setattr(lib, 'setupterm', make_setupterm(-1)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "could not find terminfo database" in exc_info.value.args[0] + + monkeypatch.setattr(lib, 'setupterm', make_setupterm(42)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "unknown error" in exc_info.value.args[0] diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Tests for _sqlite3.py""" import pytest, sys @@ -222,3 +223,8 @@ cur.execute("create table test(a)") cur.executemany("insert into test values (?)", [[1], [2], [3]]) assert cur.lastrowid is None + +def test_issue1573(con): + cur = con.cursor() + cur.execute(u'SELECT 1 as méil') + assert cur.description[0][0] == u"méil".encode('utf-8') diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -678,11 +678,13 @@ find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') class ListStrategy(object): - sizehint = -1 def __init__(self, space): self.space = space + def get_sizehint(self): + return -1 + def init_from_list_w(self, w_list, list_w): raise NotImplementedError @@ -870,7 +872,7 @@ else: strategy = self.space.fromcache(ObjectListStrategy) - storage = strategy.get_empty_storage(self.sizehint) + storage = 
strategy.get_empty_storage(self.get_sizehint()) w_list.strategy = strategy w_list.lstorage = storage @@ -953,6 +955,9 @@ self.sizehint = sizehint ListStrategy.__init__(self, space) + def get_sizehint(self): + return self.sizehint + def _resize_hint(self, w_list, hint): assert hint >= 0 self.sizehint = hint diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -132,10 +132,6 @@ # when trying to dispatch multimethods. # XXX build these lists a bit more automatically later - if config.objspace.usemodules.micronumpy: - from pypy.module.micronumpy.stdobjspace import register_delegates - register_delegates(self.typeorder) - self.typeorder[boolobject.W_BoolObject] += [ #(intobject.W_IntObject, boolobject.delegate_Bool2IntObject), (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -19,6 +19,7 @@ BUILDERS = [ 'own-linux-x86-32', 'own-linux-x86-64', + 'own-linux-armhf', # 'own-macosx-x86-32', # 'pypy-c-app-level-linux-x86-32', # 'pypy-c-app-level-linux-x86-64', diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -88,7 +88,7 @@ else: assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): - assert nos == [9, 10, 47] + assert nos == [0, 1, 47] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -187,10 +187,6 @@ # with Voids removed raise NotImplementedError - def methdescrof(self, SELFTYPE, methname): - # must return a subclass of history.AbstractMethDescr - raise NotImplementedError - def typedescrof(self, TYPE): raise NotImplementedError diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -799,6 +799,15 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): + # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments' + # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA. + # We must make sure that edi and esi do not contain GC pointers. + if IS_X86_32 and self.assembler._is_asmgcc(): + for box, loc in self.rm.reg_bindings.items(): + if (loc == edi or loc == esi) and box.type == REF: + self.rm.force_spill_var(box) + assert box not in self.rm.reg_bindings + # assert op.result is None args = op.getarglist() assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -34,7 +34,6 @@ return 'int' # singlefloats are stored in an int if TYPE in (lltype.Float, lltype.SingleFloat): raise NotImplementedError("type %s not supported" % TYPE) - # XXX fix this for oo... 
if (TYPE != llmemory.Address and rffi.sizeof(TYPE) > rffi.sizeof(lltype.Signed)): if supports_longlong and TYPE is not lltype.LongFloat: @@ -168,18 +167,11 @@ def __init__(self, identifier=None): self.identifier = identifier # for testing + class BasicFailDescr(AbstractFailDescr): def __init__(self, identifier=None): self.identifier = identifier # for testing -class AbstractMethDescr(AbstractDescr): - # the base class of the result of cpu.methdescrof() - jitcodes = None - def setup(self, jitcodes): - # jitcodes maps { runtimeClass -> jitcode for runtimeClass.methname } - self.jitcodes = jitcodes - def get_jitcode_for_class(self, oocls): - return self.jitcodes[oocls] class Const(AbstractValue): __slots__ = () diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -195,11 +195,10 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fieldstate) > value.getlength(): + raise BadVirtualState for i in range(len(self.fieldstate)): - try: - v = value.get_item_value(i) - except IndexError: - raise BadVirtualState + v = value.get_item_value(i) s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -269,13 +268,13 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fielddescrs) > len(value._items): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): try: v = value._items[i][self.fielddescrs[i][j]] - except IndexError: - raise BadVirtualState except KeyError: raise BadVirtualState s = self.fieldstate[p] diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -69,6 +69,28 @@ self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, getarrayitem_gc=0, getarrayitem_gc_pure=1) + def test_array_index_error(self): + class X(object): + _immutable_fields_ = ["y[*]"] + + def __init__(self, x): + self.y = x + + def get(self, index): + try: + return self.y[index] + except IndexError: + return -41 + + def f(index): + l = [1, 2, 3, 4] + l[2] = 30 + a = escape(X(l)) + return a.get(index) + res = self.interp_operations(f, [2], listops=True) + assert res == 30 + self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, + getarrayitem_gc=0, getarrayitem_gc_pure=1) def test_array_in_immutable(self): class X(object): diff --git a/rpython/jit/tl/targettlc.py b/rpython/jit/tl/targettlc.py --- a/rpython/jit/tl/targettlc.py +++ b/rpython/jit/tl/targettlc.py @@ -2,7 +2,6 @@ import py py.path.local(__file__) from rpython.jit.tl.tlc import interp, interp_nonjit, ConstantPool -from rpython.jit.codewriter.policy import JitPolicy from rpython.jit.backend.hlinfo import highleveljitinfo @@ -54,14 +53,10 @@ return decode_program(f.readall()) def target(driver, args): - return entry_point, None + return entry_point # ____________________________________________________________ -def jitpolicy(driver): - """Returns the JIT policy to use when translating.""" - return JitPolicy() - if __name__ == '__main__': import sys sys.exit(entry_point(sys.argv)) diff --git a/rpython/jit/tl/targettlr.py b/rpython/jit/tl/targettlr.py --- a/rpython/jit/tl/targettlr.py +++ b/rpython/jit/tl/targettlr.py @@ -29,15 +29,10 @@ return bytecode 
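
After this cleanup a toy RPython target needs neither the (entry_point, None) tuple nor a jitpolicy() hook; the driver now falls back to a plain JitPolicy on its own (see the driver.py hunk further down). A minimal sketch of what such a target module looks like, mirroring targetnopstandalone rather than adding anything new:

    def entry_point(argv):
        print "hello from a minimal target"   # RPython, hence Python-2 print
        return 0

    def target(driver, args):
        return entry_point                    # no tuple, no jitpolicy() needed
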
def target(driver, args): - return entry_point, None + return entry_point # ____________________________________________________________ -from rpython.jit.codewriter.policy import JitPolicy - -def jitpolicy(driver): - return JitPolicy() - if __name__ == '__main__': import sys sys.exit(entry_point(sys.argv)) diff --git a/rpython/jit/tl/tla/targettla.py b/rpython/jit/tl/tla/targettla.py --- a/rpython/jit/tl/tla/targettla.py +++ b/rpython/jit/tl/tla/targettla.py @@ -28,9 +28,6 @@ def target(driver, args): return entry_point, None -def jitpolicy(driver): - from rpython.jit.codewriter.policy import JitPolicy - return JitPolicy() # ____________________________________________________________ diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -729,6 +729,10 @@ # - frame address (actually the addr of the retaddr of the current function; # that's the last word of the frame in memory) # +# On 64 bits, it is an array of 7 values instead of 5: +# +# - %rbx, %r12, %r13, %r14, %r15, %rbp; and the frame address +# if IS_64_BITS: CALLEE_SAVED_REGS = 6 diff --git a/rpython/rlib/longlong2float.py b/rpython/rlib/longlong2float.py --- a/rpython/rlib/longlong2float.py +++ b/rpython/rlib/longlong2float.py @@ -68,14 +68,12 @@ uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__uint2singlefloat") + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__singlefloat2uint") + _nowrapper=True, elidable_function=True, sandboxsafe=True) class Float2LongLongEntry(ExtRegistryEntry): diff --git a/rpython/rlib/rlocale.py b/rpython/rlib/rlocale.py --- a/rpython/rlib/rlocale.py +++ b/rpython/rlib/rlocale.py @@ -193,11 +193,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') -isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') -islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') -tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') -isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') +isalpha = external('isalpha', [rffi.INT], rffi.INT) +isupper = external('isupper', [rffi.INT], rffi.INT) +islower = external('islower', [rffi.INT], rffi.INT) +tolower = external('tolower', [rffi.INT], rffi.INT) +isalnum = external('isalnum', [rffi.INT], rffi.INT) if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -185,11 +185,8 @@ SetEndOfFile = rffi.llexternal('SetEndOfFile', [HANDLE], BOOL, compilation_info=_eci) - # HACK: These implementations are specific to MSVCRT and the C backend. - # When generating on CLI or JVM, these are patched out. 
- # See PyPyTarget.target() in targetpypystandalone.py def _setfd_binary(fd): - #Allow this to succeed on invalid fd's + # Allow this to succeed on invalid fd's if rposix.is_valid_fd(fd): _setmode(fd, os.O_BINARY) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -62,8 +62,8 @@ compilation_info=ExternalCompilationInfo(), sandboxsafe=False, threadsafe='auto', _nowrapper=False, calling_conv='c', - oo_primitive=None, elidable_function=False, - macro=None, random_effects_on_gcobjs='auto'): + elidable_function=False, macro=None, + random_effects_on_gcobjs='auto'): """Build an external function that will invoke the C function 'name' with the given 'args' types and 'result' type. @@ -97,8 +97,6 @@ if elidable_function: _callable._elidable_function_ = True kwds = {} - if oo_primitive: - kwds['oo_primitive'] = oo_primitive has_callback = False for ARG in args: @@ -651,6 +649,10 @@ # char * CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True})) +# const char * +CONST_CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True, + 'render_as_const': True})) + # wchar_t * CWCHARP = lltype.Ptr(lltype.Array(lltype.UniChar, hints={'nolength': True})) diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -247,27 +247,22 @@ v_lst, v_index = hop.inputargs(r_lst, Signed) if checkidx: hop.exception_is_here() + spec = dum_checkidx else: + spec = dum_nocheck hop.exception_cannot_occur() - if hop.args_s[0].listdef.listitem.mutated or checkidx: - if hop.args_s[1].nonneg: - llfn = ll_getitem_nonneg - else: - llfn = ll_getitem - if checkidx: - spec = dum_checkidx - else: - spec = dum_nocheck - c_func_marker = hop.inputconst(Void, spec) - v_res = hop.gendirectcall(llfn, c_func_marker, v_lst, v_index) + if hop.args_s[0].listdef.listitem.mutated: + basegetitem = ll_getitem_fast else: - # this is the 'foldable' version, which is not used when - # we check for IndexError - if hop.args_s[1].nonneg: - llfn = ll_getitem_foldable_nonneg - else: - llfn = ll_getitem_foldable - v_res = hop.gendirectcall(llfn, v_lst, v_index) + basegetitem = ll_getitem_foldable_nonneg + + if hop.args_s[1].nonneg: + llfn = ll_getitem_nonneg + else: + llfn = ll_getitem + c_func_marker = hop.inputconst(Void, spec) + c_basegetitem = hop.inputconst(Void, basegetitem) + v_res = hop.gendirectcall(llfn, c_func_marker, c_basegetitem, v_lst, v_index) return r_lst.recast(hop.llops, v_res) rtype_getitem_key = rtype_getitem @@ -654,16 +649,16 @@ i += 1 length_1_i -= 1 -def ll_getitem_nonneg(func, l, index): +def ll_getitem_nonneg(func, basegetitem, l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") if func is dum_checkidx: if index >= l.ll_length(): raise IndexError - return l.ll_getitem_fast(index) + return basegetitem(l, index) ll_getitem_nonneg._always_inline_ = True # no oopspec -- the function is inlined by the JIT -def ll_getitem(func, l, index): +def ll_getitem(func, basegetitem, l, index): if func is dum_checkidx: length = l.ll_length() # common case: 0 <= index < length if r_uint(index) >= r_uint(length): @@ -680,21 +675,18 @@ if index < 0: index += l.ll_length() ll_assert(index >= 0, "negative list getitem index out of bound") + return basegetitem(l, index) +# no oopspec -- the function is inlined by the JIT + +def ll_getitem_fast(l, index): return l.ll_getitem_fast(index) -# no oopspec -- the function 
is inlined by the JIT +ll_getitem_fast._always_inline_ = True def ll_getitem_foldable_nonneg(l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") return l.ll_getitem_fast(index) ll_getitem_foldable_nonneg.oopspec = 'list.getitem_foldable(l, index)' -def ll_getitem_foldable(l, index): - if index < 0: - index += l.ll_length() - return ll_getitem_foldable_nonneg(l, index) -ll_getitem_foldable._always_inline_ = True -# no oopspec -- the function is inlined by the JIT - def ll_setitem_nonneg(func, l, index, newitem): ll_assert(index >= 0, "unexpectedly negative list setitem index") if func is dum_checkidx: diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -58,7 +58,6 @@ self.classdef_to_pytypeobject = {} self.concrete_calltables = {} self.class_pbc_attributes = {} - self.oo_meth_impls = {} self.cache_dummy_values = {} self.lltype2vtable = {} self.typererrors = [] diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -14,15 +14,19 @@ from rpython.translator.translator import TranslationContext -# undo the specialization parameter +# undo the specialization parameters for n1 in 'get set del'.split(): + if n1 == "get": + extraarg = "ll_getitem_fast, " + else: + extraarg = "" for n2 in '', '_nonneg': name = 'll_%sitem%s' % (n1, n2) globals()['_' + name] = globals()[name] exec """if 1: def %s(*args): - return _%s(dum_checkidx, *args) -""" % (name, name) + return _%s(dum_checkidx, %s*args) +""" % (name, name, extraarg) del n1, n2, name @@ -1400,7 +1404,7 @@ block = graph.startblock op = block.operations[-1] assert op.opname == 'direct_call' - func = op.args[0].value._obj._callable + func = op.args[2].value assert ('foldable' in func.func_name) == \ ("y[*]" in immutable_fields) @@ -1511,8 +1515,8 @@ block = graph.startblock lst1_getitem_op = block.operations[-3] # XXX graph fishing lst2_getitem_op = block.operations[-2] - func1 = lst1_getitem_op.args[0].value._obj._callable - func2 = lst2_getitem_op.args[0].value._obj._callable + func1 = lst1_getitem_op.args[2].value + func2 = lst2_getitem_op.args[2].value assert func1.oopspec == 'list.getitem_foldable(l, index)' assert not hasattr(func2, 'oopspec') diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -8,7 +8,7 @@ # ====> test_rstr.py -class BaseTestRUnicode(AbstractTestRstr, BaseRtypingTest): +class TestRUnicode(AbstractTestRstr, BaseRtypingTest): const = unicode constchar = unichr diff --git a/rpython/translator/backendopt/test/test_removenoops.py b/rpython/translator/backendopt/test/test_removenoops.py --- a/rpython/translator/backendopt/test/test_removenoops.py +++ b/rpython/translator/backendopt/test/test_removenoops.py @@ -97,9 +97,8 @@ def test_remove_unaryops(): - # We really want to use remove_unaryops for things like ooupcast and - # oodowncast in dynamically typed languages, but it's easier to test - # it with operations on ints here. + # We really want to use remove_unaryops for more complex operations, but + # it's easier to test it with operations on ints here. 
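
The rlist.py refactoring above can be pictured in plain Python: instead of several nearly identical getitem helpers, the rtyper now passes the base accessor in as a constant, so one wrapper handles negative indices and bounds checks for both the foldable (never-mutated) case and the ordinary one. A simplified model with hypothetical names, not the actual lltype code:

    def getitem_fast(lst, index):        # plays the role of ll_getitem_fast
        return lst[index]

    def getitem_foldable(lst, index):    # plays the role of ll_getitem_foldable_nonneg
        return lst[index]                # same body; the JIT may constant-fold calls to it

    def checked_getitem(basegetitem, lst, index):   # plays the role of ll_getitem
        if index < 0:
            index += len(lst)
        if not 0 <= index < len(lst):
            raise IndexError
        return basegetitem(lst, index)

    assert checked_getitem(getitem_foldable, (1, 2, 3), -1) == 3
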
def f(x): i = llop.int_invert(lltype.Signed, x) i = llop.int_add(lltype.Signed, x, 1) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -358,6 +358,8 @@ self.fullptrtypename = 'void *@' else: self.fullptrtypename = self.itemtypename.replace('@', '*@') + if ARRAY._hints.get("render_as_const"): + self.fullptrtypename = 'const ' + self.fullptrtypename def setup(self): """Array loops are forbidden by ForwardReference.become() because diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -1,5 +1,6 @@ import py from rpython.rtyper.lltypesystem.lltype import * +from rpython.rtyper.lltypesystem import rffi from rpython.translator.c.test.test_genc import compile from rpython.tool.sourcetools import func_with_new_name @@ -314,14 +315,14 @@ from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import rffi, llmemory, lltype P = lltype.Ptr(lltype.FixedSizeArray(lltype.Char, 1)) - + def f(): a = llstr("xyz") b = (llmemory.cast_ptr_to_adr(a) + llmemory.offsetof(STR, 'chars') + llmemory.itemoffsetof(STR.chars, 0)) buf = rffi.cast(rffi.VOIDP, b) return buf[2] - + fn = self.getcompiled(f, []) res = fn() assert res == 'z' @@ -941,3 +942,21 @@ assert fn(0) == 10 assert fn(1) == 10 + 521 assert fn(2) == 10 + 34 + + def test_const_char_star(self): + from rpython.translator.tool.cbuild import ExternalCompilationInfo + + eci = ExternalCompilationInfo(includes=["stdlib.h"]) + atoi = rffi.llexternal('atoi', [rffi.CONST_CCHARP], rffi.INT, + compilation_info=eci) + + def f(n): + s = malloc(rffi.CCHARP.TO, 2, flavor='raw') + s[0] = '9' + s[1] = '\0' + res = atoi(rffi.cast(rffi.CONST_CCHARP, s)) + free(s, flavor='raw') + return res + + fn = self.getcompiled(f, [int]) + assert fn(0) == 9 diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -354,8 +354,12 @@ """ Generate bytecodes for JIT and flow the JIT helper functions lltype version """ - get_policy = self.extra['jitpolicy'] - self.jitpolicy = get_policy(self) + from rpython.jit.codewriter.policy import JitPolicy + get_policy = self.extra.get('jitpolicy', None) + if get_policy is None: + self.jitpolicy = JitPolicy() + else: + self.jitpolicy = get_policy(self) # from rpython.jit.metainterp.warmspot import apply_jit apply_jit(self.translator, policy=self.jitpolicy, @@ -544,9 +548,14 @@ try: entry_point, inputtypes, policy = spec + except TypeError: + # not a tuple at all + entry_point = spec + inputtypes = policy = None except ValueError: + policy = None entry_point, inputtypes = spec - policy = None + driver.setup(entry_point, inputtypes, policy=policy, diff --git a/rpython/translator/goal/targetjitstandalone.py b/rpython/translator/goal/targetjitstandalone.py --- a/rpython/translator/goal/targetjitstandalone.py +++ b/rpython/translator/goal/targetjitstandalone.py @@ -3,7 +3,6 @@ """ from rpython.rlib import jit -from rpython.jit.codewriter.policy import JitPolicy driver = jit.JitDriver(greens = [], reds = 'auto') driver2 = jit.JitDriver(greens = [], reds = 'auto') @@ -40,7 +39,4 @@ return 0 def target(*args): - return entry_point, None - -def jitpolicy(driver): - return JitPolicy() + return entry_point diff --git a/rpython/translator/goal/targetnopstandalone.py b/rpython/translator/goal/targetnopstandalone.py --- 
a/rpython/translator/goal/targetnopstandalone.py +++ b/rpython/translator/goal/targetnopstandalone.py @@ -19,4 +19,4 @@ # _____ Define and setup target ___ def target(*args): - return entry_point, None + return entry_point diff --git a/rpython/translator/goal/targetrpystonedalone.py b/rpython/translator/goal/targetrpystonedalone.py --- a/rpython/translator/goal/targetrpystonedalone.py +++ b/rpython/translator/goal/targetrpystonedalone.py @@ -60,13 +60,12 @@ # _____ Define and setup target ___ def target(*args): - return entry_point, None + return entry_point """ Why is this a stand-alone target? -The above target specifies None as the argument types list. -This is a case treated specially in the driver.py . If the list -of input types is empty, it is meant to be a list of strings, -actually implementing argv of the executable. +The above target specifies no argument types list. +This is a case treated specially in the driver.py . The only argument is meant +to be a list of strings, actually implementing argv of the executable. """ diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -284,8 +284,6 @@ default_goal='compile') log_config(translateconfig, "translate.py configuration") if config.translation.jit: - if 'jitpolicy' not in targetspec_dic: - raise Exception('target has no jitpolicy defined.') if (translateconfig.goals != ['annotate'] and translateconfig.goals != ['rtype']): drv.set_extra_goals(['pyjitpl']) From noreply at buildbot.pypy.org Fri Aug 2 21:09:23 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 Aug 2013 21:09:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k: py3 needs a py3 str Message-ID: <20130802190923.A3FF81C00F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65912:e709308fb33d Date: 2013-08-02 12:05 -0700 http://bitbucket.org/pypy/pypy/changeset/e709308fb33d/ Log: py3 needs a py3 str diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1317,7 +1317,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).split("[")[0].strip() + name = _ffi.string(name).decode('utf-8').split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc From noreply at buildbot.pypy.org Fri Aug 2 21:09:24 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 Aug 2013 21:09:24 +0200 (CEST) Subject: [pypy-commit] pypy py3k: issue1572: fix site-packages missing in sys.path and bring over some other Message-ID: <20130802190924.CE00F1C00F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65913:0ad8f5229df1 Date: 2013-08-02 12:06 -0700 http://bitbucket.org/pypy/pypy/changeset/0ad8f5229df1/ Log: issue1572: fix site-packages missing in sys.path and bring over some other site.py customizations from default diff --git a/lib-python/3/site.py b/lib-python/3/site.py --- a/lib-python/3/site.py +++ b/lib-python/3/site.py @@ -57,6 +57,8 @@ import builtins import traceback +is_pypy = '__pypy__' in sys.builtin_module_names + # Prefixes for site-packages; add additional prefixes like /usr/local here PREFIXES = [sys.prefix, sys.exec_prefix] # Enable per user site-packages directory @@ -284,6 +286,10 @@ if sys.platform in ('os2emx', 'riscos'): sitepackages.append(os.path.join(prefix, "Lib", "site-packages")) + elif is_pypy: + 
from distutils.sysconfig import get_python_lib + sitepackages.append(get_python_lib(standard_lib=False, + prefix=prefix)) elif os.sep == '/': sitepackages.append(os.path.join(prefix, "lib", "python" + sys.version[:3], @@ -427,20 +433,27 @@ def setcopyright(): """Set 'copyright' and 'credits' in builtins""" + licenseargs = None + if is_pypy: + credits = "PyPy is maintained by the PyPy developers: http://pypy.org/" + license = "See https://bitbucket.org/pypy/pypy/src/default/LICENSE" + licenseargs = (license,) + elif sys.platform[:4] == 'java': + credits = ("Jython is maintained by the Jython developers " + "(www.jython.org).") + else: + credits = """\ + Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands + for supporting Python development. See www.python.org for more information.""" + builtins.copyright = _Printer("copyright", sys.copyright) - if sys.platform[:4] == 'java': - builtins.credits = _Printer( - "credits", - "Jython is maintained by the Jython developers (www.jython.org).") - else: - builtins.credits = _Printer("credits", """\ - Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands - for supporting Python development. See www.python.org for more information.""") - here = os.path.dirname(os.__file__) - builtins.license = _Printer( - "license", "See http://www.python.org/%.3s/license.html" % sys.version, - ["LICENSE.txt", "LICENSE"], - [os.path.join(here, os.pardir), here, os.curdir]) + builtins.credits = _Printer("credits", credits) + if licenseargs is None: + here = os.path.dirname(os.__file__) + license = "See http://www.python.org/%.3s/license.html" % sys.version + licenseargs = (license, ["LICENSE.txt", "LICENSE"], + [os.path.join(here, os.pardir), here, os.curdir]) + builtins.license = _Printer("license", *licenseargs) class _Helper(object): From noreply at buildbot.pypy.org Sat Aug 3 00:15:39 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 00:15:39 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Experiment with rbigint ops with an int, saves the construction of a rbigint object of length 1. Which might give a speedup Message-ID: <20130802221539.8D1B21C3611@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65914:a48912159932 Date: 2013-08-02 16:59 +0200 http://bitbucket.org/pypy/pypy/changeset/a48912159932/ Log: Experiment with rbigint ops with an int, saves the construction of a rbigint object of length 1. 
Which might give a speedup From noreply at buildbot.pypy.org Sat Aug 3 00:15:40 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 00:15:40 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Progress + tests Message-ID: <20130802221540.D3EE01C3611@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65915:8564d29c1285 Date: 2013-08-02 18:07 +0200 http://bitbucket.org/pypy/pypy/changeset/8564d29c1285/ Log: Progress + tests diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -155,30 +155,30 @@ return space.newbool(w_long1.num.ge(w_long2.num)) def lt__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.lt(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_lt(w_int2.intval)) def le__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.le(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_le(w_int2.intval)) def eq__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.eq(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_eq(w_int2.intval)) def ne__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.ne(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_ne(w_int2.intval)) def gt__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.gt(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_gt(w_int2.intval)) def ge__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.ge(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_ge(w_int2.intval)) def lt__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).lt(w_long2.num)) + return space.newbool(w_long2.num.int_gt(w_int1.intval)) def le__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).le(w_long2.num)) + return space.newbool(w_long2.num.int_ge(w_int1.intval)) def eq__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).eq(w_long2.num)) + return space.newbool(w_long2.num.int_ne(w_int1.intval)) def ne__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).ne(w_long2.num)) + return space.newbool(w_long2.num.int_eg(w_int1.intval)) def gt__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).gt(w_long2.num)) + return space.newbool(w_long2.num.int_lt(w_int1.intval)) def ge__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).ge(w_long2.num)) + return space.newbool(w_long2.num.int_le(w_int1.intval)) def hash__Long(space, w_value): diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -482,6 +482,13 @@ i += 1 return True + @jit.elidable + def int_eq(self, other): + """ eq with int """ + if self.numdigits() != 1 or self.digit(0) * self.sign != other: + return False + return True + @jit.look_inside def ne(self, other): return not self.eq(other) @@ -521,18 +528,52 @@ i -= 1 return False + @jit.elidable + def int_lt(self, other): + """ lt where other is an int """ + if other >= 0 and self.sign < 0: + return True + elif other < 0 and self.sign >= 0: + return False + digits = self.numdigits() + if digits > 1: + if self.sign == 1 and other >= 0: + return False + else: + return True + + d1 = self.sign * self.digit(0) + 
if d1 < other: + return True + return False + @jit.look_inside def le(self, other): return not other.lt(self) @jit.look_inside + def int_le(self, other): + e = self.int_eq(other) + if e: + return True + return self.int_lt(other) + + @jit.look_inside def gt(self, other): return other.lt(self) @jit.look_inside + def int_gt(self, other): + return not self.int_le(other) + + @jit.look_inside def ge(self, other): return not self.lt(other) + @jit.look_inside + def int_ge(self, other): + return not self.int_lt(other) + @jit.elidable def hash(self): return _hash(self) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -320,6 +320,18 @@ assert not f1.eq(f2) assert not f1.eq(f3) + def test_int_eq(self): + x = 5858 + y = 58583 + f1 = rbigint.fromlong(x) + f2 = rbigint.fromlong(-x) + f3 = rbigint.fromlong(y) + assert f1.int_eq(x) + assert f2.int_eq(-x) + assert f3.int_eq(y) + assert not f1.int_eq(-x) + assert not f1.int_eq(y) + def test_eq_fastpath(self): x = 1234 y = 1234 @@ -335,6 +347,15 @@ f2 = rbigint.fromlong(y) assert (x < y) == f1.lt(f2) + def test_int_lt(self): + val = [0, 0x111111111111, 0x111111111112, 0x6FFFFFFF, 2**80] + short = [0, 0x111, 0x7FFFFFFF] + for x in gen_signs(val): + for y in gen_signs(short): + f1 = rbigint.fromlong(x) + assert (x < y) == f1.int_lt(y) + print "Pass (%d < %d) = %d" % (x, y, f1.int_lt(y)) + def test_order(self): f6 = rbigint.fromint(6) f7 = rbigint.fromint(7) @@ -343,6 +364,13 @@ assert (f6.gt(f6), f6.gt(f7), f7.gt(f6)) == (0,0,1) assert (f6.ge(f6), f6.ge(f7), f7.ge(f6)) == (1,0,1) + def test_int_order(self): + f6 = rbigint.fromint(6) + assert (f6.int_lt(6), f6.int_lt(7)) == (0,1) + assert (f6.int_le(6), f6.int_le(7)) == (1,1) + assert (f6.int_gt(6), f6.int_gt(7)) == (0,0) + assert (f6.int_ge(6), f6.int_ge(7)) == (1,0) + def test_int_conversion(self): f1 = rbigint.fromlong(12332) f2 = rbigint.fromint(12332) From noreply at buildbot.pypy.org Sat Aug 3 00:15:42 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 00:15:42 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: More progress, more tests. Message-ID: <20130802221542.1FD691C3611@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65916:5754c8cfb244 Date: 2013-08-02 22:33 +0200 http://bitbucket.org/pypy/pypy/changeset/5754c8cfb244/ Log: More progress, more tests. 
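Roughly, the new arithmetic methods are meant to be used as in the following minimal sketch (method names as they appear in the diff below; the concrete values are only illustrative):

    from rpython.rlib.rbigint import rbigint

    big = rbigint.fromlong(12345678901234567890L)

    # fast paths: the right-hand operand stays a plain machine integer,
    # so no one-digit rbigint has to be allocated for it
    assert big.int_add(7).tolong() == 12345678901234567890L + 7
    assert big.int_sub(7).tolong() == 12345678901234567890L - 7
    assert big.int_mul(-3).tolong() == 12345678901234567890L * -3
    assert big.int_mod(10).tolong() == 12345678901234567890L % 10

    # equivalent general path: the int must first be boxed into a rbigint
    assert big.add(rbigint.fromint(7)).eq(big.int_add(7))

Avoiding that one-digit rbigint for the small operand is exactly the allocation the earlier commit message in this series refers to.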
diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -174,7 +174,7 @@ def eq__Int_Long(space, w_int1, w_long2): return space.newbool(w_long2.num.int_ne(w_int1.intval)) def ne__Int_Long(space, w_int1, w_long2): - return space.newbool(w_long2.num.int_eg(w_int1.intval)) + return space.newbool(w_long2.num.int_eq(w_int1.intval)) def gt__Int_Long(space, w_int1, w_long2): return space.newbool(w_long2.num.int_lt(w_int1.intval)) def ge__Int_Long(space, w_int1, w_long2): @@ -192,12 +192,21 @@ def add__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.add(w_long2.num)) +def add__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_add(w_int2.intval)) + def sub__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.sub(w_long2.num)) +def sub__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_sub(w_int2.intval)) + def mul__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.mul(w_long2.num)) +def mul__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_mul(w_int2.intval)) + def truediv__Long_Long(space, w_long1, w_long2): try: f = w_long1.num.truediv(w_long2.num) @@ -228,6 +237,14 @@ space.wrap("long division or modulo by zero")) return newlong(space, z) +def mod__Long_Int(space, w_long1, w_int2): + try: + z = w_long1.num.int_mod(w_int2.intval) + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, + space.wrap("long division or modulo by zero")) + return newlong(space, z) + def divmod__Long_Long(space, w_long1, w_long2): try: div, mod = w_long1.num.divmod(w_long2.num) @@ -285,6 +302,14 @@ space.wrap("shift count too large")) return W_LongObject(w_long1.num.lshift(shift)) +def lshift__Long_Int(space, w_long1, w_int2): + # XXX need to replicate some of the logic, to get the errors right + if w_int2.intval < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative shift counnt")) + + return W_LongObject(w_long1.num.lshift(w_int2.intval)) + def rshift__Long_Long(space, w_long1, w_long2): # XXX need to replicate some of the logic, to get the errors right if w_long2.num.sign < 0: @@ -297,6 +322,14 @@ space.wrap("shift count too large")) return newlong(space, w_long1.num.rshift(shift)) +def rshift__Long_Int(space, w_long1, w_int2): + # XXX need to replicate some of the logic, to get the errors right + if w_int2.intval < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative shift count")) + + return newlong(space, w_long1.num.rshift(w_int2.intval)) + def and__Long_Long(space, w_long1, w_long2): return newlong(space, w_long1.num.and_(w_long2.num)) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -493,6 +493,11 @@ def ne(self, other): return not self.eq(other) + @jit.look_inside + def int_ne(self, other): + """ ne with int """ + return not self.int_eq(other) + @jit.elidable def lt(self, other): if self.sign > other.sign: @@ -592,6 +597,21 @@ return result @jit.elidable + def int_add(self, other): + if other == 0: + return self + if self.sign == 0: + return rbigint.fromint(other) + if (self.sign > 0 and other > 0) or (self.sign < 0 and other < 0): + result = _x_int_add(self, abs(other)) + else: + # XXX: Improve. 
+ result = _x_sub(rbigint.fromint(other), self) + if other < 0: + result.sign *= -1 + return result + + @jit.elidable def sub(self, other): if other.sign == 0: return self @@ -605,6 +625,20 @@ return result @jit.elidable + def int_sub(self, other): + if other == 0: + return self + if self.sign == 0: + return rbigint.fromint(-1 * other) + if (self.sign > 0 and other > 0) or (self.sign < 0 and other < 0): + # Improve + result = _x_sub(self, rbigint.fromint(other)) + else: + result = _x_int_add(self, abs(other)) + result.sign *= self.sign + return result + + @jit.elidable def mul(self, b): asize = self.numdigits() bsize = b.numdigits() @@ -650,6 +684,33 @@ return result @jit.elidable + def int_mul(self, b): + """ Mul with int. """ + asize = self.numdigits() + + if self.sign == 0 or b == 0: + return NULLRBIGINT + + if asize == 1: + if self._digits[0] == NULLDIGIT: + return NULLRBIGINT + elif self._digits[0] == ONEDIGIT: + return rbigint.fromint(self.sign * b) + + res = self.widedigit(0) * b + carry = res >> SHIFT + if carry: + return rbigint([_store_digit(res & MASK), _store_digit(carry)], self.sign * (-1 if b < 0 else 1), 2) + else: + return rbigint([_store_digit(res & MASK)], self.sign * (-1 if b < 0 else 1), 1) + + else: + result = _x_int_mul(self, abs(b)) + + result.sign = self.sign * (-1 if b < 0 else 1) + return result + + @jit.elidable def truediv(self, other): div = _bigint_true_divide(self, other) return div @@ -659,7 +720,7 @@ if self.sign == 1 and other.numdigits() == 1 and other.sign == 1: digit = other.digit(0) if digit == 1: - return rbigint(self._digits[:self.size], 1, self.size) + return self elif digit and digit & (digit - 1) == 0: return self.rshift(ptwotable[digit]) @@ -667,14 +728,36 @@ if mod.sign * other.sign == -1: if div.sign == 0: return ONENEGATIVERBIGINT - div = div.sub(ONERBIGINT) + div = div.int_sub(1) return div + @jit.elidable + def int_floordiv(self, other): + digit = abs(other) + if self.sign == 1 and other > 0: + if digit == 1: + return self + elif digit and digit & (digit - 1) == 0: + return self.rshift(ptwotable[digit]) + + div, mod = _divrem1(self, digit) + + if mod != 0 and self.sign * (-1 if other < 0 else 1) == -1: + if div.sign == 0: + return ONENEGATIVERBIGINT + div = div.int_add(1) + div.sign = self.sign * (-1 if other < 0 else 1) + return div + @jit.look_inside def div(self, other): return self.floordiv(other) + @jit.look_inside + def int_div(self, other): + return self.int_floordiv(other) + @jit.elidable def mod(self, other): if self.sign == 0: @@ -713,6 +796,46 @@ return mod @jit.elidable + def int_mod(self, other): + if self.sign == 0: + return NULLRBIGINT + + digit = abs(other) + + if digit != 0: + if digit == 1: + return NULLRBIGINT + elif digit == 2: + modm = self.digit(0) & 1 + if modm: + return ONENEGATIVERBIGINT if other < 0 else ONERBIGINT + return NULLRBIGINT + elif digit & (digit - 1) == 0: + mod = self.and_(rbigint([_store_digit(digit - 1)], 1, 1)) + else: + # Perform + size = self.numdigits() - 1 + if size > 0: + rem = self.widedigit(size) + size -= 1 + while size >= 0: + rem = ((rem << SHIFT) + self.widedigit(size)) % digit + size -= 1 + else: + rem = self.digit(0) % digit + + if rem == 0: + return NULLRBIGINT + mod = rbigint([_store_digit(rem)], -1 if self.sign < 0 else 1, 1) + else: + # Raise + raise ZeroDivisionError("long division or modulo by zero") + + if mod.sign * (-1 if other < 0 else 1) == -1: + mod = mod.int_add(other) + return mod + + @jit.elidable def divmod(v, w): """ The / and % operators are now defined in terms of 
divmod(). @@ -735,7 +858,28 @@ mod = mod.add(w) if div.sign == 0: return ONENEGATIVERBIGINT, mod - div = div.sub(ONERBIGINT) + div = div.int_sub(1) + return div, mod + + @jit.elidable + def int_divmod(v, w): + """ Divmod with int """ + if v.sign != (-1 if w < 0 else 1): + # TODO, fix. + return v.divmod(rbigint.fromint(w)) + div, mod = _divrem1(v, abs(w)) + if v.sign != (-1 if w < 0 else 1): + mod = rbigint.fromint(mod) + mod.sign = -1 if w < 0 else 1 + mod = mod.int_add(w) + + if div.sign == 0: + return ONENEGATIVERBIGINT, mod + div = div.int_add(1) + else: + mod = rbigint.fromint(mod) + mod.sign = -1 if w < 0 else 1 + div.sign = v.sign * (-1 if w < 0 else 1) return div, mod @jit.elidable @@ -1170,6 +1314,26 @@ z._normalize() return z +def _x_int_add(a, b): + """ Add the absolute values of one bigint and one int. """ + size_a = a.numdigits() + + z = rbigint([NULLDIGIT] * (size_a + 1), 1) + i = UDIGIT_TYPE(0) + + carry = a.udigit(0) + b + z.setdigit(0, carry) + carry >>= SHIFT + i += 1 + while i < size_a: + carry += a.udigit(i) + z.setdigit(i, carry) + carry >>= SHIFT + i += 1 + z.setdigit(i, carry) + z._normalize() + return z + def _x_sub(a, b): """ Subtract the absolute values of two integers. """ @@ -1216,6 +1380,40 @@ z._normalize() return z +def _x_int_sub(a, b): + """ Subtract the absolute values of one rbigint and one integer. """ + + size_a = a.numdigits() + sign = 1 + + if size_a == 1: + # Find highest digit where a and b differ: + if a.digit(0) == b: + return NULLRBIGINT + elif a.digit(0) < b: + sign = -1 + b *= -1 + size_a = size_b = 1 + + z = rbigint([NULLDIGIT] * size_a, sign, size_a) + borrow = UDIGIT_TYPE(0) + i = _load_unsigned_digit(1) + # The following assumes unsigned arithmetic + # works modulo 2**N for some N>SHIFT. + borrow = a.udigit(0) - b + z.setdigit(0, borrow) + borrow >>= SHIFT + while i < size_a: + borrow = a.udigit(i) - borrow + z.setdigit(i, borrow) + borrow >>= SHIFT + #borrow &= 1 + i += 1 + + assert borrow == 0 + z._normalize() + return z + # A neat little table of power of twos. ptwotable = {} for x in range(SHIFT-1): @@ -1301,6 +1499,18 @@ z._normalize() return z +def _x_int_mul(a, digit): + """ + Grade school multiplication, ignoring the signs. + Returns the absolute value of the product, or None if error. + """ + + if digit & (digit - 1) == 0: + return a.lqshift(ptwotable[digit]) + + return _muladd1(a, digit) + + def _kmul_split(n, size): """ A helper for Karatsuba multiplication (k_mul). 
diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -50,6 +50,15 @@ r2 = op1 // op2 assert r1.tolong() == r2 + def test_int_floordiv(self): + for op1 in [-12, -2, -1, 1, 2, 50]: + for op2 in [-4, -2, -1, 1, 2, 8]: + rl_op1 = rbigint.fromint(op1) + r1 = rl_op1.int_floordiv(op2) + r2 = op1 // op2 + print op1, op2 + assert r1.tolong() == r2 + def test_truediv(self): for op1 in [-12, -2, -1, 1, 2, 50]: for op2 in [-4, -2, -1, 1, 2, 8]: @@ -106,6 +115,15 @@ print op1, op2 assert r1.tolong() == r2 + def test_int_mod(self): + for op1 in [-50, -12, -2, -1, 1, 2, 50, 52]: + for op2 in [-4, -2, -1, 1, 2, 8]: + rl_op1 = rbigint.fromint(op1) + r1 = rl_op1.int_mod(op2) + r2 = op1 % op2 + print op1, op2 + assert r1.tolong() == r2 + def test_pow(self): for op1 in [-50, -12, -2, -1, 1, 2, 50, 52]: for op2 in [0, 1, 2, 8, 9, 10, 11]: @@ -237,6 +255,17 @@ result = f1.add(f2) assert result.tolong() == x * i + y * j + def test_int_add(self): + x = 123456789123456789000000L + y = 1238 + for i in [-1, 1]: + for j in [-1, 1]: + f1 = rbigint.fromlong(x * i) + f2 = y * j + result = f1.int_add(f2) + assert result.tolong() == x * i + y * j + + def test_sub(self): x = 12378959520302182384345L y = 88961284756491823819191823L @@ -247,6 +276,16 @@ result = f1.sub(f2) assert result.tolong() == x * i - y * j + def test_int_sub(self): + x = 12378959520302182384345L + y = 8896 + for i in [-1, 1]: + for j in [-1, 1]: + f1 = rbigint.fromlong(x * i) + f2 = y * j + result = f1.int_sub(f2) + assert result.tolong() == x * i - y * j + def test_subzz(self): w_l0 = rbigint.fromint(0) assert w_l0.sub(w_l0).tolong() == 0 @@ -262,6 +301,13 @@ result = f1.mul(f1) assert result.tolong() == x * x + def test_int_mul(self): + x = -1238585838347L + y = 585839 + f1 = rbigint.fromlong(x) + result = f1.int_mul(y) + assert result.tolong() == x * y + def test_tofloat(self): x = 12345678901234567890L ** 10 f1 = rbigint.fromlong(x) @@ -667,6 +713,20 @@ assert div.tolong() == _div assert rem.tolong() == _rem + def test_int_divmod(self): + x = 12345678901234567890L + for i in range(100): + y = randint(0, 1 << 60) + for sx, sy in (1, 1), (1, -1), (-1, -1), (-1, 1): + sx *= x + sy *= y + f1 = rbigint.fromlong(sx) + div, rem = f1.int_divmod(sy) + _div, _rem = divmod(sx, sy) + print sx, sy + assert div.tolong() == _div + assert rem.tolong() == _rem + # testing Karatsuba stuff def test__v_iadd(self): f1 = bigint([lobj.MASK] * 10, 1) From noreply at buildbot.pypy.org Sat Aug 3 00:15:43 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 00:15:43 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Add these to longobject as well. Pidigits went from 6.59s to 5.79s. I call it progress :) Message-ID: <20130802221543.59CF01C3611@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65917:5c7293f44084 Date: 2013-08-02 23:15 +0200 http://bitbucket.org/pypy/pypy/changeset/5c7293f44084/ Log: Add these to longobject as well. Pidigits went from 6.59s to 5.79s. 
I call it progress :) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -226,9 +226,21 @@ space.wrap("long division or modulo by zero")) return newlong(space, z) +def floordiv__Long_Int(space, w_long1, w_int2): + try: + z = w_long1.num.int_floordiv(w_int2.intval) + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, + space.wrap("long division or modulo by zero")) + return newlong(space, z) + + def div__Long_Long(space, w_long1, w_long2): return floordiv__Long_Long(space, w_long1, w_long2) +def div__Long_Int(space, w_long1, w_int2): + return floordiv__Long_Int(space, w_long1, w_int2) + def mod__Long_Long(space, w_long1, w_long2): try: z = w_long1.num.mod(w_long2.num) @@ -253,6 +265,14 @@ space.wrap("long division or modulo by zero")) return space.newtuple([newlong(space, div), newlong(space, mod)]) +def divmod__Long_Int(space, w_long1, w_int2): + try: + div, mod = w_long1.num.int_divmod(w_int2.intval) + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, + space.wrap("long division or modulo by zero")) + return space.newtuple([newlong(space, div), newlong(space, mod)]) + def pow__Long_Long_Long(space, w_long1, w_long2, w_long3): # XXX need to replicate some of the logic, to get the errors right if w_long2.num.sign < 0: From noreply at buildbot.pypy.org Sat Aug 3 00:15:44 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 00:15:44 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Do the binary ops. Message-ID: <20130802221544.9E4FB1C3611@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65918:978a4df0c84c Date: 2013-08-03 00:14 +0200 http://bitbucket.org/pypy/pypy/changeset/978a4df0c84c/ Log: Do the binary ops. 
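Same idea for the bitwise operators: a minimal sketch of the single-word fast paths added in the diff below (note the trailing underscore on int_and_ and int_or_, matching the existing and_ and or_; values are only illustrative):

    from rpython.rlib.rbigint import rbigint

    big = rbigint.fromlong(0x123456789ABCDEF0123L)

    # bitwise ops with a machine-word right-hand side
    assert big.int_and_(0xFF).tolong() == 0x123456789ABCDEF0123L & 0xFF
    assert big.int_or_(0x0F).tolong() == 0x123456789ABCDEF0123L | 0x0F
    assert big.int_xor(0xFF).tolong() == 0x123456789ABCDEF0123L ^ 0xFF

    # same result as the general two-rbigint path
    assert big.and_(rbigint.fromint(0xFF)).eq(big.int_and_(0xFF))

Together with the and__Long_Int / or__Long_Int / xor__Long_Int multimethods below, this lets an app-level expression such as some_long & 0xFF reach rbigint without wrapping the int operand first.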
diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -353,12 +353,21 @@ def and__Long_Long(space, w_long1, w_long2): return newlong(space, w_long1.num.and_(w_long2.num)) +def and__Long_Int(space, w_long1, w_int2): + return newlong(space, w_long1.num.int_and_(w_int2.intval)) + def xor__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.xor(w_long2.num)) +def xor__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_xor(w_int2.intval)) + def or__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.or_(w_long2.num)) +def or__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_or_(w_int2.intval)) + def oct__Long(space, w_long1): return space.wrap(w_long1.num.oct()) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -631,8 +631,7 @@ if self.sign == 0: return rbigint.fromint(-1 * other) if (self.sign > 0 and other > 0) or (self.sign < 0 and other < 0): - # Improve - result = _x_sub(self, rbigint.fromint(other)) + result = _x_int_sub(self, abs(other)) else: result = _x_int_add(self, abs(other)) result.sign *= self.sign @@ -773,7 +772,7 @@ return ONENEGATIVERBIGINT if other.sign == -1 else ONERBIGINT return NULLRBIGINT elif digit & (digit - 1) == 0: - mod = self.and_(rbigint([_store_digit(digit - 1)], 1, 1)) + mod = self.int_and_(digit - 1) else: # Perform size = self.numdigits() - 1 @@ -811,7 +810,7 @@ return ONENEGATIVERBIGINT if other < 0 else ONERBIGINT return NULLRBIGINT elif digit & (digit - 1) == 0: - mod = self.and_(rbigint([_store_digit(digit - 1)], 1, 1)) + mod = self.int_and_(digit -1) else: # Perform size = self.numdigits() - 1 @@ -1129,14 +1128,26 @@ return _bitwise(self, '&', other) @jit.elidable + def int_and_(self, other): + return _int_bitwise(self, '&', other) + + @jit.elidable def xor(self, other): return _bitwise(self, '^', other) @jit.elidable + def int_xor(self, other): + return _int_bitwise(self, '^', other) + + @jit.elidable def or_(self, other): return _bitwise(self, '|', other) @jit.elidable + def int_or_(self, other): + return _int_bitwise(self, '|', other) + + @jit.elidable def oct(self): if self.sign == 0: return '0L' @@ -2501,6 +2512,86 @@ return z.invert() _bitwise._annspecialcase_ = "specialize:arg(1)" +def _int_bitwise(a, op, b): # '&', '|', '^' + """ Bitwise and/or/xor operations with ints. """ + + if a.sign < 0: + a = a.invert() + maska = MASK + else: + maska = 0 + if b < 0: + b = ~b + maskb = MASK + else: + maskb = 0 + + negz = 0 + if op == '^': + if maska != maskb: + maska ^= MASK + negz = -1 + elif op == '&': + if maska and maskb: + op = '|' + maska ^= MASK + maskb ^= MASK + negz = -1 + elif op == '|': + if maska or maskb: + op = '&' + maska ^= MASK + maskb ^= MASK + negz = -1 + + # JRH: The original logic here was to allocate the result value (z) + # as the longer of the two operands. However, there are some cases + # where the result is guaranteed to be shorter than that: AND of two + # positives, OR of two negatives: use the shorter number. AND with + # mixed signs: use the positive number. OR with mixed signs: use the + # negative number. After the transformations above, op will be '&' + # iff one of these cases applies, and mask will be non-0 for operands + # whose length should be ignored. 
+ + size_a = a.numdigits() + if op == '&': + if maska: + size_z = 1 + else: + if maskb: + size_z = size_a + else: + size_z = 1 + else: + size_z = size_a + + z = rbigint([NULLDIGIT] * size_z, 1, size_z) + i = 0 + while i < size_z: + if i < size_a: + diga = a.digit(i) ^ maska + else: + diga = maska + if i < 1: + digb = b ^ maskb + else: + digb = maskb + + if op == '&': + z.setdigit(i, diga & digb) + elif op == '|': + z.setdigit(i, diga | digb) + elif op == '^': + z.setdigit(i, diga ^ digb) + i += 1 + + z._normalize() + if negz == 0: + return z + + return z.invert() +_int_bitwise._annspecialcase_ = "specialize:arg(1)" + ULONGLONG_BOUND = r_ulonglong(1L << (r_longlong.BITS-1)) LONGLONG_MIN = r_longlong(-(1L << (r_longlong.BITS-1))) From noreply at buildbot.pypy.org Sat Aug 3 10:03:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 10:03:03 +0200 (CEST) Subject: [pypy-commit] stmgc default: Implement the repeat read barrier (actually mostly just expose it Message-ID: <20130803080303.A6CB51C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r471:b57de4359eea Date: 2013-08-03 10:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/b57de4359eea/ Log: Implement the repeat read barrier (actually mostly just expose it to the user, with a fast-path logic). diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -276,28 +276,45 @@ /* Version of stm_DirectReadBarrier() that doesn't abort and assumes * that 'P' was already an up-to-date result of a previous * stm_DirectReadBarrier(). We only have to check if we did in the - * meantime a stm_write_barrier(). + * meantime a stm_write_barrier(). Should only be called if we + * have the flag PUBLIC_TO_PRIVATE or on MOVED objects. This version + * should never abort (it is used in stm_decode_abort_info()). */ - if (P->h_tid & GCFLAG_PUBLIC) + assert(P->h_tid & GCFLAG_PUBLIC); + assert(!(P->h_tid & GCFLAG_STUB)); + + if (P->h_tid & GCFLAG_MOVED) { - if (P->h_tid & GCFLAG_MOVED) - { - P = (gcptr)P->h_revision; - assert(P->h_tid & GCFLAG_PUBLIC); - } - if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) - { - struct tx_descriptor *d = thread_descriptor; - wlog_t *item; - G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + dprintf(("repeat_read_barrier: %p -> %p moved\n", P, + (gcptr)P->h_revision)); + P = (gcptr)P->h_revision; + assert(P->h_tid & GCFLAG_PUBLIC); + assert(!(P->h_tid & GCFLAG_STUB)); + assert(!(P->h_tid & GCFLAG_MOVED)); + if (!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) + return P; + } + assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - P = item->val; - assert(!(P->h_tid & GCFLAG_PUBLIC)); - no_private_obj: - ; - } - } + struct tx_descriptor *d = thread_descriptor; + wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + /* We have a key in 'public_to_private'. The value is the + corresponding private object. */ + dprintf(("repeat_read_barrier: %p -> %p public_to_private\n", P, item->val)); + P = item->val; + assert(!(P->h_tid & GCFLAG_PUBLIC)); assert(!(P->h_tid & GCFLAG_STUB)); + assert(is_private(P)); + return P; + + no_private_obj: + /* Key not found. It should not be waiting in 'stolen_objects', + because this case from steal.c applies to objects to were originally + backup objects. 'P' cannot be a backup object if it was obtained + earlier as a result of stm_read_barrier(). 
+ */ return P; } diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -69,9 +69,9 @@ static const revision_t GCFLAG_VISITED = STM_FIRST_GCFLAG << 1; static const revision_t GCFLAG_PUBLIC = STM_FIRST_GCFLAG << 2; static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; -static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; +// in stmgc.h: GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; -static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; +// in stmgc.h: GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -204,7 +204,7 @@ WRITE_BUF(buffer, res_size); WRITE('e'); for (i=0; iabortinfo.size; i+=2) { - char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]); + char *object = (char*)stm_repeat_read_barrier(d->abortinfo.items[i+0]); long *fieldoffsets = (long*)d->abortinfo.items[i+1]; long kind, offset; size_t rps_size; diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -58,7 +58,7 @@ int stm_enter_callback_call(void); void stm_leave_callback_call(int); -/* read/write barriers (the most general versions only for now). +/* read/write barriers. - the read barrier must be applied before reading from an object. the result is valid as long as we're in the same transaction, @@ -68,10 +68,18 @@ the result is valid for a shorter period of time: we have to do stm_write_barrier() again if we ended the transaction, or if we did a potential collection (e.g. stm_allocate()). + + - as an optimization, stm_repeat_read_barrier() can be used + instead of stm_read_barrier() if the object was already + obtained by a stm_read_barrier() in the same transaction. + The only thing that may have occurred is that a + stm_write_barrier() on the same object could have made it + invalid. */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); gcptr stm_write_barrier(gcptr); +gcptr stm_repeat_read_barrier(gcptr); #endif /* start a new transaction, calls callback(), and when it returns @@ -158,7 +166,9 @@ extern __thread revision_t stm_private_rev_num; gcptr stm_DirectReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); +static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; static const revision_t GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; +static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; extern __thread char *stm_read_barrier_cache; #define FX_MASK 65535 #define FXCACHE_AT(obj) \ @@ -178,5 +188,10 @@ stm_WriteBarrier(obj) \ : (obj)) +#define stm_repeat_read_barrier(obj) \ + (UNLIKELY((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED)) ? 
\ + stm_RepeatReadBarrier(obj) \ + : (obj)) + #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -57,6 +57,7 @@ int stm_in_transaction(void); gcptr stm_read_barrier(gcptr); gcptr stm_write_barrier(gcptr); + gcptr stm_repeat_read_barrier(gcptr); void stm_perform_transaction(gcptr arg, int (*callback)(gcptr, int)); void stm_commit_transaction(void); void stm_begin_inevitable_transaction(void); diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -713,3 +713,19 @@ assert x == 1 lib.stm_leave_callback_call(x) lib.stm_initialize_tests(0) + +def test_repeat_read_barrier(): + p = nalloc(HDR + WORD) + assert lib.stm_repeat_read_barrier(p) == p + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert lib.stm_repeat_read_barrier(p) == p + # + p = palloc(HDR + WORD) + assert classify(p) == "public" + assert lib.stm_read_barrier(p) == p + q = lib.stm_write_barrier(p) + assert classify(q) == "private" + assert q != p + assert lib.stm_repeat_read_barrier(q) == q + assert lib.stm_repeat_read_barrier(p) == q From noreply at buildbot.pypy.org Sat Aug 3 10:38:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 10:38:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: 'base_loc' is not actually ebp, but an immediate, which (on 64-bit) may Message-ID: <20130803083825.A0D7E1C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65919:cc9fd4822b91 Date: 2013-08-03 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/cc9fd4822b91/ Log: 'base_loc' is not actually ebp, but an immediate, which (on 64-bit) may not fit a 32-bit number; the encoding Remi did is correct in that case. But if it is (or fits) 32-bit, use directly INC_j, with the logic of regloc.py to pick. diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1610,14 +1610,11 @@ self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) def genop_discard_increment_debug_counter(self, op, arglocs): - assert IS_X86_64 - # I'm getting lazy. mem_reg_plus_const does not support - # ebp as a register, but that is what we get from the regalloc - # (mostly?) 
-> change to SCRATCH_REG + # base_loc and ofs_loc should be immediates, but maybe not + # fitting in 32-bit base_loc, ofs_loc, size_loc = arglocs - self.mc.MOV(X86_64_SCRATCH_REG, base_loc) - self.mc.INC_m((X86_64_SCRATCH_REG.value, ofs_loc.getint())) - + self.mc.INC(addr_add(base_loc, ofs_loc)) + def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs assert isinstance(size_loc, ImmedLoc) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -599,6 +599,7 @@ TEST8 = _binaryop('TEST8') BTS = _binaryop('BTS') + INC = _unaryop('INC') ADD = _binaryop('ADD') SUB = _binaryop('SUB') IMUL = _binaryop('IMUL') diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -473,7 +473,8 @@ # ------------------------------ Arithmetic ------------------------------ INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1)) - + INC_j = insn(rex_w, '\xFF', orbyte(0), abs_(1)) + ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,_,ADD_rj,_,_ = common_modes(0) OR_ri, OR_rr, OR_rb, _,_,OR_rm, _,OR_rj, _,_ = common_modes(1) AND_ri,AND_rr,AND_rb,_,_,AND_rm,_,AND_rj,_,_ = common_modes(4) From noreply at buildbot.pypy.org Sat Aug 3 14:11:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 14:11:54 +0200 (CEST) Subject: [pypy-commit] stmgc default: stm_immut_read_barrier() Message-ID: <20130803121154.A062F1C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r472:b7b44751111b Date: 2013-08-03 14:11 +0200 http://bitbucket.org/pypy/stmgc/changeset/b7b44751111b/ Log: stm_immut_read_barrier() diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -318,6 +318,41 @@ return P; } +gcptr stm_ImmutReadBarrier(gcptr P) +{ + assert(P->h_tid & GCFLAG_STUB); + assert(P->h_tid & GCFLAG_PUBLIC); + + revision_t v = ACCESS_ONCE(P->h_revision); + assert(IS_POINTER(v)); /* "is a pointer", "has a more recent revision" */ + + if (!(v & 2)) + { + P = (gcptr)v; + } + else + { + /* follow a stub reference */ + struct tx_descriptor *d = thread_descriptor; + struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); + if (foreign_pd == d->public_descriptor) + { + /* Same thread: dereference the pointer directly. 
*/ + dprintf(("immut_read_barrier: %p -> %p via stub\n ", P, + (gcptr)(v - 2))); + P = (gcptr)(v - 2); + } + else + { + /* stealing: needed because accessing v - 2 from this thread + is forbidden (the target might disappear under our feet) */ + dprintf(("immut_read_barrier: %p -> stealing...\n ", P)); + stm_steal_stub(P); + } + } + return stm_immut_read_barrier(P); /* retry */ +} + static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj, int from_stolen) { diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -73,7 +73,7 @@ // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; // in stmgc.h: GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; -static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; +// in stmgc.h: GCFLAG_STUB = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; @@ -195,8 +195,9 @@ void SpinLoop(int); gcptr stm_DirectReadBarrier(gcptr); +gcptr stm_WriteBarrier(gcptr); gcptr stm_RepeatReadBarrier(gcptr); -gcptr stm_WriteBarrier(gcptr); +gcptr stm_ImmutReadBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr); /* debugging: read barrier, but not recording anything */ int _stm_is_private(gcptr); /* debugging */ diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -75,11 +75,16 @@ The only thing that may have occurred is that a stm_write_barrier() on the same object could have made it invalid. + + - a different optimization is to read immutable fields: in order + to do that, use stm_immut_read_barrier(), which only activates + on stubs. */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); gcptr stm_write_barrier(gcptr); gcptr stm_repeat_read_barrier(gcptr); +gcptr stm_immut_read_barrier(gcptr); #endif /* start a new transaction, calls callback(), and when it returns @@ -169,6 +174,7 @@ static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; static const revision_t GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_STUB = STM_FIRST_GCFLAG << 8; extern __thread char *stm_read_barrier_cache; #define FX_MASK 65535 #define FXCACHE_AT(obj) \ @@ -193,5 +199,10 @@ stm_RepeatReadBarrier(obj) \ : (obj)) +#define stm_immut_read_barrier(obj) \ + (UNLIKELY((obj)->h_tid & GCFLAG_STUB) ? 
\ + stm_ImmutReadBarrier(obj) \ + : (obj)) + #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -58,6 +58,7 @@ gcptr stm_read_barrier(gcptr); gcptr stm_write_barrier(gcptr); gcptr stm_repeat_read_barrier(gcptr); + gcptr stm_immut_read_barrier(gcptr); void stm_perform_transaction(gcptr arg, int (*callback)(gcptr, int)); void stm_commit_transaction(void); void stm_begin_inevitable_transaction(void); diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -729,3 +729,19 @@ assert q != p assert lib.stm_repeat_read_barrier(q) == q assert lib.stm_repeat_read_barrier(p) == q + +def test_immut_read_barrier(): + p = palloc(HDR + WORD) + p2 = lib.stm_write_barrier(p) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p) == "public" + assert classify(p2) == "protected" + pstub = ffi.cast("gcptr", p.h_revision) + assert classify(pstub) == "stub" + assert lib.stm_immut_read_barrier(p) == p + assert lib.stm_immut_read_barrier(pstub) == p2 + assert lib.stm_immut_read_barrier(p2) == p2 + assert lib.stm_read_barrier(p2) == p2 + assert lib.stm_read_barrier(pstub) == p2 + assert lib.stm_read_barrier(p) == p2 From noreply at buildbot.pypy.org Sat Aug 3 14:33:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 14:33:49 +0200 (CEST) Subject: [pypy-commit] stmgc default: stm_repeat_write_barrier() Message-ID: <20130803123349.6C7DF1C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r473:ef085b2228e4 Date: 2013-08-03 14:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/ef085b2228e4/ Log: stm_repeat_write_barrier() diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -618,6 +618,16 @@ } } +gcptr stm_RepeatWriteBarrier(gcptr P) +{ + assert(!(P->h_tid & GCFLAG_IMMUTABLE)); + assert(is_private(P)); + assert(P->h_tid & GCFLAG_WRITE_BARRIER); + P->h_tid &= ~GCFLAG_WRITE_BARRIER; + gcptrlist_insert(&thread_descriptor->old_objects_to_trace, P); + return P; +} + gcptr stm_WriteBarrier(gcptr P) { assert(!(P->h_tid & GCFLAG_IMMUTABLE)); diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -198,6 +198,7 @@ gcptr stm_WriteBarrier(gcptr); gcptr stm_RepeatReadBarrier(gcptr); gcptr stm_ImmutReadBarrier(gcptr); +gcptr stm_RepeatWriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr); /* debugging: read barrier, but not recording anything */ int _stm_is_private(gcptr); /* debugging */ diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -79,12 +79,17 @@ - a different optimization is to read immutable fields: in order to do that, use stm_immut_read_barrier(), which only activates on stubs. + + - stm_repeat_write_barrier() can be used on an object on which + we already did stm_write_barrier(), but a potential collection + can have occurred. */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); gcptr stm_write_barrier(gcptr); gcptr stm_repeat_read_barrier(gcptr); gcptr stm_immut_read_barrier(gcptr); +gcptr stm_repeat_write_barrier(gcptr); /* <= always returns its argument */ #endif /* start a new transaction, calls callback(), and when it returns @@ -204,5 +209,10 @@ stm_ImmutReadBarrier(obj) \ : (obj)) +#define stm_repeat_write_barrier(obj) \ + (UNLIKELY((obj)->h_tid & GCFLAG_WRITE_BARRIER) ? 
\ + stm_RepeatWriteBarrier(obj) \ + : (obj)) + #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -59,6 +59,7 @@ gcptr stm_write_barrier(gcptr); gcptr stm_repeat_read_barrier(gcptr); gcptr stm_immut_read_barrier(gcptr); + gcptr stm_repeat_write_barrier(gcptr); void stm_perform_transaction(gcptr arg, int (*callback)(gcptr, int)); void stm_commit_transaction(void); void stm_begin_inevitable_transaction(void); diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -745,3 +745,20 @@ assert lib.stm_read_barrier(p2) == p2 assert lib.stm_read_barrier(pstub) == p2 assert lib.stm_read_barrier(p) == p2 + +def test_repeat_write_barrier(): + n = nalloc_refs(1) + lib.stm_push_root(n) + minor_collect() + n = lib.stm_pop_root() + q = nalloc(HDR + WORD) + lib.rawsetlong(q, 0, 1298719) + n1 = lib.stm_repeat_write_barrier(n) + assert n1 == n + lib.rawsetptr(n, 0, q) + lib.stm_push_root(n) + minor_collect() + n1 = lib.stm_pop_root() + assert n1 == n + q = lib.rawgetptr(n, 0) + assert lib.rawgetlong(q, 0) == 1298719 From noreply at buildbot.pypy.org Sat Aug 3 14:44:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 14:44:50 +0200 (CEST) Subject: [pypy-commit] stmgc default: stm_pointer_equal_prebuilt() Message-ID: <20130803124450.829321C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r474:9dc18268f0da Date: 2013-08-03 14:42 +0200 http://bitbucket.org/pypy/stmgc/changeset/9dc18268f0da/ Log: stm_pointer_equal_prebuilt() diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -153,6 +153,19 @@ return (p1 == p2); } +_Bool stm_pointer_equal_prebuilt(gcptr p1, gcptr p2) +{ + assert(p2 != NULL); + assert(p2->h_tid & GCFLAG_PREBUILT_ORIGINAL); + + if (p1 == p2) + return 1; + + /* the only possible case to still get True is if p2 == p1->h_original */ + return (p1 != NULL) && (p1->h_original == p2) && + !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL); +} + /************************************************************/ void stm_abort_info_push(gcptr obj, long fieldoffsets[]) diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -41,6 +41,7 @@ /* returns nonzero if the two object-copy pointers belong to the same original object */ _Bool stm_pointer_equal(gcptr, gcptr); +_Bool stm_pointer_equal_prebuilt(gcptr, gcptr); /* 2nd arg is known prebuilt */ /* to push/pop objects into the local shadowstack */ #if 0 // (optimized version below) diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -50,6 +50,7 @@ revision_t stm_hash(gcptr); revision_t stm_id(gcptr); _Bool stm_pointer_equal(gcptr, gcptr); + _Bool stm_pointer_equal_prebuilt(gcptr, gcptr); void stm_push_root(gcptr); gcptr stm_pop_root(void); void stm_set_max_aborts(int max_aborts); diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -129,3 +129,30 @@ 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1] + +def test_pointer_equal_prebuilt(): + p1 = palloc(HDR + WORD) + p2 = palloc(HDR + WORD) + p3 = oalloc(HDR + WORD) + p4 = nalloc(HDR + WORD) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + p1b = lib.stm_write_barrier(p1) + p2b = lib.stm_write_barrier(p2) + p3b = lib.stm_write_barrier(p3) + p4b = lib.stm_write_barrier(p4) + # + got = [] + for qa in [ffi.NULL, p1, p1b, p2, p2b, p3, p3b, p4, p4b]: + for qb in [p1, 
p2]: + got.append(lib.stm_pointer_equal_prebuilt(qa, qb)) + # + assert got == [0, 0, + 1, 0, + 1, 0, + 0, 1, + 0, 1, + 0, 0, + 0, 0, + 0, 0, + 0, 0] From noreply at buildbot.pypy.org Sat Aug 3 15:34:53 2013 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 Aug 2013 15:34:53 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fix for: https://bugs.pypy.org/issue1561 (enums accessible as type) Message-ID: <20130803133453.E72AF1C10E6@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r65921:bfcf9013eef0 Date: 2013-08-03 02:54 -0700 http://bitbucket.org/pypy/pypy/changeset/bfcf9013eef0/ Log: fix for: https://bugs.pypy.org/issue1561 (enums accessible as type) diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -426,6 +426,11 @@ # mostly for the benefit of the CINT backend, which treats std as special gbl.std = make_cppnamespace(None, "std", None, False) + # install a type for enums to refer to + # TODO: this is correct for C++98, not for C++11 and in general there will + # be the same issue for all typedef'd builtin types + setattr(gbl, 'unsigned int', int) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -428,12 +428,17 @@ c = cppyy_test_data() assert isinstance(c, cppyy_test_data) - # TODO: test that the enum is accessible as a type + # test that the enum is accessible as a type + assert cppyy_test_data.what assert cppyy_test_data.kNothing == 6 assert cppyy_test_data.kSomething == 111 assert cppyy_test_data.kLots == 42 + assert cppyy_test_data.what(cppyy_test_data.kNothing) == cppyy_test_data.kNothing + assert cppyy_test_data.what(6) == cppyy_test_data.kNothing + # TODO: only allow instantiations with correct values (C++11) + assert c.get_enum() == cppyy_test_data.kNothing assert c.m_enum == cppyy_test_data.kNothing @@ -455,6 +460,7 @@ assert cppyy_test_data.s_enum == cppyy_test_data.kSomething # global enums + assert gbl.fruit # test type accessible assert gbl.kApple == 78 assert gbl.kBanana == 29 assert gbl.kCitrus == 34 From noreply at buildbot.pypy.org Sat Aug 3 15:34:55 2013 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 Aug 2013 15:34:55 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fix for https://bugs.pypy.org/issue1563 (raises ReferenceError instead of segfaulting) Message-ID: <20130803133455.3D2CD1C1360@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r65922:d9134817f168 Date: 2013-08-03 06:33 -0700 http://bitbucket.org/pypy/pypy/changeset/d9134817f168/ Log: fix for https://bugs.pypy.org/issue1563 (raises ReferenceError instead of segfaulting) diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -592,11 +592,15 @@ def get(self, w_cppinstance, w_pycppclass): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, 
can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) self.converter.to_memory(self.space, w_cppinstance, w_value, offset) return self.space.w_None diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -105,6 +105,13 @@ raises(IndexError, c.m_float_array.__getitem__, self.N) raises(IndexError, c.m_double_array.__getitem__, self.N) + # can not access an instance member on the class + raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, getattr, cppyy_test_data, 'm_int') + + assert not hasattr(cppyy_test_data, 'm_bool') + assert not hasattr(cppyy_test_data, 'm_int') + c.destruct() def test03_instance_data_write_access(self): From noreply at buildbot.pypy.org Sat Aug 3 15:34:52 2013 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 Aug 2013 15:34:52 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20130803133452.A9B1A1C00F4@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r65920:da64c461417c Date: 2013-08-03 02:44 -0700 http://bitbucket.org/pypy/pypy/changeset/da64c461417c/ Log: merge default into branch diff too long, truncating to 2000 out of 100692 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ .idea .project .pydevproject +__pycache__ syntax: regexp ^testresult$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -3,3 +3,6 @@ d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis 
Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. 
Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -218,45 +281,22 @@ Impara, Germany Change Maker, Sweden University of California Berkeley, USA + Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. 
-License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' -============================================= - -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'pypy/translator/jvm/src/jasmin.jar' -================================================ - -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - License for 'pypy/module/unicodedata/' ====================================== diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. 
+ so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/command/install.py b/lib-python/2.7/distutils/command/install.py --- a/lib-python/2.7/distutils/command/install.py +++ b/lib-python/2.7/distutils/command/install.py @@ -474,8 +474,8 @@ def select_scheme (self, name): # it's the caller's problem if they supply a bad name! - if hasattr(sys, 'pypy_version_info') and not ( - name.endswith('_user') or name.endswith('_home')): + if (hasattr(sys, 'pypy_version_info') and + not name.endswith(('_user', '_home'))): name = 'pypy' scheme = INSTALL_SCHEMES[name] for key in SCHEME_KEYS: diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -1,30 +1,16 @@ -"""Provide access to Python's configuration information. The specific -configuration variables available depend heavily on the platform and -configuration. The values may be retrieved using -get_config_var(name), and the list of variables is available via -get_config_vars().keys(). Additional convenience functions are also -available. - -Written by: Fred L. Drake, Jr. -Email: -""" - -__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" - -import sys - # The content of this file is redirected from # sysconfig_cpython or sysconfig_pypy. +# All underscore names are imported too, because +# people like to use undocumented sysconfig._xxx +# directly. +import sys if '__pypy__' in sys.builtin_module_names: - from distutils.sysconfig_pypy import * - from distutils.sysconfig_pypy import _config_vars # needed by setuptools - from distutils.sysconfig_pypy import _variable_rx # read_setup_file() + from distutils import sysconfig_pypy as _sysconfig_module else: - from distutils.sysconfig_cpython import * - from distutils.sysconfig_cpython import _config_vars # needed by setuptools - from distutils.sysconfig_cpython import _variable_rx # read_setup_file() + from distutils import sysconfig_cpython as _sysconfig_module +globals().update(_sysconfig_module.__dict__) _USE_CLANG = None diff --git a/lib-python/2.7/distutils/sysconfig_cpython.py b/lib-python/2.7/distutils/sysconfig_cpython.py --- a/lib-python/2.7/distutils/sysconfig_cpython.py +++ b/lib-python/2.7/distutils/sysconfig_cpython.py @@ -9,7 +9,7 @@ Email: """ -__revision__ = "$Id$" +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" import os import re diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -1,9 +1,18 @@ -"""PyPy's minimal configuration information. +"""Provide access to Python's configuration information. +This is actually PyPy's minimal configuration information. + +The specific configuration variables available depend heavily on the +platform and configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. 
""" +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" + import sys import os -import imp +import shlex from distutils.errors import DistutilsPlatformError @@ -49,16 +58,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -71,7 +75,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars @@ -119,13 +123,21 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-fPIC', '-Wimplicit']) + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"] - compiler.compiler.append(cflags) - compiler.compiler_so.append(cflags) - compiler.linker_so.append(cflags) + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -134,21 +134,30 @@ DEBUG = 10 NOTSET = 0 -_levelNames = { - CRITICAL : 'CRITICAL', - ERROR : 'ERROR', - WARNING : 'WARNING', - INFO : 'INFO', - DEBUG : 'DEBUG', - NOTSET : 'NOTSET', - 'CRITICAL' : CRITICAL, - 'ERROR' : ERROR, - 'WARN' : WARNING, - 'WARNING' : WARNING, - 'INFO' : INFO, - 'DEBUG' : DEBUG, - 'NOTSET' : NOTSET, +# NOTE(flaper87): This is different from +# python's stdlib module since pypy's +# dicts are much faster when their +# 
keys are all of the same type. +# Introduced in commit 9de7b40c586f +_levelToName = { + CRITICAL: 'CRITICAL', + ERROR: 'ERROR', + WARNING: 'WARNING', + INFO: 'INFO', + DEBUG: 'DEBUG', + NOTSET: 'NOTSET', } +_nameToLevel = { + 'CRITICAL': CRITICAL, + 'ERROR': ERROR, + 'WARN': WARNING, + 'WARNING': WARNING, + 'INFO': INFO, + 'DEBUG': DEBUG, + 'NOTSET': NOTSET, +} +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ @@ -164,7 +173,11 @@ Otherwise, the string "Level %s" % level is returned. """ - return _levelNames.get(level, ("Level %s" % level)) + + # NOTE(flaper87): Check also in _nameToLevel + # if value is None. + return (_levelToName.get(level) or + _nameToLevel.get(level, ("Level %s" % level))) def addLevelName(level, levelName): """ @@ -174,8 +187,8 @@ """ _acquireLock() try: #unlikely to cause an exception, but you never know... - _levelNames[level] = levelName - _levelNames[levelName] = level + _levelToName[level] = levelName + _nameToLevel[levelName] = level finally: _releaseLock() @@ -183,9 +196,9 @@ if isinstance(level, int): rv = level elif str(level) == level: - if level not in _levelNames: + if level not in _nameToLevel: raise ValueError("Unknown level: %r" % level) - rv = _levelNames[level] + rv = _nameToLevel[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv @@ -277,7 +290,7 @@ self.lineno = lineno self.funcName = func self.created = ct - self.msecs = (ct - long(ct)) * 1000 + self.msecs = (ct - int(ct)) * 1000 self.relativeCreated = (self.created - _startTime) * 1000 if logThreads and thread: self.thread = thread.get_ident() diff --git a/lib-python/2.7/logging/config.py b/lib-python/2.7/logging/config.py --- a/lib-python/2.7/logging/config.py +++ b/lib-python/2.7/logging/config.py @@ -156,7 +156,7 @@ h = klass(*args) if "level" in opts: level = cp.get(sectname, "level") - h.setLevel(logging._levelNames[level]) + h.setLevel(level) if len(fmt): h.setFormatter(formatters[fmt]) if issubclass(klass, logging.handlers.MemoryHandler): @@ -187,7 +187,7 @@ opts = cp.options(sectname) if "level" in opts: level = cp.get(sectname, "level") - log.setLevel(logging._levelNames[level]) + log.setLevel(level) for h in root.handlers[:]: root.removeHandler(h) hlist = cp.get(sectname, "handlers") @@ -237,7 +237,7 @@ existing.remove(qn) if "level" in opts: level = cp.get(sectname, "level") - logger.setLevel(logging._levelNames[level]) + logger.setLevel(level) for h in logger.handlers[:]: logger.removeHandler(h) logger.propagate = propagate diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/pydoc.py b/lib-python/2.7/pydoc.py --- a/lib-python/2.7/pydoc.py +++ b/lib-python/2.7/pydoc.py @@ -1953,7 +1953,11 @@ if key is None: callback(None, modname, '') else: - desc = split(__import__(modname).__doc__ or '', '\n')[0] + try: + module_doc = __import__(modname).__doc__ + except ImportError: + module_doc = None + desc = split(module_doc or '', '\n')[0] if find(lower(modname + ' - ' + desc), key) >= 0: callback(None, modname, desc) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 
+96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,11 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - self._sock._decref_socketios() + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_codecs.py b/lib-python/2.7/test/test_codecs.py --- a/lib-python/2.7/test/test_codecs.py +++ b/lib-python/2.7/test/test_codecs.py @@ -2,7 +2,11 @@ import unittest import codecs import locale -import sys, StringIO, _testcapi +import sys, StringIO +try: + import _testcapi +except ImportError: + _testcapi = None class Queue(object): """ @@ -1387,7 +1391,7 @@ decodedresult += reader.read() self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding)) - if encoding not in broken_incremental_coders: + if encoding not in broken_incremental_coders and _testcapi: # check incremental decoder/encoder (fetched via the Python # and C API) and iterencode()/iterdecode() try: diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/lib-python/2.7/test/test_logging.py b/lib-python/2.7/test/test_logging.py --- a/lib-python/2.7/test/test_logging.py +++ b/lib-python/2.7/test/test_logging.py @@ -65,7 +65,8 @@ self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.saved_loggers = logger_dict.copy() - self.saved_level_names = logging._levelNames.copy() + self.saved_name_to_level = logging._nameToLevel.copy() + self.saved_level_to_name = logging._levelToName.copy() finally: logging._releaseLock() @@ -97,8 +98,10 @@ self.root_logger.setLevel(self.original_logging_level) 
logging._acquireLock() try: - logging._levelNames.clear() - logging._levelNames.update(self.saved_level_names) + logging._levelToName.clear() + logging._levelToName.update(self.saved_level_to_name) + logging._nameToLevel.clear() + logging._nameToLevel.update(self.saved_name_to_level) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list @@ -275,6 +278,24 @@ def test_invalid_name(self): self.assertRaises(TypeError, logging.getLogger, any) + def test_get_level_name(self): + """Test getLevelName returns level constant.""" + # NOTE(flaper87): Bug #1517 + self.assertEqual(logging.getLevelName('NOTSET'), 0) + self.assertEqual(logging.getLevelName('DEBUG'), 10) + self.assertEqual(logging.getLevelName('INFO'), 20) + self.assertEqual(logging.getLevelName('WARN'), 30) + self.assertEqual(logging.getLevelName('WARNING'), 30) + self.assertEqual(logging.getLevelName('ERROR'), 40) + self.assertEqual(logging.getLevelName('CRITICAL'), 50) + + self.assertEqual(logging.getLevelName(0), 'NOTSET') + self.assertEqual(logging.getLevelName(10), 'DEBUG') + self.assertEqual(logging.getLevelName(20), 'INFO') + self.assertEqual(logging.getLevelName(30), 'WARNING') + self.assertEqual(logging.getLevelName(40), 'ERROR') + self.assertEqual(logging.getLevelName(50), 'CRITICAL') + class BasicFilterTest(BaseTest): """Test the bundled Filter class.""" diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib-python/2.7/test/test_sysconfig.py b/lib-python/2.7/test/test_sysconfig.py --- a/lib-python/2.7/test/test_sysconfig.py +++ b/lib-python/2.7/test/test_sysconfig.py @@ -7,7 +7,8 @@ import subprocess from copy import copy, deepcopy -from test.test_support import run_unittest, TESTFN, unlink, get_attribute +from test.test_support import (run_unittest, TESTFN, unlink, get_attribute, + import_module) import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, @@ -236,7 +237,10 @@ def test_get_config_h_filename(self): config_h = sysconfig.get_config_h_filename() - self.assertTrue(os.path.isfile(config_h), config_h) + # import_module skips the test when the CPython C Extension API + # appears to not be supported + self.assertTrue(os.path.isfile(config_h) or + not import_module('_testcapi'), config_h) def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', diff --git a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -1,6 +1,9 @@ """Test cases for traceback module""" -from _testcapi import traceback_print +try: + from _testcapi import traceback_print +except ImportError: + traceback_print = None from StringIO import StringIO import sys import unittest @@ -176,6 +179,8 @@ class TracebackFormatTests(unittest.TestCase): def test_traceback_format(self): + if traceback_print is None: + raise unittest.SkipTest('Requires _testcapi') try: raise KeyError('blah') except KeyError: diff --git a/lib-python/2.7/test/test_unicode.py b/lib-python/2.7/test/test_unicode.py --- a/lib-python/2.7/test/test_unicode.py +++ b/lib-python/2.7/test/test_unicode.py @@ -1609,7 +1609,10 @@ self.assertEqual("{}".format(u), '__unicode__ overridden') def 
test_encode_decimal(self): - from _testcapi import unicode_encodedecimal + try: + from _testcapi import unicode_encodedecimal + except ImportError: + raise unittest.SkipTest('Requires _testcapi') self.assertEqual(unicode_encodedecimal(u'123'), b'123') self.assertEqual(unicode_encodedecimal(u'\u0663.\u0661\u0664'), diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -130,7 +130,7 @@ RegrTest('test_bz2.py', usemodules='bz2'), RegrTest('test_calendar.py'), RegrTest('test_call.py', core=True), - RegrTest('test_capi.py'), + RegrTest('test_capi.py', usemodules='cpyext'), RegrTest('test_cd.py'), RegrTest('test_cfgparser.py'), RegrTest('test_cgi.py'), @@ -177,7 +177,7 @@ RegrTest('test_cprofile.py'), RegrTest('test_crypt.py', usemodules='crypt'), RegrTest('test_csv.py', usemodules='_csv'), - RegrTest('test_ctypes.py', usemodules="_rawffi thread"), + RegrTest('test_ctypes.py', usemodules="_rawffi thread cpyext"), RegrTest('test_curses.py'), RegrTest('test_datetime.py', usemodules='binascii struct'), RegrTest('test_dbm.py'), diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -20,7 +20,7 @@ or tp._type_ not in "iIhHbBlLqQ"): #XXX: are those all types? # we just dont get the type name - # in the interp levle thrown TypeError + # in the interp level thrown TypeError # from rawffi if there are more raise TypeError('bit fields not allowed for type ' + tp.__name__) @@ -166,9 +166,7 @@ if self is StructOrUnion: return if '_fields_' not in self.__dict__: - self._fields_ = [] - self._names = [] - _set_shape(self, [], self._is_union) + self._fields_ = [] # As a side-effet, this also sets the ffishape. 
__setattr__ = struct_setattr diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -476,6 +476,15 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + def _extract_yx(args): if len(args) >= 2: @@ -589,6 +598,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -602,6 +612,7 @@ @_argspec(2, 1, 2) def addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -780,6 +791,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -793,6 +805,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) 
lib.wattrset(self._win, attr) @@ -953,7 +966,7 @@ r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") if lib.color_content(color, r, g, b) == lib.ERR: raise error("Argument 1 was out of range. Check value of COLORS.") - return (r, g, b) + return (r[0], g[0], b[0]) def color_pair(n): @@ -1108,6 +1121,7 @@ term = ffi.NULL err = ffi.new("int *") if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] if err == 0: raise error("setupterm: could not find terminal") elif err == -1: @@ -1197,6 +1211,7 @@ def putp(text): + text = _texttype(text) return _check_ERR(lib.putp(text), "putp") diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! 
+sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_pypy_testcapi.py copy from lib_pypy/_testcapi.py copy to lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,14 +1,20 @@ -import os, sys +import os, sys, imp import tempfile -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. """ thisdir = os.path.dirname(__file__) output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -19,13 +25,13 @@ ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] else: ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], + res = compiler.compile([os.path.join(thisdir, csource)], include_dirs=[include_dir], extra_preargs=ccflags) object_filename = res[0] # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') + output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') @@ -37,7 +43,7 @@ library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] + '/EXPORT:init' + modulename] else: libraries = [] extra_ldargs = [] @@ -49,9 +55,7 @@ libraries=libraries, extra_preargs=extra_ldargs) - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1305,7 +1305,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = 
_ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,57 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() +try: + import cpyext +except ImportError: + raise ImportError("No module named '_testcapi'") +else: + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/__init__.py @@ -0,0 +1,48 @@ +# _tkinter package -- low-level interface to libtk and libtcl. +# +# This is an internal module, applications should "import Tkinter" instead. +# +# This version is based on cffi, and is a translation of _tkinter.c +# from CPython, version 2.7.4. 
+ +class TclError(Exception): + pass + +import cffi +try: + from .tklib import tklib, tkffi +except cffi.VerificationError: + raise ImportError("Tk headers and development libraries are required") + +from .app import TkApp + +TK_VERSION = tkffi.string(tklib.get_tk_version()) +TCL_VERSION = tkffi.string(tklib.get_tcl_version()) + +READABLE = tklib.TCL_READABLE +WRITABLE = tklib.TCL_WRITABLE +EXCEPTION = tklib.TCL_EXCEPTION + +def create(screenName=None, baseName=None, className=None, + interactive=False, wantobjects=False, wantTk=True, + sync=False, use=None): + return TkApp(screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use) + +def _flatten(item): + def _flatten1(output, item, depth): + if depth > 1000: + raise ValueError("nesting too deep in _flatten") + if not isinstance(item, (list, tuple)): + raise TypeError("argument must be sequence") + # copy items to output tuple + for o in item: + if isinstance(o, (list, tuple)): + _flatten1(output, o, depth + 1) + elif o is not None: + output.append(o) + + result = [] + _flatten1(result, item, 0) + return tuple(result) + diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/app.py @@ -0,0 +1,389 @@ +# The TkApp class. + +from .tklib import tklib, tkffi +from . import TclError +from .tclobj import TclObject, FromObj, AsObj, TypeCache + +import sys + +def varname_converter(input): + if isinstance(input, TclObject): + return input.string + return input + + +def Tcl_AppInit(app): + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + skip_tk_init = tklib.Tcl_GetVar( + app.interp, "_tkinter_skip_tk_init", tklib.TCL_GLOBAL_ONLY) + if skip_tk_init and tkffi.string(skip_tk_init) == "1": + return + + if tklib.Tk_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + +class _CommandData(object): + def __new__(cls, app, name, func): + self = object.__new__(cls) + self.app = app + self.name = name + self.func = func + handle = tkffi.new_handle(self) + app._commands[name] = handle # To keep the command alive + return tkffi.cast("ClientData", handle) + + @tkffi.callback("Tcl_CmdProc") + def PythonCmd(clientData, interp, argc, argv): + self = tkffi.from_handle(clientData) + assert self.app.interp == interp + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK + + @tkffi.callback("Tcl_CmdDeleteProc") + def PythonCmdDelete(clientData): + self = tkffi.from_handle(clientData) + app = self.app + del app._commands[self.name] + return + + +class TkApp(object): + def __new__(cls, screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use): + if not wantobjects: + raise NotImplementedError("wantobjects=True only") + self = object.__new__(cls) + self.interp = tklib.Tcl_CreateInterp() + self._wantobjects = wantobjects + self.threaded = bool(tklib.Tcl_GetVar2Ex( + self.interp, "tcl_platform", "threaded", + tklib.TCL_GLOBAL_ONLY)) + self.thread_id = tklib.Tcl_GetCurrentThread() + self.dispatching = False + self.quitMainLoop = False + self.errorInCmd = False + + self._typeCache = TypeCache() + self._commands = {} + + # Delete the 'exit' command, which can screw things up + tklib.Tcl_DeleteCommand(self.interp, "exit") + + if screenName is not None: + tklib.Tcl_SetVar2(self.interp, "env", "DISPLAY", screenName, + 
tklib.TCL_GLOBAL_ONLY) + + if interactive: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "1", + tklib.TCL_GLOBAL_ONLY) + else: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "0", + tklib.TCL_GLOBAL_ONLY) + + # This is used to get the application class for Tk 4.1 and up + argv0 = className.lower() + tklib.Tcl_SetVar(self.interp, "argv0", argv0, + tklib.TCL_GLOBAL_ONLY) + + if not wantTk: + tklib.Tcl_SetVar(self.interp, "_tkinter_skip_tk_init", "1", + tklib.TCL_GLOBAL_ONLY) + + # some initial arguments need to be in argv + if sync or use: + args = "" + if sync: + args += "-sync" + if use: + if sync: + args += " " + args += "-use " + use + + tklib.Tcl_SetVar(self.interp, "argv", args, + tklib.TCL_GLOBAL_ONLY) + + Tcl_AppInit(self) + # EnableEventHook() + return self + + def __del__(self): + tklib.Tcl_DeleteInterp(self.interp) + # DisableEventHook() + + def raiseTclError(self): + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + raise TclError(tkffi.string(tklib.Tcl_GetStringResult(self.interp))) + + def wantobjects(self): + return self._wantobjects + + def _check_tcl_appartment(self): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise RuntimeError("Calling Tcl from different appartment") + + def loadtk(self): + # We want to guard against calling Tk_Init() multiple times + err = tklib.Tcl_Eval(self.interp, "info exists tk_version") + if err == tklib.TCL_ERROR: + self.raiseTclError() + tk_exists = tklib.Tcl_GetStringResult(self.interp) + if not tk_exists or tkffi.string(tk_exists) != "1": + err = tklib.Tk_Init(self.interp) + if err == tklib.TCL_ERROR: + self.raiseTclError() + + def _var_invoke(self, func, *args, **kwargs): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # The current thread is not the interpreter thread. + # Marshal the call to the interpreter thread, then wait + # for completion. 
+ raise NotImplementedError("Call from another thread") + return func(*args, **kwargs) + + def _getvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) + + def _setvar(self, name1, value, global_only=False): + name1 = varname_converter(name1) + newval = AsObj(value) + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() + + def _unsetvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def getvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2) + + def globalgetvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2, global_only=True) + + def setvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value) + + def globalsetvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value, global_only=True) + + def unsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2) + + def globalunsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2, global_only=True) + + # COMMANDS + + def createcommand(self, cmdName, func): + if not callable(func): + raise TypeError("command not callable") + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + clientData = _CommandData(self, cmdName, func) + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) + if not res: + raise TclError("can't create Tcl command") + + def deletecommand(self, cmdName): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + if res == -1: + raise TclError("can't delete Tcl command") + + def call(self, *args): + flags = tklib.TCL_EVAL_DIRECT | tklib.TCL_EVAL_GLOBAL + + # If args is a single tuple, replace with contents of tuple + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # We cannot call the command directly. Instead, we must + # marshal the parameters to the interpreter thread. 
+ raise NotImplementedError("Call from another thread") + + objects = tkffi.new("Tcl_Obj*[]", len(args)) + argc = len(args) + try: + for i, arg in enumerate(args): + if arg is None: + argc = i + break + obj = AsObj(arg) + tklib.Tcl_IncrRefCount(obj) + objects[i] = obj + + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() + finally: + for obj in objects: + if obj: + tklib.Tcl_DecrRefCount(obj) + return result + + def _callResult(self): + assert self._wantobjects + value = tklib.Tcl_GetObjResult(self.interp) + # Not sure whether the IncrRef is necessary, but something + # may overwrite the interpreter result while we are + # converting it. + tklib.Tcl_IncrRefCount(value) + res = FromObj(self, value) + tklib.Tcl_DecrRefCount(value) + return res + + def eval(self, script): + self._check_tcl_appartment() + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def evalfile(self, filename): + self._check_tcl_appartment() + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def split(self, arg): + if isinstance(arg, tuple): + return self._splitObj(arg) + else: + return self._split(arg) + + def splitlist(self, arg): + if isinstance(arg, tuple): + return arg + if isinstance(arg, unicode): + arg = arg.encode('utf8') + + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(self.interp, arg, argc, argv) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + result = tuple(tkffi.string(argv[0][i]) + for i in range(argc[0])) + tklib.Tcl_Free(argv[0]) + return result + + def _splitObj(self, arg): + if isinstance(arg, tuple): + size = len(arg) + # Recursively invoke SplitObj for all tuple items. + # If this does not return a new object, no action is + # needed. + result = None + newelems = (self._splitObj(elem) for elem in arg) + for elem, newelem in zip(arg, newelems): + if elem is not newelem: + return newelems + elif isinstance(arg, str): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + return arg + tklib.Tcl_Free(argv[0]) + if argc[0] > 1: + return self._split(arg) + return arg + + def _split(self, arg): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + # Not a list. + # Could be a quoted string containing funnies, e.g. {"}. + # Return the string itself. 
+ return arg + + try: + if argc[0] == 0: + return "" + elif argc[0] == 1: + return argv[0][0] + else: + return (self._split(argv[0][i]) + for i in range(argc[0])) + finally: + tklib.Tcl_Free(argv[0]) + + def getboolean(self, s): + if isinstance(s, int): + return s + v = tkffi.new("int*") + res = tklib.Tcl_GetBoolean(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def mainloop(self, threshold): + self._check_tcl_appartment() + self.dispatching = True + while (tklib.Tk_GetNumMainWindows() > threshold and + not self.quitMainLoop and not self.errorInCmd): + + if self.threaded: + result = tklib.Tcl_DoOneEvent(0) + else: + raise NotImplementedError("TCL configured without threads") + + if result < 0: + break + self.dispatching = False + self.quitMainLoop = False + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + + def quit(self): + self.quitMainLoop = True diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tclobj.py From noreply at buildbot.pypy.org Sat Aug 3 16:20:40 2013 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 Aug 2013 16:20:40 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support to fix 1561 and 1563 Message-ID: <20130803142040.72DA21C3578@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r65923:9b5b81b12899 Date: 2013-08-03 07:19 -0700 http://bitbucket.org/pypy/pypy/changeset/9b5b81b12899/ Log: merge reflex-support to fix 1561 and 1563 diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -592,11 +592,15 @@ def get(self, w_cppinstance, w_pycppclass): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) self.converter.to_memory(self.space, w_cppinstance, w_value, offset) return self.space.w_None diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -426,6 +426,11 @@ # mostly for the benefit of the CINT backend, which treats std as special gbl.std = make_cppnamespace(None, "std", None, False) + # install a type for enums to refer to + # TODO: this is correct for C++98, not for C++11 and in general there will + # be the same issue for all typedef'd builtin types + setattr(gbl, 'unsigned int', int) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -105,6 +105,13 @@ raises(IndexError, c.m_float_array.__getitem__, self.N) raises(IndexError, c.m_double_array.__getitem__, self.N) + # can not access an instance member on the class + raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, getattr, 
cppyy_test_data, 'm_int') + + assert not hasattr(cppyy_test_data, 'm_bool') + assert not hasattr(cppyy_test_data, 'm_int') + c.destruct() def test03_instance_data_write_access(self): @@ -428,12 +435,17 @@ c = cppyy_test_data() assert isinstance(c, cppyy_test_data) - # TODO: test that the enum is accessible as a type + # test that the enum is accessible as a type + assert cppyy_test_data.what assert cppyy_test_data.kNothing == 6 assert cppyy_test_data.kSomething == 111 assert cppyy_test_data.kLots == 42 + assert cppyy_test_data.what(cppyy_test_data.kNothing) == cppyy_test_data.kNothing + assert cppyy_test_data.what(6) == cppyy_test_data.kNothing + # TODO: only allow instantiations with correct values (C++11) + assert c.get_enum() == cppyy_test_data.kNothing assert c.m_enum == cppyy_test_data.kNothing @@ -455,6 +467,7 @@ assert cppyy_test_data.s_enum == cppyy_test_data.kSomething # global enums + assert gbl.fruit # test type accessible assert gbl.kApple == 78 assert gbl.kBanana == 29 assert gbl.kCitrus == 34 From noreply at buildbot.pypy.org Sat Aug 3 17:56:51 2013 From: noreply at buildbot.pypy.org (zzzeek) Date: Sat, 3 Aug 2013 17:56:51 +0200 (CEST) Subject: [pypy-commit] pypy default: the --nostrip and --without-tk options need to come before all positional arguments, fix the help Message-ID: <20130803155651.3E7961C0134@cobra.cs.uni-duesseldorf.de> Author: Mike Bayer Branch: Changeset: r65924:73b5c3a97483 Date: 2013-08-01 21:09 -0400 http://bitbucket.org/pypy/pypy/changeset/73b5c3a97483/ Log: the --nostrip and --without-tk options need to come before all positional arguments, fix the help diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. From noreply at buildbot.pypy.org Sat Aug 3 17:56:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 17:56:52 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in zzzeek/pypy (pull request #178) Message-ID: <20130803155652.8067D1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65925:4627ee7d7b96 Date: 2013-08-03 17:56 +0200 http://bitbucket.org/pypy/pypy/changeset/4627ee7d7b96/ Log: Merged in zzzeek/pypy (pull request #178) the --nostrip and --without-tk options need to come before all positional arguments, fix the help diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. 
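[Editorial note, not part of the archived commits] The two messages above only adjust the help text of pypy/tool/release/package.py; the underlying constraint is that the script (at the time) appears to consume its "--" options before reading the remaining arguments by position, so "--nostrip" and "--without-tk" must come before "root-pypy-dir". The sketch below is an illustration written for this note under that assumption; it is not taken from package.py itself.

    # Illustrative sketch only -- NOT the actual code in
    # pypy/tool/release/package.py.  It shows a front-consuming option
    # parser, the kind of parsing for which option order matters.
    import sys

    def parse_args(argv):
        options = {'nostrip': False, 'without_tk': False}
        # consume leading "--" options before any positional argument
        while argv and argv[0].startswith('--'):
            opt = argv.pop(0)
            if opt == '--nostrip':
                options['nostrip'] = True
            elif opt == '--without-tk':
                options['without_tk'] = True
            else:
                raise SystemExit('unknown option: %s' % opt)
        if not argv:
            raise SystemExit('missing root-pypy-dir')
        root_pypy_dir = argv.pop(0)   # first positional argument
        extra = argv                  # name-of-archive, name-of-pypy-c, ...
        return options, root_pypy_dir, extra

    # With options placed after the directory, "--nostrip" would land in
    # 'extra' and be misread as the archive name instead of as an option.
    opts, root, rest = parse_args(sys.argv[1:])

Under that assumption, "package.py --nostrip --without-tk ../../.. pypy-VER-PLATFORM" parses as intended, which is exactly what the corrected Usage line documents.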
From noreply at buildbot.pypy.org Sat Aug 3 19:00:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 19:00:58 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head ae53e0f9be1e on branch remove-frame-force Message-ID: <20130803170058.572DE1C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r65928:e95a68f86de3 Date: 2013-08-03 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/e95a68f86de3/ Log: Merge closed head ae53e0f9be1e on branch remove-frame-force From noreply at buildbot.pypy.org Sat Aug 3 19:00:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 19:00:59 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 61977b89cac0 on branch longdouble Message-ID: <20130803170059.92C0A1C1360@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r65929:7dd6603b656b Date: 2013-08-03 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/7dd6603b656b/ Log: Merge closed head 61977b89cac0 on branch longdouble From noreply at buildbot.pypy.org Sat Aug 3 19:01:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 19:01:00 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 783ea6d7ecbf on branch sqlite-cffi Message-ID: <20130803170100.B725E1C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r65930:c2f71e467fc8 Date: 2013-08-03 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/c2f71e467fc8/ Log: Merge closed head 783ea6d7ecbf on branch sqlite-cffi From noreply at buildbot.pypy.org Sat Aug 3 19:01:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 19:01:02 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head fc22917ceb73 on branch vref-copy Message-ID: <20130803170102.1800A1C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r65931:1c5331913f91 Date: 2013-08-03 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/1c5331913f91/ Log: Merge closed head fc22917ceb73 on branch vref-copy From noreply at buildbot.pypy.org Sat Aug 3 19:01:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 19:01:03 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head ef9c84a9a1aa on branch remove-string-smm Message-ID: <20130803170103.3A3FC1C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r65932:56b25fe6dd41 Date: 2013-08-03 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/56b25fe6dd41/ Log: Merge closed head ef9c84a9a1aa on branch remove-string-smm From noreply at buildbot.pypy.org Sat Aug 3 19:01:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 19:01:04 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head be5edadba1ac on branch dotviewer-linewidth Message-ID: <20130803170104.4917D1C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r65933:71378ce161ea Date: 2013-08-03 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/71378ce161ea/ Log: Merge closed head be5edadba1ac on branch dotviewer-linewidth From noreply at buildbot.pypy.org Sat Aug 3 19:01:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Aug 2013 19:01:05 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <20130803170105.646CE1C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: 
r65934:ccf2b4208535 Date: 2013-08-03 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/ccf2b4208535/ Log: re-close this branch From noreply at buildbot.pypy.org Sat Aug 3 21:39:24 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 3 Aug 2013 21:39:24 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: add to whatsnew Message-ID: <20130803193924.8C8D21C0134@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65936:3eee51959c8d Date: 2013-08-03 20:27 +0100 http://bitbucket.org/pypy/pypy/changeset/3eee51959c8d/ Log: add to whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -48,3 +48,7 @@ Allow subclassing ndarray, i.e. matrix .. branch: kill-ootype + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. From noreply at buildbot.pypy.org Sat Aug 3 21:39:23 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 3 Aug 2013 21:39:23 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: kill TypeSystem.perform_normalizations() Message-ID: <20130803193923.4E3341C00F4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65935:db51479f78c1 Date: 2013-08-03 20:18 +0100 http://bitbucket.org/pypy/pypy/changeset/db51479f78c1/ Log: kill TypeSystem.perform_normalizations() diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -7,6 +7,7 @@ from rpython.annotator.policy import AnnotatorPolicy from rpython.annotator.signature import Sig from rpython.annotator.specialize import flatten_star_args +from rpython.rtyper.normalizecalls import perform_normalizations from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.flowspace.model import Constant from rpython.rlib.objectmodel import specialize @@ -251,7 +252,7 @@ rtyper = self.rtyper translator = rtyper.annotator.translator original_graph_count = len(translator.graphs) - rtyper.type_system.perform_normalizations(rtyper) + perform_normalizations(rtyper) for r in self.delayedreprs: r.set_setup_delayed(False) rtyper.call_all_setups() diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -26,6 +26,7 @@ attachRuntimeTypeInfo, Primitive) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError from rpython.rtyper.typesystem import LowLevelTypeSystem +from rpython.rtyper.normalizecalls import perform_normalizations from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block @@ -172,12 +173,12 @@ # first make sure that all functions called in a group have exactly # the same signature, by hacking their flow graphs if needed - self.type_system.perform_normalizations(self) + perform_normalizations(self) self.exceptiondata.finish(self) + # new blocks can be created as a result of specialize_block(), so # we need to be careful about the loop here. 
self.already_seen = {} - self.specialize_more_blocks() if self.exceptiondata is not None: self.exceptiondata.make_helpers(self) diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -71,13 +71,6 @@ in a graph.""" raise NotImplementedError() - def perform_normalizations(self, rtyper): - """Prepare the annotator's internal data structures for rtyping - with the specified type system. - """ - # default implementation - from rpython.rtyper.normalizecalls import perform_normalizations - perform_normalizations(rtyper) class LowLevelTypeSystem(TypeSystem): name = "lltypesystem" From noreply at buildbot.pypy.org Sat Aug 3 21:39:26 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 3 Aug 2013 21:39:26 +0200 (CEST) Subject: [pypy-commit] pypy default: merge branch kill-typesystem Message-ID: <20130803193926.075551C00F4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r65937:81d4a31242a6 Date: 2013-08-03 20:37 +0100 http://bitbucket.org/pypy/pypy/changeset/81d4a31242a6/ Log: merge branch kill-typesystem diff too long, truncating to 2000 out of 2396 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -62,3 +62,7 @@ No longer delegate numpy string_ methods to space.StringObject, in numpy this works by kind of by accident. Support for merging the refactor-str-types branch + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -59,7 +59,7 @@ t.buildannotator().build_types(function, [int] * len(args), main_entry_point=True) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() warmrunnerdesc = WarmRunnerDesc(t, translate_support_code=True, CPUClass=self.CPUClass, **kwds) diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -21,9 +21,9 @@ self.callcontrol = CallControl(cpu, jitdrivers_sd) self._seen_files = set() - def transform_func_to_jitcode(self, func, values, type_system='lltype'): + def transform_func_to_jitcode(self, func, values): """For testing.""" - rtyper = support.annotate(func, values, type_system=type_system) + rtyper = support.annotate(func, values) graph = rtyper.annotator.translator.graphs[0] jitcode = JitCode("test") self.transform_graph_to_jitcode(graph, jitcode, True) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -35,7 +35,7 @@ return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, - type_system="lltype", translationoptions={}): + translationoptions={}): # build the normal ll graphs for ll_function t = TranslationContext() for key, value in translationoptions.items(): @@ -44,7 +44,7 @@ a = t.buildannotator(policy=annpolicy) argtypes = getargtypes(a, values) a.build_types(func, argtypes, main_entry_point=True) - rtyper = t.buildrtyper(type_system = type_system) + rtyper = t.buildrtyper() rtyper.specialize() #if inline: # auto_inlining(t, threshold=inline) diff --git a/rpython/jit/codewriter/test/test_flatten.py 
b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -125,8 +125,8 @@ class TestFlatten: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def encoding_test(self, func, args, expected, diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -13,8 +13,8 @@ class TestRegAlloc: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def check_assembler(self, graph, expected, transform=False, diff --git a/rpython/jit/metainterp/jitexc.py b/rpython/jit/metainterp/jitexc.py --- a/rpython/jit/metainterp/jitexc.py +++ b/rpython/jit/metainterp/jitexc.py @@ -62,7 +62,7 @@ def _get_standard_error(rtyper, Class): - exdata = rtyper.getexceptiondata() + exdata = rtyper.exceptiondata clsdef = rtyper.annotator.bookkeeper.getuniqueclassdef(Class) evalue = exdata.get_standard_ll_exc_instance(rtyper, clsdef) return evalue diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -12,7 +12,7 @@ from rpython.translator.backendopt.all import backend_optimizations -def _get_jitcodes(testself, CPUClass, func, values, type_system, +def _get_jitcodes(testself, CPUClass, func, values, supports_floats=True, supports_longlong=False, supports_singlefloats=False, @@ -50,7 +50,7 @@ FakeWarmRunnerState.enable_opts = {} func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system, + rtyper = support.annotate(func, values, translationoptions=translationoptions) graphs = rtyper.annotator.translator.graphs testself.all_graphs = graphs @@ -210,7 +210,7 @@ def interp_operations(self, f, args, **kwds): # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + _get_jitcodes(self, self.CPUClass, f, args, **kwds) # try to run it with blackhole.py result1 = _run_with_blackhole(self, args) # try to run it with pyjitpl.py diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1069,7 +1069,7 @@ if getattr(graph, 'func', None) is f] init_graph = t._graphof(Frame.__init__.im_func) - deref = t.rtyper.type_system_deref + deref = t.rtyper.type_system.deref def direct_calls(graph): return [deref(op.args[0].value)._callable.func_name diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -27,7 +27,7 @@ x1 = vref() # jit_force_virtual virtual_ref_finish(vref, x) # - _get_jitcodes(self, self.CPUClass, fn, [], self.type_system) + _get_jitcodes(self, self.CPUClass, fn, []) graph = self.all_graphs[0] assert graph.name == 'fn' 
self.vrefinfo.replace_force_virtual_with_call([graph]) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -105,7 +105,7 @@ self.minimalgctransformer = None def get_lltype_of_exception_value(self): - exceptiondata = self.translator.rtyper.getexceptiondata() + exceptiondata = self.translator.rtyper.exceptiondata return exceptiondata.lltype_of_exception_value def need_minimal_transform(self, graph): @@ -479,11 +479,11 @@ flags = hop.spaceop.args[1].value flavor = flags['flavor'] meth = getattr(self, 'gct_fv_%s_malloc' % flavor, None) - assert meth, "%s has no support for malloc with flavor %r" % (self, flavor) + assert meth, "%s has no support for malloc with flavor %r" % (self, flavor) c_size = rmodel.inputconst(lltype.Signed, llmemory.sizeof(TYPE)) v_raw = meth(hop, flags, TYPE, c_size) hop.cast_result(v_raw) - + def gct_fv_raw_malloc(self, hop, flags, TYPE, c_size): v_raw = hop.genop("direct_call", [self.raw_malloc_fixedsize_ptr, c_size], resulttype=llmemory.Address) @@ -506,7 +506,7 @@ flags.update(add_flags) flavor = flags['flavor'] meth = getattr(self, 'gct_fv_%s_malloc_varsize' % flavor, None) - assert meth, "%s has no support for malloc_varsize with flavor %r" % (self, flavor) + assert meth, "%s has no support for malloc_varsize with flavor %r" % (self, flavor) return self.varsize_malloc_helper(hop, flags, meth, []) def gct_malloc_nonmovable(self, *args, **kwds): diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -127,8 +127,8 @@ return None def specialize_call(self, hop): + from rpython.rtyper.lltypesystem.rstr import string_repr fn = self.instance - string_repr = hop.rtyper.type_system.rstr.string_repr vlist = hop.inputargs(string_repr) hop.exception_cannot_occur() t = hop.rtyper.annotator.translator @@ -190,7 +190,7 @@ def compute_result_annotation(self): return None - + def specialize_call(self, hop): hop.exception_cannot_occur() return hop.genop('debug_flush', []) @@ -278,7 +278,7 @@ from rpython.annotator.annrpython import log log.WARNING('make_sure_not_resized called, but has no effect since list_comprehension is off') return s_arg - + def specialize_call(self, hop): hop.exception_cannot_occur() return hop.inputarg(hop.args_r[0], arg=0) @@ -294,7 +294,7 @@ class DictMarkEntry(ExtRegistryEntry): _about_ = mark_dict_non_null - + def compute_result_annotation(self, s_dict): from rpython.annotator.model import SomeDict diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -362,7 +362,8 @@ return SomeString() def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbuilder.stringbuilder_repr + from rpython.rtyper.lltypesystem.rbuilder import stringbuilder_repr + return stringbuilder_repr def rtyper_makekey(self): return self.__class__, @@ -398,7 +399,8 @@ return SomeUnicodeString() def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbuilder.unicodebuilder_repr + from rpython.rtyper.lltypesystem.rbuilder import unicodebuilder_repr + return unicodebuilder_repr def rtyper_makekey(self): return self.__class__, diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -7,6 +7,7 @@ from rpython.annotator.policy import AnnotatorPolicy from rpython.annotator.signature import Sig from 
rpython.annotator.specialize import flatten_star_args +from rpython.rtyper.normalizecalls import perform_normalizations from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.flowspace.model import Constant from rpython.rlib.objectmodel import specialize @@ -73,16 +74,6 @@ return LowLevelAnnotatorPolicy.lowlevelspecialize(funcdesc, args_s, {}) default_specialize = staticmethod(default_specialize) - def specialize__ts(pol, funcdesc, args_s, ref): - ts = pol.rtyper.type_system - ref = ref.split('.') - x = ts - for part in ref: - x = getattr(x, part) - bk = pol.rtyper.annotator.bookkeeper - funcdesc2 = bk.getdesc(x) - return pol.default_specialize(funcdesc2, args_s) - def specialize__semierased(funcdesc, args_s): a2l = annmodel.annotation_to_lltype l2a = annmodel.lltype_to_annotation @@ -261,7 +252,7 @@ rtyper = self.rtyper translator = rtyper.annotator.translator original_graph_count = len(translator.graphs) - rtyper.type_system.perform_normalizations(rtyper) + perform_normalizations(rtyper) for r in self.delayedreprs: r.set_setup_delayed(False) rtyper.call_all_setups() diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -137,7 +137,7 @@ return self.holders def _emit(self, repr, hop): - assert isinstance(repr, rtuple.AbstractTupleRepr) + assert isinstance(repr, rtuple.TupleRepr) tupleitems_v = [] for h in self.holders: v = h.emit(repr.items_r[len(tupleitems_v)], hop) diff --git a/rpython/rtyper/exceptiondata.py b/rpython/rtyper/exceptiondata.py --- a/rpython/rtyper/exceptiondata.py +++ b/rpython/rtyper/exceptiondata.py @@ -1,34 +1,22 @@ from rpython.annotator import model as annmodel from rpython.rlib import rstackovf from rpython.rtyper import rclass - +from rpython.rtyper.lltypesystem.rclass import (ll_issubclass, ll_type, + ll_cast_to_object) # the exceptions that can be implicitely raised by some operations -standardexceptions = { - TypeError : True, - OverflowError : True, - ValueError : True, - ZeroDivisionError: True, - MemoryError : True, - IOError : True, - OSError : True, - StopIteration : True, - KeyError : True, - IndexError : True, - AssertionError : True, - RuntimeError : True, - UnicodeDecodeError: True, - UnicodeEncodeError: True, - NotImplementedError: True, - rstackovf._StackOverflow: True, - } +standardexceptions = set([TypeError, OverflowError, ValueError, + ZeroDivisionError, MemoryError, IOError, OSError, StopIteration, KeyError, + IndexError, AssertionError, RuntimeError, UnicodeDecodeError, + UnicodeEncodeError, NotImplementedError, rstackovf._StackOverflow]) class UnknownException(Exception): pass -class AbstractExceptionData: +class ExceptionData(object): """Public information for the code generators to help with exceptions.""" + standardexceptions = standardexceptions def __init__(self, rtyper): @@ -63,10 +51,10 @@ return helper_fn def get_standard_ll_exc_instance(self, rtyper, clsdef): - rclass = rtyper.type_system.rclass - r_inst = rclass.getinstancerepr(rtyper, clsdef) + from rpython.rtyper.lltypesystem.rclass import getinstancerepr + r_inst = getinstancerepr(rtyper, clsdef) example = r_inst.get_reusable_prebuilt_instance() - example = self.cast_exception(self.lltype_of_exception_value, example) + example = ll_cast_to_object(example) return example def get_standard_ll_exc_instance_by_class(self, exceptionclass): @@ -75,3 +63,21 @@ clsdef = self.rtyper.annotator.bookkeeper.getuniqueclassdef( exceptionclass) return self.get_standard_ll_exc_instance(self.rtyper, 
clsdef) + + def make_helpers(self, rtyper): + # create helper functionptrs + self.fn_exception_match = self.make_exception_matcher(rtyper) + self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper) + self.fn_raise_OSError = self.make_raise_OSError(rtyper) + + def make_exception_matcher(self, rtyper): + # ll_exception_matcher(real_exception_vtable, match_exception_vtable) + s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type) + helper_fn = rtyper.annotate_helper_fn(ll_issubclass, [s_typeptr, s_typeptr]) + return helper_fn + + def make_type_of_exc_inst(self, rtyper): + # ll_type_of_exc_inst(exception_instance) -> exception_vtable + s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) + helper_fn = rtyper.annotate_helper_fn(ll_type, [s_excinst]) + return helper_fn diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -286,7 +286,7 @@ rtyper = self.llinterpreter.typer bk = rtyper.annotator.bookkeeper classdef = bk.getuniqueclassdef(rstackovf._StackOverflow) - exdata = rtyper.getexceptiondata() + exdata = rtyper.exceptiondata evalue = exdata.get_standard_ll_exc_instance(rtyper, classdef) etype = exdata.fn_type_of_exc_inst(evalue) e = LLException(etype, evalue) @@ -335,7 +335,7 @@ elif catch_exception: link = block.exits[0] if e: - exdata = self.llinterpreter.typer.getexceptiondata() + exdata = self.llinterpreter.typer.exceptiondata cls = e.args[0] inst = e.args[1] for link in block.exits[1:]: @@ -440,7 +440,7 @@ else: extraargs = () typer = self.llinterpreter.typer - exdata = typer.getexceptiondata() + exdata = typer.exceptiondata if isinstance(exc, OSError): self.op_direct_call(exdata.fn_raise_OSError, exc.errno) assert False, "op_direct_call above should have raised" diff --git a/rpython/rtyper/lltypesystem/exceptiondata.py b/rpython/rtyper/lltypesystem/exceptiondata.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/exceptiondata.py +++ /dev/null @@ -1,31 +0,0 @@ -from rpython.annotator import model as annmodel -from rpython.rtyper.lltypesystem import rclass -from rpython.rtyper.lltypesystem.lltype import (Array, malloc, Ptr, FuncType, - functionptr, Signed) -from rpython.rtyper.exceptiondata import AbstractExceptionData -from rpython.annotator.classdef import FORCE_ATTRIBUTES_INTO_CLASSES - - -class ExceptionData(AbstractExceptionData): - """Public information for the code generators to help with exceptions.""" - - def make_helpers(self, rtyper): - # create helper functionptrs - self.fn_exception_match = self.make_exception_matcher(rtyper) - self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper) - self.fn_raise_OSError = self.make_raise_OSError(rtyper) - - def make_exception_matcher(self, rtyper): - # ll_exception_matcher(real_exception_vtable, match_exception_vtable) - s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type) - helper_fn = rtyper.annotate_helper_fn(rclass.ll_issubclass, [s_typeptr, s_typeptr]) - return helper_fn - - def make_type_of_exc_inst(self, rtyper): - # ll_type_of_exc_inst(exception_instance) -> exception_vtable - s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) - helper_fn = rtyper.annotate_helper_fn(rclass.ll_type, [s_excinst]) - return helper_fn - - def cast_exception(self, TYPE, value): - return rclass.ll_cast_to_object(value) diff --git a/rpython/rtyper/lltypesystem/ll_str.py b/rpython/rtyper/lltypesystem/ll_str.py --- a/rpython/rtyper/lltypesystem/ll_str.py +++ b/rpython/rtyper/lltypesystem/ll_str.py @@ -1,14 +1,9 @@ from 
rpython.rtyper.lltypesystem.lltype import GcArray, Array, Char, malloc -from rpython.rtyper.annlowlevel import llstr from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib import jit CHAR_ARRAY = GcArray(Char) - at jit.elidable -def ll_int_str(repr, i): - return ll_int2dec(i) - def ll_unsigned(i): if isinstance(i, r_longlong) or isinstance(i, r_ulonglong): return r_ulonglong(i) @@ -47,7 +42,7 @@ hex_chars = malloc(Array(Char), 16, immortal=True) for i in range(16): - hex_chars[i] = "%x"%i + hex_chars[i] = "%x" % i @jit.elidable def ll_int2hex(i, addPrefix): @@ -122,8 +117,3 @@ result.chars[j] = temp[len-j-1] j += 1 return result - - at jit.elidable -def ll_float_str(repr, f): - from rpython.rlib.rfloat import formatd - return llstr(formatd(f, 'f', 6)) diff --git a/rpython/rtyper/lltypesystem/rbuiltin.py b/rpython/rtyper/lltypesystem/rbuiltin.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rbuiltin.py +++ /dev/null @@ -1,90 +0,0 @@ -from rpython.annotator import model as annmodel -from rpython.rlib import objectmodel -from rpython.rtyper.lltypesystem import lltype, rclass -from rpython.rtyper.lltypesystem.rdict import rtype_r_dict -from rpython.rtyper.rmodel import TyperError - - -def rtype_builtin_isinstance(hop): - hop.exception_cannot_occur() - if hop.s_result.is_constant(): - return hop.inputconst(lltype.Bool, hop.s_result.const) - - if hop.args_s[1].is_constant() and hop.args_s[1].const == list: - if hop.args_s[0].knowntype != list: - raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None") - rlist = hop.args_r[0] - vlist = hop.inputarg(rlist, arg=0) - cnone = hop.inputconst(rlist, None) - return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool) - - assert isinstance(hop.args_r[0], rclass.InstanceRepr) - return hop.args_r[0].rtype_isinstance(hop) - -def ll_instantiate(typeptr): # NB. used by rpbc.ClassesPBCRepr as well - my_instantiate = typeptr.instantiate - return my_instantiate() - -def rtype_instantiate(hop): - hop.exception_cannot_occur() - s_class = hop.args_s[0] - assert isinstance(s_class, annmodel.SomePBC) - if len(s_class.descriptions) != 1: - # instantiate() on a variable class - vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) - v_inst = hop.gendirectcall(ll_instantiate, vtypeptr) - return hop.genop('cast_pointer', [v_inst], # v_type implicit in r_result - resulttype = hop.r_result.lowleveltype) - - classdef = s_class.any_description().getuniqueclassdef() - return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) - -def rtype_builtin_hasattr(hop): - hop.exception_cannot_occur() - if hop.s_result.is_constant(): - return hop.inputconst(lltype.Bool, hop.s_result.const) - - raise TyperError("hasattr is only suported on a constant") - -BUILTIN_TYPER = {} -BUILTIN_TYPER[objectmodel.instantiate] = rtype_instantiate -BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance -BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr -BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict - -# _________________________________________________________________ -# weakrefs - -import weakref -from rpython.rtyper.lltypesystem import llmemory - -def rtype_weakref_create(hop): - # Note: this code also works for the RPython-level calls 'weakref.ref(x)'. 
- vlist = hop.inputargs(hop.args_r[0]) - hop.exception_cannot_occur() - return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr) - -def rtype_weakref_deref(hop): - c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) - assert v_wref.concretetype == llmemory.WeakRefPtr - hop.exception_cannot_occur() - return hop.genop('weakref_deref', [v_wref], resulttype=c_ptrtype.value) - -def rtype_cast_ptr_to_weakrefptr(hop): - vlist = hop.inputargs(hop.args_r[0]) - hop.exception_cannot_occur() - return hop.genop('cast_ptr_to_weakrefptr', vlist, - resulttype=llmemory.WeakRefPtr) - -def rtype_cast_weakrefptr_to_ptr(hop): - c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) - assert v_wref.concretetype == llmemory.WeakRefPtr - hop.exception_cannot_occur() - return hop.genop('cast_weakrefptr_to_ptr', [v_wref], - resulttype=c_ptrtype.value) - -BUILTIN_TYPER[weakref.ref] = rtype_weakref_create -BUILTIN_TYPER[llmemory.weakref_create] = rtype_weakref_create -BUILTIN_TYPER[llmemory.weakref_deref] = rtype_weakref_deref -BUILTIN_TYPER[llmemory.cast_ptr_to_weakrefptr] = rtype_cast_ptr_to_weakrefptr -BUILTIN_TYPER[llmemory.cast_weakrefptr_to_ptr] = rtype_cast_weakrefptr_to_ptr diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -317,7 +317,7 @@ def ll_str2bytearray(str): from rpython.rtyper.lltypesystem.rbytearray import BYTEARRAY - + lgt = len(str.chars) b = malloc(BYTEARRAY, lgt) for i in range(lgt): @@ -974,7 +974,7 @@ argsiter = iter(sourcevarsrepr) - InstanceRepr = hop.rtyper.type_system.rclass.InstanceRepr + from rpython.rtyper.lltypesystem.rclass import InstanceRepr for i, thing in enumerate(things): if isinstance(thing, tuple): code = thing[0] @@ -1007,7 +1007,6 @@ else: raise TyperError("%%%s is not RPython" % (code,)) else: - from rpython.rtyper.lltypesystem.rstr import string_repr, unicode_repr if is_unicode: vchunk = inputconst(unicode_repr, thing) else: diff --git a/rpython/rtyper/lltypesystem/rtuple.py b/rpython/rtyper/lltypesystem/rtuple.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rtuple.py +++ /dev/null @@ -1,113 +0,0 @@ -from rpython.tool.pairtype import pairtype -from rpython.rtyper.rmodel import inputconst -from rpython.rtyper.rtuple import AbstractTupleRepr, AbstractTupleIteratorRepr -from rpython.rtyper.lltypesystem.lltype import \ - Ptr, GcStruct, Void, Signed, malloc, typeOf, nullptr -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE -from rpython.rtyper.lltypesystem import rstr - -# ____________________________________________________________ -# -# Concrete implementation of RPython tuples: -# -# struct tuple { -# type0 item0; -# type1 item1; -# type2 item2; -# ... 
-# } - -class TupleRepr(AbstractTupleRepr): - rstr_ll = rstr.LLHelpers - - def __init__(self, rtyper, items_r): - AbstractTupleRepr.__init__(self, rtyper, items_r) - self.lowleveltype = TUPLE_TYPE(self.lltypes) - - def newtuple(cls, llops, r_tuple, items_v): - # items_v should have the lowleveltype of the internal reprs - assert len(r_tuple.items_r) == len(items_v) - for r_item, v_item in zip(r_tuple.items_r, items_v): - assert r_item.lowleveltype == v_item.concretetype - # - if len(r_tuple.items_r) == 0: - return inputconst(Void, ()) # a Void empty tuple - c1 = inputconst(Void, r_tuple.lowleveltype.TO) - cflags = inputconst(Void, {'flavor': 'gc'}) - v_result = llops.genop('malloc', [c1, cflags], - resulttype = r_tuple.lowleveltype) - for i in range(len(r_tuple.items_r)): - cname = inputconst(Void, r_tuple.fieldnames[i]) - llops.genop('setfield', [v_result, cname, items_v[i]]) - return v_result - newtuple = classmethod(newtuple) - - def instantiate(self): - if len(self.items_r) == 0: - return dum_empty_tuple # PBC placeholder for an empty tuple - else: - return malloc(self.lowleveltype.TO) - - def rtype_bltn_list(self, hop): - from rpython.rtyper.lltypesystem import rlist - nitems = len(self.items_r) - vtup = hop.inputarg(self, 0) - LIST = hop.r_result.lowleveltype.TO - cno = inputconst(Signed, nitems) - hop.exception_is_here() - vlist = hop.gendirectcall(LIST.ll_newlist, cno) - v_func = hop.inputconst(Void, rlist.dum_nocheck) - for index in range(nitems): - name = self.fieldnames[index] - ritem = self.items_r[index] - cname = hop.inputconst(Void, name) - vitem = hop.genop('getfield', [vtup, cname], resulttype = ritem) - vitem = hop.llops.convertvar(vitem, ritem, hop.r_result.item_repr) - cindex = inputconst(Signed, index) - hop.gendirectcall(rlist.ll_setitem_nonneg, v_func, vlist, cindex, vitem) - return vlist - - def getitem_internal(self, llops, v_tuple, index): - """Return the index'th item, in internal repr.""" - name = self.fieldnames[index] - llresult = self.lltypes[index] - cname = inputconst(Void, name) - return llops.genop('getfield', [v_tuple, cname], resulttype = llresult) - - -def rtype_newtuple(hop): - return TupleRepr._rtype_newtuple(hop) - -newtuple = TupleRepr.newtuple - -def dum_empty_tuple(): pass - - -# ____________________________________________________________ -# -# Iteration. - -class Length1TupleIteratorRepr(AbstractTupleIteratorRepr): - - def __init__(self, r_tuple): - self.r_tuple = r_tuple - self.lowleveltype = Ptr(GcStruct('tuple1iter', - ('tuple', r_tuple.lowleveltype))) - self.ll_tupleiter = ll_tupleiter - self.ll_tuplenext = ll_tuplenext - -TupleRepr.IteratorRepr = Length1TupleIteratorRepr - -def ll_tupleiter(ITERPTR, tuple): - iter = malloc(ITERPTR.TO) - iter.tuple = tuple - return iter - -def ll_tuplenext(iter): - # for iterating over length 1 tuples only! - t = iter.tuple - if t: - iter.tuple = nullptr(typeOf(t).TO) - return t.item0 - else: - raise StopIteration diff --git a/rpython/rtyper/lltypesystem/rtupletype.py b/rpython/rtyper/lltypesystem/rtupletype.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rtupletype.py +++ /dev/null @@ -1,15 +0,0 @@ -# Helper to build the lowleveltype corresponding to an RPython tuple. -# This is not in rtuple.py so that it can be imported without bringing -# the whole rtyper in. 
- -from rpython.rtyper.lltypesystem.lltype import Void, Ptr, GcStruct - - -def TUPLE_TYPE(field_lltypes): - if len(field_lltypes) == 0: - return Void # empty tuple - else: - fields = [('item%d' % i, TYPE) for i, TYPE in enumerate(field_lltypes)] - kwds = {'hints': {'immutable': True, - 'noidentity': True}} - return Ptr(GcStruct('tuple%d' % len(field_lltypes), *fields, **kwds)) diff --git a/rpython/rtyper/lltypesystem/rvirtualizable2.py b/rpython/rtyper/lltypesystem/rvirtualizable2.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rvirtualizable2.py +++ /dev/null @@ -1,13 +0,0 @@ -from rpython.rtyper.rmodel import inputconst -from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.lltypesystem.rclass import InstanceRepr, OBJECTPTR -from rpython.rtyper.rvirtualizable2 import AbstractVirtualizable2InstanceRepr - - -class Virtualizable2InstanceRepr(AbstractVirtualizable2InstanceRepr, InstanceRepr): - - def _setup_repr_llfields(self): - llfields = [] - if self.top_of_virtualizable_hierarchy: - llfields.append(('vable_token', llmemory.GCREF)) - return llfields diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -13,7 +13,7 @@ from rpython.rtyper.annlowlevel import hlstr from rpython.rtyper.extfunc import extdef from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE +from rpython.rtyper.rtuple import TUPLE_TYPE from rpython.rtyper.tool import rffi_platform as platform from rpython.tool.pairtype import pairtype from rpython.tool.sourcetools import func_renamer diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -3,7 +3,8 @@ from rpython.rlib import rarithmetic, objectmodel from rpython.rtyper import raddress, rptr, extregistry, rrange from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass +from rpython.rtyper.lltypesystem.rdict import rtype_r_dict from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype @@ -52,14 +53,14 @@ raise TyperError("**kwds call not implemented") if arguments.w_stararg is not None: # expand the *arg in-place -- it must be a tuple - from rpython.rtyper.rtuple import AbstractTupleRepr + from rpython.rtyper.rtuple import TupleRepr if arguments.w_stararg != hop.nb_args - 3: raise TyperError("call pattern too complex") hop.nb_args -= 1 v_tuple = hop.args_v.pop() s_tuple = hop.args_s.pop() r_tuple = hop.args_r.pop() - if not isinstance(r_tuple, AbstractTupleRepr): + if not isinstance(r_tuple, TupleRepr): raise TyperError("*arg must be a tuple") for i in range(len(r_tuple.items_r)): v_item = r_tuple.getitem_internal(hop.llops, v_tuple, i) @@ -92,10 +93,6 @@ return BUILTIN_TYPER[self.builtinfunc] except (KeyError, TypeError): pass - try: - return rtyper.type_system.rbuiltin.BUILTIN_TYPER[self.builtinfunc] - except (KeyError, TypeError): - pass if extregistry.is_registered(self.builtinfunc): entry = extregistry.lookup(self.builtinfunc) return entry.specialize_call @@ -691,3 +688,86 @@ BUILTIN_TYPER[llmemory.cast_adr_to_ptr] = rtype_cast_adr_to_ptr BUILTIN_TYPER[llmemory.cast_adr_to_int] = rtype_cast_adr_to_int BUILTIN_TYPER[llmemory.cast_int_to_adr] = rtype_cast_int_to_adr + +def rtype_builtin_isinstance(hop): + hop.exception_cannot_occur() + if 
hop.s_result.is_constant(): + return hop.inputconst(lltype.Bool, hop.s_result.const) + + if hop.args_s[1].is_constant() and hop.args_s[1].const == list: + if hop.args_s[0].knowntype != list: + raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None") + rlist = hop.args_r[0] + vlist = hop.inputarg(rlist, arg=0) + cnone = hop.inputconst(rlist, None) + return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool) + + assert isinstance(hop.args_r[0], rclass.InstanceRepr) + return hop.args_r[0].rtype_isinstance(hop) + +def ll_instantiate(typeptr): # NB. used by rpbc.ClassesPBCRepr as well + my_instantiate = typeptr.instantiate + return my_instantiate() + +def rtype_instantiate(hop): + hop.exception_cannot_occur() + s_class = hop.args_s[0] + assert isinstance(s_class, annmodel.SomePBC) + if len(s_class.descriptions) != 1: + # instantiate() on a variable class + vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) + v_inst = hop.gendirectcall(ll_instantiate, vtypeptr) + return hop.genop('cast_pointer', [v_inst], # v_type implicit in r_result + resulttype = hop.r_result.lowleveltype) + + classdef = s_class.any_description().getuniqueclassdef() + return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) + +def rtype_builtin_hasattr(hop): + hop.exception_cannot_occur() + if hop.s_result.is_constant(): + return hop.inputconst(lltype.Bool, hop.s_result.const) + + raise TyperError("hasattr is only suported on a constant") + +BUILTIN_TYPER[objectmodel.instantiate] = rtype_instantiate +BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance +BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr +BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict + +# _________________________________________________________________ +# weakrefs + +import weakref +from rpython.rtyper.lltypesystem import llmemory + +def rtype_weakref_create(hop): + # Note: this code also works for the RPython-level calls 'weakref.ref(x)'. 
+ vlist = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr) + +def rtype_weakref_deref(hop): + c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) + assert v_wref.concretetype == llmemory.WeakRefPtr + hop.exception_cannot_occur() + return hop.genop('weakref_deref', [v_wref], resulttype=c_ptrtype.value) + +def rtype_cast_ptr_to_weakrefptr(hop): + vlist = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('cast_ptr_to_weakrefptr', vlist, + resulttype=llmemory.WeakRefPtr) + +def rtype_cast_weakrefptr_to_ptr(hop): + c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) + assert v_wref.concretetype == llmemory.WeakRefPtr + hop.exception_cannot_occur() + return hop.genop('cast_weakrefptr_to_ptr', [v_wref], + resulttype=c_ptrtype.value) + +BUILTIN_TYPER[weakref.ref] = rtype_weakref_create +BUILTIN_TYPER[llmemory.weakref_create] = rtype_weakref_create +BUILTIN_TYPER[llmemory.weakref_deref] = rtype_weakref_deref +BUILTIN_TYPER[llmemory.cast_ptr_to_weakrefptr] = rtype_cast_ptr_to_weakrefptr +BUILTIN_TYPER[llmemory.cast_weakrefptr_to_ptr] = rtype_cast_weakrefptr_to_ptr diff --git a/rpython/rtyper/rbytearray.py b/rpython/rtyper/rbytearray.py --- a/rpython/rtyper/rbytearray.py +++ b/rpython/rtyper/rbytearray.py @@ -57,4 +57,5 @@ return self.__class__, def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbytearray.bytearray_repr + from rpython.rtyper.lltypesystem.rbytearray import bytearray_repr + return bytearray_repr diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -1,9 +1,11 @@ import types +from rpython.flowspace.model import Constant from rpython.annotator import description, model as annmodel from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Void from rpython.rtyper.rmodel import Repr, getgcflavor, inputconst +from rpython.rlib.objectmodel import UnboxedValue class FieldListAccessor(object): @@ -52,7 +54,8 @@ try: result = rtyper.class_reprs[classdef] except KeyError: - result = rtyper.type_system.rclass.ClassRepr(rtyper, classdef) + from rpython.rtyper.lltypesystem.rclass import ClassRepr + result = ClassRepr(rtyper, classdef) rtyper.class_reprs[classdef] = result rtyper.add_pendingsetup(result) return result @@ -73,8 +76,7 @@ def buildinstancerepr(rtyper, classdef, gcflavor='gc'): - from rpython.rlib.objectmodel import UnboxedValue - from rpython.flowspace.model import Constant + from rpython.rtyper.rvirtualizable2 import Virtualizable2InstanceRepr if classdef is None: unboxed = [] @@ -91,8 +93,8 @@ if virtualizable2: assert len(unboxed) == 0 assert gcflavor == 'gc' - return rtyper.type_system.rvirtualizable2.Virtualizable2InstanceRepr(rtyper, classdef) - elif usetagging and rtyper.type_system.name == 'lltypesystem': + return Virtualizable2InstanceRepr(rtyper, classdef) + elif usetagging: # the UnboxedValue class and its parent classes need a # special repr for their instances if len(unboxed) != 1: @@ -102,7 +104,8 @@ from rpython.rtyper.lltypesystem import rtagged return rtagged.TaggedInstanceRepr(rtyper, classdef, unboxed[0]) else: - return rtyper.type_system.rclass.InstanceRepr(rtyper, classdef, gcflavor) + from rpython.rtyper.lltypesystem.rclass import InstanceRepr + return InstanceRepr(rtyper, classdef, gcflavor) class MissingRTypeAttribute(TyperError): diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- 
a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -5,23 +5,20 @@ class __extend__(annmodel.SomeDict): def rtyper_makerepr(self, rtyper): - dictkey = self.dictdef.dictkey + from rpython.rtyper.lltypesystem.rdict import DictRepr + dictkey = self.dictdef.dictkey dictvalue = self.dictdef.dictvalue - s_key = dictkey .s_value - s_value = dictvalue.s_value + s_key = dictkey.s_value + s_value = dictvalue.s_value force_non_null = self.dictdef.force_non_null if dictkey.custom_eq_hash: custom_eq_hash = lambda: (rtyper.getrepr(dictkey.s_rdict_eqfn), rtyper.getrepr(dictkey.s_rdict_hashfn)) else: custom_eq_hash = None - return rtyper.type_system.rdict.DictRepr(rtyper, - lambda: rtyper.getrepr(s_key), - lambda: rtyper.getrepr(s_value), - dictkey, - dictvalue, - custom_eq_hash, - force_non_null) + return DictRepr(rtyper, lambda: rtyper.getrepr(s_key), + lambda: rtyper.getrepr(s_value), dictkey, dictvalue, + custom_eq_hash, force_non_null) def rtyper_makekey(self): self.dictdef.dictkey .dont_change_any_more = True @@ -29,7 +26,6 @@ return (self.__class__, self.dictdef.dictkey, self.dictdef.dictvalue) - class AbstractDictRepr(rmodel.Repr): def pickrepr(self, item_repr): @@ -41,7 +37,8 @@ pickkeyrepr = pickrepr def compact_repr(self): - return 'DictR %s %s' % (self.key_repr.compact_repr(), self.value_repr.compact_repr()) + return 'DictR %s %s' % (self.key_repr.compact_repr(), + self.value_repr.compact_repr()) def recast_value(self, llops, v): return llops.convertvar(v, self.value_repr, self.external_value_repr) @@ -51,10 +48,11 @@ def rtype_newdict(hop): + from rpython.rtyper.lltypesystem.rdict import ll_newdict hop.inputargs() # no arguments expected r_dict = hop.r_result cDICT = hop.inputconst(lltype.Void, r_dict.DICT) - v_result = hop.gendirectcall(hop.rtyper.type_system.rdict.ll_newdict, cDICT) + v_result = hop.gendirectcall(ll_newdict, cDICT) return v_result diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -1,6 +1,9 @@ from rpython.annotator import model as annmodel from rpython.rlib.objectmodel import _hash_float from rpython.rlib.rarithmetic import base_int +from rpython.rlib.rfloat import formatd +from rpython.rlib import jit +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, SignedLongLong, UnsignedLongLong, Bool, Float) @@ -74,8 +77,8 @@ class __extend__(pairtype(AbstractStringRepr, FloatRepr)): def rtype_mod(_, hop): - rstr = hop.rtyper.type_system.rstr - return rstr.do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) + from rpython.rtyper.lltypesystem.rstr import do_stringformat + return do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) #Helpers FloatRepr,FloatRepr @@ -87,7 +90,6 @@ vlist = hop.inputargs(Float, Float) return hop.genop('float_'+func, vlist, resulttype=Bool) -# class __extend__(FloatRepr): @@ -134,11 +136,9 @@ hop.exception_cannot_occur() return vlist[0] - # version picked by specialisation based on which - # type system rtyping is using, from .ll_str module + @jit.elidable def ll_str(self, f): - pass - ll_str._annspecialcase_ = "specialize:ts('ll_str.ll_float_str')" + return llstr(formatd(f, 'f', 6)) # # _________________________ Conversions _________________________ diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -2,7 +2,7 @@ from rpython.annotator import model as annmodel from rpython.flowspace.operation 
import op_appendices -from rpython.rlib import objectmodel +from rpython.rlib import objectmodel, jit from rpython.rlib.rarithmetic import intmask, r_int, r_longlong from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, Bool, Float, @@ -365,25 +365,24 @@ hop.exception_cannot_occur() return vlist[0] - # version picked by specialisation based on which - # type system rtyping is using, from .ll_str module + @jit.elidable def ll_str(self, i): - raise NotImplementedError - ll_str._annspecialcase_ = "specialize:ts('ll_str.ll_int_str')" + from rpython.rtyper.lltypesystem.ll_str import ll_int2dec + return ll_int2dec(i) def rtype_hex(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2hex self = self.as_int varg = hop.inputarg(self, 0) true = inputconst(Bool, True) - fn = hop.rtyper.type_system.ll_str.ll_int2hex - return hop.gendirectcall(fn, varg, true) + return hop.gendirectcall(ll_int2hex, varg, true) def rtype_oct(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2oct self = self.as_int varg = hop.inputarg(self, 0) true = inputconst(Bool, True) - fn = hop.rtyper.type_system.ll_str.ll_int2oct - return hop.gendirectcall(fn, varg, true) + return hop.gendirectcall(ll_int2oct, varg, true) def ll_hash_int(n): return intmask(n) diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -41,17 +41,18 @@ listitem = self.listdef.listitem s_value = listitem.s_value if (listitem.range_step is not None and not listitem.mutated and - not isinstance(s_value, annmodel.SomeImpossibleValue)): - return rtyper.type_system.rrange.RangeRepr(listitem.range_step) + not isinstance(s_value, annmodel.SomeImpossibleValue)): + from rpython.rtyper.lltypesystem.rrange import RangeRepr + return RangeRepr(listitem.range_step) else: # cannot do the rtyper.getrepr() call immediately, for the case # of recursive structures -- i.e. 
if the listdef contains itself - rlist = rtyper.type_system.rlist + from rpython.rtyper.lltypesystem.rlist import ListRepr, FixedSizeListRepr item_repr = lambda: rtyper.getrepr(listitem.s_value) if self.listdef.listitem.resized: - return rlist.ListRepr(rtyper, item_repr, listitem) + return ListRepr(rtyper, item_repr, listitem) else: - return rlist.FixedSizeListRepr(rtyper, item_repr, listitem) + return FixedSizeListRepr(rtyper, item_repr, listitem) def rtyper_makekey(self): self.listdef.listitem.dont_change_any_more = True @@ -334,12 +335,12 @@ def rtype_newlist(hop, v_sizehint=None): + from rpython.rtyper.lltypesystem.rlist import newlist nb_args = hop.nb_args r_list = hop.r_result r_listitem = r_list.item_repr items_v = [hop.inputarg(r_listitem, arg=i) for i in range(nb_args)] - return hop.rtyper.type_system.rlist.newlist(hop.llops, r_list, items_v, - v_sizehint=v_sizehint) + return newlist(hop.llops, r_list, items_v, v_sizehint=v_sizehint) def rtype_alloc_and_set(hop): r_list = hop.r_result @@ -377,10 +378,10 @@ return v_lst1 def rtype_extend_with_str_slice((r_lst1, r_str2), hop): + from rpython.rtyper.lltypesystem.rstr import string_repr if r_lst1.item_repr.lowleveltype not in (Char, UniChar): raise TyperError('"lst += string" only supported with a list ' 'of chars or unichars') - string_repr = r_lst1.rtyper.type_system.rstr.string_repr v_lst1 = hop.inputarg(r_lst1, arg=0) v_str2 = hop.inputarg(string_repr, arg=3) kind, vlist = hop.decompose_slice_args() @@ -393,10 +394,10 @@ class __extend__(pairtype(AbstractListRepr, AbstractCharRepr)): def rtype_extend_with_char_count((r_lst1, r_chr2), hop): + from rpython.rtyper.lltypesystem.rstr import char_repr if r_lst1.item_repr.lowleveltype not in (Char, UniChar): raise TyperError('"lst += string" only supported with a list ' 'of chars or unichars') - char_repr = r_lst1.rtyper.type_system.rstr.char_repr v_lst1, v_chr, v_count = hop.inputargs(r_lst1, char_repr, Signed) hop.gendirectcall(ll_extend_with_char_count, v_lst1, v_chr, v_count) return v_lst1 diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -12,8 +12,7 @@ def small_cand(rtyper, s_pbc): - if 1 < len(s_pbc.descriptions) < rtyper.getconfig().translation.withsmallfuncsets and \ - hasattr(rtyper.type_system.rpbc, 'SmallFunctionSetPBCRepr'): + if 1 < len(s_pbc.descriptions) < rtyper.getconfig().translation.withsmallfuncsets: callfamily = s_pbc.any_description().getcallfamily() concretetable, uniquerows = get_concrete_calltable(rtyper, callfamily) if len(uniquerows) == 1 and (not s_pbc.subset_of or small_cand(rtyper, s_pbc.subset_of)): @@ -22,6 +21,9 @@ class __extend__(annmodel.SomePBC): def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rpbc import (FunctionsPBCRepr, + SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr, + MethodOfFrozenPBCRepr) if self.isNone(): return none_frozen_pbc_repr kind = self.getKind() @@ -32,20 +34,20 @@ if sample.overridden: getRepr = OverriddenFunctionPBCRepr else: - getRepr = rtyper.type_system.rpbc.FunctionsPBCRepr + getRepr = FunctionsPBCRepr if small_cand(rtyper, self): - getRepr = rtyper.type_system.rpbc.SmallFunctionSetPBCRepr + getRepr = SmallFunctionSetPBCRepr else: getRepr = getFrozenPBCRepr elif issubclass(kind, description.ClassDesc): # user classes - getRepr = rtyper.type_system.rpbc.ClassesPBCRepr + getRepr = ClassesPBCRepr elif issubclass(kind, description.MethodDesc): - getRepr = rtyper.type_system.rpbc.MethodsPBCRepr + getRepr = MethodsPBCRepr elif 
issubclass(kind, description.FrozenDesc): getRepr = getFrozenPBCRepr elif issubclass(kind, description.MethodOfFrozenDesc): - getRepr = rtyper.type_system.rpbc.MethodOfFrozenPBCRepr + getRepr = MethodOfFrozenPBCRepr else: raise TyperError("unexpected PBC kind %r" % (kind,)) @@ -350,6 +352,8 @@ return rtype_call_specialcase(hop) def getFrozenPBCRepr(rtyper, s_pbc): + from rpython.rtyper.lltypesystem.rpbc import ( + MultipleUnrelatedFrozenPBCRepr, MultipleFrozenPBCRepr) descs = list(s_pbc.descriptions) assert len(descs) >= 1 if len(descs) == 1 and not s_pbc.can_be_None: @@ -362,15 +366,13 @@ try: return rtyper.pbc_reprs['unrelated'] except KeyError: - rpbc = rtyper.type_system.rpbc - result = rpbc.MultipleUnrelatedFrozenPBCRepr(rtyper) + result = MultipleUnrelatedFrozenPBCRepr(rtyper) rtyper.pbc_reprs['unrelated'] = result return result try: return rtyper.pbc_reprs[access] except KeyError: - result = rtyper.type_system.rpbc.MultipleFrozenPBCRepr(rtyper, - access) + result = MultipleFrozenPBCRepr(rtyper, access) rtyper.pbc_reprs[access] = result rtyper.add_pendingsetup(result) return result @@ -612,9 +614,10 @@ return inputconst(Void, None) def rtype_is_((robj1, rnone2), hop): + from rpython.rtyper.lltypesystem.rpbc import rtype_is_None if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) - return hop.rtyper.type_system.rpbc.rtype_is_None(robj1, rnone2, hop) + return rtype_is_None(robj1, rnone2, hop) class __extend__(pairtype(NoneFrozenPBCRepr, Repr)): @@ -622,10 +625,10 @@ return inputconst(r_to, None) def rtype_is_((rnone1, robj2), hop): + from rpython.rtyper.lltypesystem.rpbc import rtype_is_None if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) - return hop.rtyper.type_system.rpbc.rtype_is_None( - robj2, rnone1, hop, pos=1) + return rtype_is_None(robj2, rnone1, hop, pos=1) # ____________________________________________________________ diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -4,7 +4,6 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Bool, Void, UniChar from rpython.rtyper.rmodel import IntegerRepr, IteratorRepr, inputconst, Repr -from rpython.rtyper.rtuple import AbstractTupleRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name from rpython.tool.staticmethods import StaticMethods @@ -88,26 +87,33 @@ class __extend__(annmodel.SomeString): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.string_repr + from rpython.rtyper.lltypesystem.rstr import string_repr + return string_repr + def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeUnicodeString): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.unicode_repr + from rpython.rtyper.lltypesystem.rstr import unicode_repr + return unicode_repr def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeChar): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.char_repr + from rpython.rtyper.lltypesystem.rstr import char_repr + return char_repr + def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeUnicodeCodePoint): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.unichar_repr + from rpython.rtyper.lltypesystem.rstr import unichar_repr + return unichar_repr + def rtyper_makekey(self): return self.__class__, @@ -271,12 +277,14 @@ raise NotImplementedError def 
rtype_method_join(self, hop): + from rpython.rtyper.lltypesystem.rlist import BaseListRepr + from rpython.rtyper.lltypesystem.rstr import char_repr, unichar_repr hop.exception_cannot_occur() rstr = hop.args_r[0] if hop.s_result.is_constant(): return inputconst(rstr.repr, hop.s_result.const) r_lst = hop.args_r[1] - if not isinstance(r_lst, hop.rtyper.type_system.rlist.BaseListRepr): + if not isinstance(r_lst, BaseListRepr): raise TyperError("string.join of non-list: %r" % r_lst) v_str, v_lst = hop.inputargs(rstr.repr, r_lst) v_length, v_items = self._list_length_items(hop, v_lst, r_lst.lowleveltype) @@ -284,8 +292,8 @@ if hop.args_s[0].is_constant() and hop.args_s[0].const == '': if r_lst.item_repr == rstr.repr: llfn = self.ll.ll_join_strs - elif (r_lst.item_repr == hop.rtyper.type_system.rstr.char_repr or - r_lst.item_repr == hop.rtyper.type_system.rstr.unichar_repr): + elif (r_lst.item_repr == char_repr or + r_lst.item_repr == unichar_repr): v_tp = hop.inputconst(Void, self.lowleveltype) return hop.gendirectcall(self.ll.ll_join_chars, v_length, v_items, v_tp) @@ -555,18 +563,6 @@ hop.exception_cannot_occur() return hop.gendirectcall(r_str.ll.ll_contains, v_str, v_chr) -class __extend__(pairtype(AbstractStringRepr, AbstractTupleRepr)): - def rtype_mod((r_str, r_tuple), hop): - r_tuple = hop.args_r[1] - v_tuple = hop.args_v[1] - - sourcevars = [] - for i, r_arg in enumerate(r_tuple.external_items_r): - v_item = r_tuple.getitem(hop.llops, v_tuple, i) - sourcevars.append((v_item, r_arg)) - - return r_str.ll.do_stringformat(hop, sourcevars) - class __extend__(AbstractCharRepr): def ll_str(self, ch): @@ -655,8 +651,8 @@ #Helper functions for comparisons def _rtype_compare_template(hop, func): - rstr = hop.rtyper.type_system.rstr - vlist = hop.inputargs(rstr.char_repr, rstr.char_repr) + from rpython.rtyper.lltypesystem.rstr import char_repr + vlist = hop.inputargs(char_repr, char_repr) return hop.genop('char_' + func, vlist, resulttype=Bool) class __extend__(AbstractUniCharRepr): @@ -677,8 +673,8 @@ get_ll_fasthash_function = get_ll_hash_function def rtype_ord(_, hop): - rstr = hop.rtyper.type_system.rstr - vlist = hop.inputargs(rstr.unichar_repr) + from rpython.rtyper.lltypesystem.rstr import unichar_repr + vlist = hop.inputargs(unichar_repr) return hop.genop('cast_unichar_to_int', vlist, resulttype=Signed) @@ -691,8 +687,8 @@ #Helper functions for comparisons def _rtype_unchr_compare_template(hop, func): - rstr = hop.rtyper.type_system.rstr - vlist = hop.inputargs(rstr.unichar_repr, rstr.unichar_repr) + from rpython.rtyper.lltypesystem.rstr import unichar_repr + vlist = hop.inputargs(unichar_repr, unichar_repr) return hop.genop('unichar_' + func, vlist, resulttype=Bool) @@ -702,16 +698,17 @@ class __extend__(pairtype(AbstractCharRepr, AbstractStringRepr), pairtype(AbstractUniCharRepr, AbstractUnicodeRepr)): def convert_from_to((r_from, r_to), v, llops): - rstr = llops.rtyper.type_system.rstr - if (r_from == rstr.char_repr and r_to == rstr.string_repr) or\ - (r_from == rstr.unichar_repr and r_to == rstr.unicode_repr): + from rpython.rtyper.lltypesystem.rstr import ( + string_repr, unicode_repr, char_repr, unichar_repr) + if (r_from == char_repr and r_to == string_repr) or\ + (r_from == unichar_repr and r_to == unicode_repr): return llops.gendirectcall(r_from.ll.ll_chr2str, v) return NotImplemented class __extend__(pairtype(AbstractStringRepr, AbstractCharRepr)): def convert_from_to((r_from, r_to), v, llops): - rstr = llops.rtyper.type_system.rstr - if r_from == rstr.string_repr and r_to == 
rstr.char_repr: + from rpython.rtyper.lltypesystem.rstr import string_repr, char_repr + if r_from == string_repr and r_to == char_repr: c_zero = inputconst(Signed, 0) return llops.gendirectcall(r_from.ll.ll_stritem_nonneg, v, c_zero) return NotImplemented diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -5,7 +5,10 @@ from rpython.rlib.rarithmetic import intmask from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import Void, Signed, Bool +from rpython.rtyper.lltypesystem.lltype import ( + Void, Signed, Bool, Ptr, GcStruct, malloc, typeOf, nullptr) +from rpython.rtyper.lltypesystem.rstr import LLHelpers +from rpython.rtyper.rstr import AbstractStringRepr from rpython.rtyper.rmodel import (Repr, IntegerRepr, inputconst, IteratorRepr, externalvsinternal) from rpython.tool.pairtype import pairtype @@ -13,8 +16,7 @@ class __extend__(annmodel.SomeTuple): def rtyper_makerepr(self, rtyper): - repr_class = rtyper.type_system.rtuple.TupleRepr - return repr_class(rtyper, [rtyper.getrepr(s_item) for s_item in self.items]) + return TupleRepr(rtyper, [rtyper.getrepr(s_item) for s_item in self.items]) def rtyper_makekey_ex(self, rtyper): keys = [rtyper.makekey(s_item) for s_item in self.items] @@ -71,17 +73,16 @@ def gen_str_function(tuplerepr): items_r = tuplerepr.items_r - str_funcs = [r_item.ll_str for r_item in items_r] - key = tuplerepr.rstr_ll, tuple(str_funcs) + key = tuple([r_item.ll_str for r_item in items_r]) try: return _gen_str_function_cache[key] except KeyError: - autounrolling_funclist = unrolling_iterable(enumerate(str_funcs)) + autounrolling_funclist = unrolling_iterable(enumerate(key)) - constant = tuplerepr.rstr_ll.ll_constant - start = tuplerepr.rstr_ll.ll_build_start - push = tuplerepr.rstr_ll.ll_build_push - finish = tuplerepr.rstr_ll.ll_build_finish + constant = LLHelpers.ll_constant + start = LLHelpers.ll_build_start + push = LLHelpers.ll_build_push + finish = LLHelpers.ll_build_finish length = len(items_r) def ll_str(t): @@ -105,7 +106,28 @@ return ll_str -class AbstractTupleRepr(Repr): +# ____________________________________________________________ +# +# Concrete implementation of RPython tuples: +# +# struct tuple { +# type0 item0; +# type1 item1; +# type2 item2; +# ... 
+# } + +def TUPLE_TYPE(field_lltypes): + if len(field_lltypes) == 0: + return Void # empty tuple + else: + fields = [('item%d' % i, TYPE) for i, TYPE in enumerate(field_lltypes)] + kwds = {'hints': {'immutable': True, + 'noidentity': True}} + return Ptr(GcStruct('tuple%d' % len(field_lltypes), *fields, **kwds)) + + +class TupleRepr(Repr): def __init__(self, rtyper, items_r): self.items_r = [] @@ -118,6 +140,7 @@ self.fieldnames = ['item%d' % i for i in range(len(items_r))] self.lltypes = [r.lowleveltype for r in items_r] self.tuple_cache = {} + self.lowleveltype = TUPLE_TYPE(self.lltypes) def getitem(self, llops, v_tuple, index): """Generate the operations to get the index'th item of v_tuple, @@ -127,19 +150,37 @@ r_external_item = self.external_items_r[index] return llops.convertvar(v, r_item, r_external_item) + @classmethod + def newtuple(cls, llops, r_tuple, items_v): + # items_v should have the lowleveltype of the internal reprs + assert len(r_tuple.items_r) == len(items_v) + for r_item, v_item in zip(r_tuple.items_r, items_v): + assert r_item.lowleveltype == v_item.concretetype + # + if len(r_tuple.items_r) == 0: + return inputconst(Void, ()) # a Void empty tuple + c1 = inputconst(Void, r_tuple.lowleveltype.TO) + cflags = inputconst(Void, {'flavor': 'gc'}) + v_result = llops.genop('malloc', [c1, cflags], + resulttype = r_tuple.lowleveltype) + for i in range(len(r_tuple.items_r)): + cname = inputconst(Void, r_tuple.fieldnames[i]) + llops.genop('setfield', [v_result, cname, items_v[i]]) + return v_result + + @classmethod def newtuple_cached(cls, hop, items_v): r_tuple = hop.r_result if hop.s_result.is_constant(): return inputconst(r_tuple, hop.s_result.const) else: return cls.newtuple(hop.llops, r_tuple, items_v) - newtuple_cached = classmethod(newtuple_cached) + @classmethod def _rtype_newtuple(cls, hop): r_tuple = hop.r_result vlist = hop.inputargs(*r_tuple.items_r) return cls.newtuple_cached(hop, vlist) - _rtype_newtuple = classmethod(_rtype_newtuple) def convert_const(self, value): assert isinstance(value, tuple) and len(value) == len(self.items_r) @@ -174,8 +215,48 @@ return self.IteratorRepr(self) raise TyperError("can only iterate over tuples of length 1 for now") + def instantiate(self): + if len(self.items_r) == 0: + return dum_empty_tuple # PBC placeholder for an empty tuple + else: + return malloc(self.lowleveltype.TO) -class __extend__(pairtype(AbstractTupleRepr, IntegerRepr)): + def rtype_bltn_list(self, hop): + from rpython.rtyper.lltypesystem import rlist + nitems = len(self.items_r) + vtup = hop.inputarg(self, 0) + LIST = hop.r_result.lowleveltype.TO + cno = inputconst(Signed, nitems) + hop.exception_is_here() + vlist = hop.gendirectcall(LIST.ll_newlist, cno) + v_func = hop.inputconst(Void, rlist.dum_nocheck) + for index in range(nitems): + name = self.fieldnames[index] + ritem = self.items_r[index] + cname = hop.inputconst(Void, name) + vitem = hop.genop('getfield', [vtup, cname], resulttype = ritem) + vitem = hop.llops.convertvar(vitem, ritem, hop.r_result.item_repr) + cindex = inputconst(Signed, index) + hop.gendirectcall(rlist.ll_setitem_nonneg, v_func, vlist, cindex, vitem) + return vlist + + def getitem_internal(self, llops, v_tuple, index): + """Return the index'th item, in internal repr.""" + name = self.fieldnames[index] + llresult = self.lltypes[index] + cname = inputconst(Void, name) + return llops.genop('getfield', [v_tuple, cname], resulttype = llresult) + + +def rtype_newtuple(hop): + return TupleRepr._rtype_newtuple(hop) + +newtuple = TupleRepr.newtuple + 
+def dum_empty_tuple(): pass + + +class __extend__(pairtype(TupleRepr, IntegerRepr)): def rtype_getitem((r_tup, r_int), hop): v_tuple, v_index = hop.inputargs(r_tup, Signed) @@ -186,7 +267,7 @@ index = v_index.value return r_tup.getitem(hop.llops, v_tuple, index) -class __extend__(AbstractTupleRepr): +class __extend__(TupleRepr): def rtype_getslice(r_tup, hop): s_start = hop.args_s[1] @@ -203,7 +284,7 @@ for i in indices] return hop.r_result.newtuple(hop.llops, hop.r_result, items_v) -class __extend__(pairtype(AbstractTupleRepr, Repr)): +class __extend__(pairtype(TupleRepr, Repr)): def rtype_contains((r_tup, r_item), hop): s_tup = hop.args_s[0] if not s_tup.is_constant(): @@ -224,7 +305,7 @@ hop2.v_s_insertfirstarg(v_dict, s_dict) return hop2.dispatch() -class __extend__(pairtype(AbstractTupleRepr, AbstractTupleRepr)): +class __extend__(pairtype(TupleRepr, TupleRepr)): def rtype_add((r_tup1, r_tup2), hop): v_tuple1, v_tuple2 = hop.inputargs(r_tup1, r_tup2) @@ -265,6 +346,21 @@ def rtype_is_((robj1, robj2), hop): raise TyperError("cannot compare tuples with 'is'") +class __extend__(pairtype(AbstractStringRepr, TupleRepr)): + def rtype_mod((r_str, r_tuple), hop): + r_tuple = hop.args_r[1] + v_tuple = hop.args_v[1] + + sourcevars = [] + for i, r_arg in enumerate(r_tuple.external_items_r): + v_item = r_tuple.getitem(hop.llops, v_tuple, i) + sourcevars.append((v_item, r_arg)) + + return r_str.ll.do_stringformat(hop, sourcevars) + +# ____________________________________________________________ +# +# Iteration. class AbstractTupleIteratorRepr(IteratorRepr): @@ -279,3 +375,28 @@ hop.exception_is_here() v = hop.gendirectcall(self.ll_tuplenext, v_iter) return hop.llops.convertvar(v, self.r_tuple.items_r[0], self.r_tuple.external_items_r[0]) + +class Length1TupleIteratorRepr(AbstractTupleIteratorRepr): + + def __init__(self, r_tuple): + self.r_tuple = r_tuple + self.lowleveltype = Ptr(GcStruct('tuple1iter', + ('tuple', r_tuple.lowleveltype))) + self.ll_tupleiter = ll_tupleiter + self.ll_tuplenext = ll_tuplenext + +TupleRepr.IteratorRepr = Length1TupleIteratorRepr + +def ll_tupleiter(ITERPTR, tuple): + iter = malloc(ITERPTR.TO) + iter.tuple = tuple + return iter + +def ll_tuplenext(iter): + # for iterating over length 1 tuples only! 
+ t = iter.tuple + if t: + iter.tuple = nullptr(typeOf(t).TO) + return t.item0 + else: + raise StopIteration diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -20,11 +20,13 @@ from rpython.flowspace.model import Variable, Constant, SpaceOperation, c_last_exception from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy from rpython.rtyper.error import TyperError +from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError from rpython.rtyper.typesystem import LowLevelTypeSystem +from rpython.rtyper.normalizecalls import perform_normalizations from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block @@ -32,19 +34,10 @@ class RPythonTyper(object): from rpython.rtyper.rmodel import log - def __init__(self, annotator, type_system="lltype"): + def __init__(self, annotator): self.annotator = annotator - self.lowlevel_ann_policy = LowLevelAnnotatorPolicy(self) - - if isinstance(type_system, str): - if type_system == "lltype": - self.type_system = LowLevelTypeSystem.instance - else: - raise TyperError("Unknown type system %r!" % type_system) - else: - self.type_system = type_system - self.type_system_deref = self.type_system.deref + self.type_system = LowLevelTypeSystem.instance self.reprs = {} self._reprs_must_call_setup = [] self._seen_reprs_must_call_setup = {} @@ -64,10 +57,7 @@ self.typererror_count = 0 # make the primitive_to_repr constant mapping self.primitive_to_repr = {} - if self.type_system.offers_exceptiondata: - self.exceptiondata = self.type_system.exceptiondata.ExceptionData(self) - else: - self.exceptiondata = None + self.exceptiondata = ExceptionData(self) try: self.seed = int(os.getenv('RTYPERSEED')) @@ -108,9 +98,6 @@ self._reprs_must_call_setup.append(repr) self._seen_reprs_must_call_setup[repr] = True - def getexceptiondata(self): - return self.exceptiondata # built at the end of specialize() - def lltype_to_classdef_mapping(self): result = {} for (classdef, _), repr in self.instance_reprs.iteritems(): @@ -148,10 +135,9 @@ raise KeyError(search) def makekey(self, s_obj): - return pair(self.type_system, s_obj).rtyper_makekey(self) - - def _makerepr(self, s_obj): - return pair(self.type_system, s_obj).rtyper_makerepr(self) + if hasattr(s_obj, "rtyper_makekey_ex"): + return s_obj.rtyper_makekey_ex(self) + return s_obj.rtyper_makekey() def getrepr(self, s_obj): # s_objs are not hashable... try hard to find a unique key anyway @@ -161,7 +147,7 @@ result = self.reprs[key] except KeyError: self.reprs[key] = None - result = self._makerepr(s_obj) + result = s_obj.rtyper_makerepr(self) assert not isinstance(result.lowleveltype, ContainerType), ( "missing a Ptr in the type specification " "of %s:\n%r" % (s_obj, result.lowleveltype)) @@ -186,12 +172,12 @@ # first make sure that all functions called in a group have exactly # the same signature, by hacking their flow graphs if needed - self.type_system.perform_normalizations(self) + perform_normalizations(self) self.exceptiondata.finish(self) + # new blocks can be created as a result of specialize_block(), so # we need to be careful about the loop here. 
self.already_seen = {} - self.specialize_more_blocks() if self.exceptiondata is not None: self.exceptiondata.make_helpers(self) @@ -592,7 +578,8 @@ return pair(r_arg1, r_arg2).rtype_extend_with_char_count(hop) def translate_op_newtuple(self, hop): - return self.type_system.rtuple.rtype_newtuple(hop) + from rpython.rtyper.rtuple import rtype_newtuple + return rtype_newtuple(hop) def translate_op_instantiate1(self, hop): from rpython.rtyper.lltypesystem import rclass @@ -932,7 +919,7 @@ # build the 'direct_call' operation f = self.rtyper.getcallable(graph) c = inputconst(typeOf(f), f) - fobj = self.rtyper.type_system_deref(f) + fobj = self.rtyper.type_system.deref(f) return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) diff --git a/rpython/rtyper/rvirtualizable2.py b/rpython/rtyper/rvirtualizable2.py --- a/rpython/rtyper/rvirtualizable2.py +++ b/rpython/rtyper/rvirtualizable2.py @@ -1,35 +1,40 @@ from rpython.rtyper.rmodel import inputconst, log -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rclass import AbstractInstanceRepr, FieldListAccessor +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.rclass import FieldListAccessor +from rpython.rtyper.lltypesystem.rclass import InstanceRepr -class AbstractVirtualizable2InstanceRepr(AbstractInstanceRepr): +class Virtualizable2InstanceRepr(InstanceRepr): def _super(self): - return super(AbstractVirtualizable2InstanceRepr, self) + return super(Virtualizable2InstanceRepr, self) def __init__(self, rtyper, classdef): self._super().__init__(rtyper, classdef) classdesc = classdef.classdesc if '_virtualizable2_' in classdesc.classdict: basedesc = classdesc.basedesc - assert basedesc is None or basedesc.lookup('_virtualizable2_') is None + assert basedesc is None or basedesc.lookup( + '_virtualizable2_') is None self.top_of_virtualizable_hierarchy = True self.accessor = FieldListAccessor() else: self.top_of_virtualizable_hierarchy = False def _setup_repr_llfields(self): - raise NotImplementedError + llfields = [] + if self.top_of_virtualizable_hierarchy: + llfields.append(('vable_token', llmemory.GCREF)) + return llfields def _setup_repr(self): if self.top_of_virtualizable_hierarchy: hints = {'virtualizable2_accessor': self.accessor} llfields = self._setup_repr_llfields() if llfields: - self._super()._setup_repr(llfields, hints = hints) + self._super()._setup_repr(llfields, hints=hints) else: - self._super()._setup_repr(hints = hints) + self._super()._setup_repr(hints=hints) c_vfields = self.classdef.classdesc.classdict['_virtualizable2_'] self.my_redirected_fields = self._parse_field_list(c_vfields.value, self.accessor) @@ -40,7 +45,7 @@ self.my_redirected_fields = self.rbase.my_redirected_fields def hook_access_field(self, vinst, cname, llops, flags): - #if not flags.get('access_directly'): + # if not flags.get('access_directly'): if self.my_redirected_fields.get(cname.value): cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -36,8 +36,7 @@ return res def gengraph(func, argtypes=[], viewbefore='auto', policy=None, - type_system="lltype", backendopt=False, config=None, - **extraconfigopts): + backendopt=False, config=None, **extraconfigopts): t = TranslationContext(config=config) t.config.set(**extraconfigopts) a = t.buildannotator(policy=policy) @@ 
-48,7 +47,7 @@ a.simplify() t.view() global typer # we need it for find_exception - typer = t.buildrtyper(type_system=type_system) + typer = t.buildrtyper() timelog("rtyper-specializing", typer.specialize) #t.view() timelog("checking graphs", t.checkgraphs) @@ -88,9 +87,8 @@ policy = AnnotatorPolicy() t, typer, graph = gengraph(func, [annotation(x) for x in values], - viewbefore, policy, type_system=type_system, - backendopt=backendopt, config=config, - **extraconfigopts) + viewbefore, policy, backendopt=backendopt, + config=config, **extraconfigopts) interp = LLInterpreter(typer) _tcache[key] = (t, interp, graph) # keep the cache small diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1443,7 +1443,7 @@ t = TranslationContext() s = t.buildannotator().build_types(f, []) - rtyper = t.buildrtyper(type_system=self.type_system) + rtyper = t.buildrtyper() rtyper.specialize() s_A_list = s.items[0] @@ -1471,7 +1471,7 @@ t = TranslationContext() s = t.buildannotator().build_types(f, []) - rtyper = t.buildrtyper(type_system=self.type_system) + rtyper = t.buildrtyper() rtyper.specialize() s_A_list = s.items[0] diff --git a/rpython/rtyper/test/test_rtuple.py b/rpython/rtyper/test/test_rtuple.py --- a/rpython/rtyper/test/test_rtuple.py +++ b/rpython/rtyper/test/test_rtuple.py @@ -1,15 +1,15 @@ -from rpython.rtyper.lltypesystem import rtupletype +from rpython.rtyper.rtuple import TUPLE_TYPE, TupleRepr from rpython.rtyper.lltypesystem.lltype import Signed, Bool from rpython.rtyper.rbool import bool_repr from rpython.rtyper.rint import signed_repr from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rlib.objectmodel import compute_hash from rpython.translator.translator import TranslationContext def test_rtuple(): - from rpython.rtyper.lltypesystem.rtuple import TupleRepr rtuple = TupleRepr(None, [signed_repr, bool_repr]) - assert rtuple.lowleveltype == rtupletype.TUPLE_TYPE([Signed, Bool]) + assert rtuple.lowleveltype == TUPLE_TYPE([Signed, Bool]) # ____________________________________________________________ @@ -159,7 +159,7 @@ t = TranslationContext() s = t.buildannotator().build_types(f, []) - rtyper = t.buildrtyper(type_system=self.type_system) + rtyper = t.buildrtyper() rtyper.specialize() s_AB_tup = s.items[0] @@ -171,7 +171,6 @@ assert r_AB_tup.lowleveltype == r_BA_tup.lowleveltype def test_tuple_hash(self): - from rpython.rlib.objectmodel import compute_hash def f(i, j): return compute_hash((i, j)) @@ -180,7 +179,6 @@ assert res1 != res2 def test_constant_tuple_hash_str(self): - from rpython.rlib.objectmodel import compute_hash def f(i): if i: t = (None, "abc") @@ -312,7 +310,6 @@ assert res is True def test_tuple_hash_2(self): - from rpython.rlib.objectmodel import compute_hash def f(n): return compute_hash((n, 6)) == compute_hash((3, n*2)) res = self.interpret(f, [3]) diff --git a/rpython/rtyper/test/test_rvirtualizable2.py b/rpython/rtyper/test/test_rvirtualizable2.py --- a/rpython/rtyper/test/test_rvirtualizable2.py +++ b/rpython/rtyper/test/test_rvirtualizable2.py @@ -339,7 +339,7 @@ g(a) t, typer, graph = self.gengraph(f, []) - deref = typer.type_system_deref + deref = typer.type_system.deref desc = typer.annotator.bookkeeper.getdesc(g) g_graphs = desc._cache.items() From noreply at buildbot.pypy.org Sat Aug 3 22:37:09 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 22:37:09 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Fix a 
cornercase of eq where rbigint is two. Fix int_lt, make objectspace tests pass Message-ID: <20130803203709.681B31C1360@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65938:db61d384e3e3 Date: 2013-08-03 18:02 +0200 http://bitbucket.org/pypy/pypy/changeset/db61d384e3e3/ Log: Fix a cornercase of eq where rbigint is two. Fix int_lt, make objectspace tests pass diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -172,9 +172,9 @@ def le__Int_Long(space, w_int1, w_long2): return space.newbool(w_long2.num.int_ge(w_int1.intval)) def eq__Int_Long(space, w_int1, w_long2): - return space.newbool(w_long2.num.int_ne(w_int1.intval)) + return space.newbool(not w_long2.num.int_ne(w_int1.intval)) def ne__Int_Long(space, w_int1, w_long2): - return space.newbool(w_long2.num.int_eq(w_int1.intval)) + return space.newbool(not w_long2.num.int_eq(w_int1.intval)) def gt__Int_Long(space, w_int1, w_long2): return space.newbool(w_long2.num.int_lt(w_int1.intval)) def ge__Int_Long(space, w_int1, w_long2): diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -485,8 +485,15 @@ @jit.elidable def int_eq(self, other): """ eq with int """ - if self.numdigits() != 1 or self.digit(0) * self.sign != other: + + if self.numdigits() > 2: return False + try: + if self.toint() != other: + return False + except OverflowError: + return False + return True @jit.look_inside @@ -536,13 +543,20 @@ @jit.elidable def int_lt(self, other): """ lt where other is an int """ - if other >= 0 and self.sign < 0: + osign = 1 + if other == 0: + osign = 0 + elif other < 0: + osign = -1 + + if self.sign > osign: + return False + elif self.sign < osign: return True - elif other < 0 and self.sign >= 0: - return False + digits = self.numdigits() if digits > 1: - if self.sign == 1 and other >= 0: + if self.sign == 1 and other < 0: return False else: return True diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -569,6 +569,15 @@ res2 = getattr(operator, mod)(x, y) assert res1 == res2 + def test_int_bitwise(self): + for x in gen_signs([0, 1, 5, 11, 42, 43, 3 ** 30]): + for y in gen_signs([0, 1, 5, 11, 42, 4]): + lx = rbigint.fromlong(x) + for mod in "xor and_ or_".split(): + res1 = getattr(lx, "int_"+mod)(y).tolong() + res2 = getattr(operator, mod)(x, y) + assert res1 == res2 + def test_mul_eq_shift(self): p2 = rbigint.fromlong(1).lshift(63) f1 = rbigint.fromlong(0).lshift(63) @@ -716,7 +725,7 @@ def test_int_divmod(self): x = 12345678901234567890L for i in range(100): - y = randint(0, 1 << 60) + y = randint(0, 1 << 30) for sx, sy in (1, 1), (1, -1), (-1, -1), (-1, 1): sx *= x sy *= y From noreply at buildbot.pypy.org Sat Aug 3 22:37:10 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 22:37:10 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Another fix for int_lt Message-ID: <20130803203710.BA2BB1C1360@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65939:40d6c0e1f164 Date: 2013-08-03 18:23 +0200 http://bitbucket.org/pypy/pypy/changeset/40d6c0e1f164/ Log: Another fix for int_lt diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -555,8 +555,9 @@ return True digits = self.numdigits() + if digits > 1: - if 
self.sign == 1 and other < 0: + if osign == 1: return False else: return True From noreply at buildbot.pypy.org Sat Aug 3 22:37:12 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 22:37:12 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Merge default Message-ID: <20130803203712.00DA81C1360@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65940:044c71c8423f Date: 2013-08-03 18:23 +0200 http://bitbucket.org/pypy/pypy/changeset/044c71c8423f/ Log: Merge default diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1305,7 +1305,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -592,11 +592,15 @@ def get(self, w_cppinstance, w_pycppclass): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) self.converter.to_memory(self.space, w_cppinstance, w_value, offset) return self.space.w_None diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -426,6 +426,11 @@ # mostly for the benefit of the CINT backend, which treats std as special gbl.std = make_cppnamespace(None, "std", None, False) + # install a type for enums to refer to + # TODO: this is correct for C++98, not for C++11 and in general there will + # be the same issue for all typedef'd builtin types + setattr(gbl, 'unsigned int', int) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -105,6 +105,13 @@ raises(IndexError, c.m_float_array.__getitem__, self.N) raises(IndexError, c.m_double_array.__getitem__, self.N) + # can not access an instance member on the class + raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, getattr, cppyy_test_data, 'm_int') + + assert not hasattr(cppyy_test_data, 'm_bool') + assert not hasattr(cppyy_test_data, 'm_int') + c.destruct() def test03_instance_data_write_access(self): @@ -428,12 +435,17 @@ c = cppyy_test_data() assert isinstance(c, cppyy_test_data) - # TODO: test that the enum is accessible as a type + # test that the enum is accessible as a type + assert cppyy_test_data.what assert cppyy_test_data.kNothing == 6 assert cppyy_test_data.kSomething == 111 assert cppyy_test_data.kLots == 42 + assert cppyy_test_data.what(cppyy_test_data.kNothing) == cppyy_test_data.kNothing + assert 
cppyy_test_data.what(6) == cppyy_test_data.kNothing + # TODO: only allow instantiations with correct values (C++11) + assert c.get_enum() == cppyy_test_data.kNothing assert c.m_enum == cppyy_test_data.kNothing @@ -455,6 +467,7 @@ assert cppyy_test_data.s_enum == cppyy_test_data.kSomething # global enums + assert gbl.fruit # test type accessible assert gbl.kApple == 78 assert gbl.kBanana == 29 assert gbl.kCitrus == 34 diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Tests for _sqlite3.py""" import pytest, sys @@ -222,3 +223,8 @@ cur.execute("create table test(a)") cur.executemany("insert into test values (?)", [[1], [2], [3]]) assert cur.lastrowid is None + +def test_issue1573(con): + cur = con.cursor() + cur.execute(u'SELECT 1 as méil') + assert cur.description[0][0] == u"méil".encode('utf-8') diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. From noreply at buildbot.pypy.org Sat Aug 3 22:37:13 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 22:37:13 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Do overflow ops with just one long and one int instead of two longs. Message-ID: <20130803203713.34E1D1C1360@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65941:ed316c19db44 Date: 2013-08-03 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/ed316c19db44/ Log: Do overflow ops with just one long and one int instead of two longs. 
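A rough plain-Python illustration of the idea behind this change, with invented names (FakeBigInt, add_ovf) rather than the real RPython classes: when an int-int operation overflows, promote only the left operand to a big integer and let an int-specialized method such as int_add consume the still-machine-sized right operand, instead of delegating both ints to longs first. The actual RPython change is in the diff that follows.

import sys

class FakeBigInt(object):
    # stand-in for rpython.rlib.rbigint.rbigint, which stores digit arrays
    def __init__(self, value):
        self.value = value

    def int_add(self, other_int):
        # other_int stays a plain machine-sized int; no second big object
        return FakeBigInt(self.value + other_int)

def add_ovf(a, b):
    res = a + b
    if -sys.maxint - 1 <= res <= sys.maxint:
        return res                        # still fits in a machine word
    return FakeBigInt(a).int_add(b)       # overflow: one promotion, not two

assert add_ovf(2, 3) == 5
assert add_ovf(sys.maxint, 10).value == sys.maxint + 10
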
diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -393,8 +393,7 @@ from pypy.objspace.std.smalllongobject import %(opname)s_ovr return %(opname)s_ovr(space, w_int1, w_int2) w_long1 = delegate_Int2Long(space, w_int1) - w_long2 = delegate_Int2Long(space, w_int2) - return %(opname)s__Long_Long(space, w_long1, w_long2) + return %(opname)s__Long_Int(space, w_long1, w_int2) """ % {'opname': opname}, '', 'exec') getattr(model.MM, opname).register(globals()['%s_ovr__Int_Int' % opname], From noreply at buildbot.pypy.org Sat Aug 3 22:37:14 2013 From: noreply at buildbot.pypy.org (stian) Date: Sat, 3 Aug 2013 22:37:14 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Use the int_ops some other places, implant a int_truediv placeholder and add ZeroDivisionError Message-ID: <20130803203714.5E32E1C1360@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65942:54da7171d2a4 Date: 2013-08-03 22:36 +0200 http://bitbucket.org/pypy/pypy/changeset/54da7171d2a4/ Log: Use the int_ops some other places, implant a int_truediv placeholder and add ZeroDivisionError TODO: - Fix: 5 .__rsub__(2L) to give NotImplanted - Fallback to Long transformation if abs(INT) > MASK (currently causes a few tests to fail with rpython assert errors because we don't do this) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -42,7 +42,7 @@ imag = space.float_w(space.getattr(self, space.wrap("imag"))) real_b = rbigint.fromrarith_int(float2longlong(real)) imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) - val = real_b.lshift(64).or_(imag_b).lshift(3).or_(rbigint.fromint(tag)) + val = real_b.lshift(64).or_(imag_b).lshift(3).int_or_(tag) return space.newlong_from_rbigint(val) diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -291,7 +291,7 @@ from pypy.objspace.std.model import IDTAG_FLOAT as tag val = float2longlong(space.float_w(self)) b = rbigint.fromrarith_int(val) - b = b.lshift(3).or_(rbigint.fromint(tag)) + b = b.lshift(3).int_or_(tag) return space.newlong_from_rbigint(b) def int(self, space): diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -195,7 +195,7 @@ return None from pypy.objspace.std.model import IDTAG_INT as tag b = space.bigint_w(self) - b = b.lshift(3).or_(rbigint.fromint(tag)) + b = b.lshift(3).int_or_(tag) return space.newlong_from_rbigint(b) def int(self, space): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -218,6 +218,17 @@ space.wrap("long/long too large for a float")) return space.newfloat(f) +def truediv__Long_Int(space, w_long1, w_int2): + try: + f = w_long1.num.int_truediv(w_int2.intval) + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, + space.wrap("long division or modulo by zero")) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("long/long too large for a float")) + return space.newfloat(f) + def floordiv__Long_Long(space, w_long1, w_long2): try: z = w_long1.num.floordiv(w_long2.num) diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- 
a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -130,7 +130,7 @@ return None from pypy.objspace.std.model import IDTAG_LONG as tag b = space.bigint_w(self) - b = b.lshift(3).or_(rbigint.fromint(tag)) + b = b.lshift(3).int_or_(tag) return space.newlong_from_rbigint(b) def unwrap(w_self, space): #YYYYYY diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -711,7 +711,7 @@ elif self._digits[0] == ONEDIGIT: return rbigint.fromint(self.sign * b) - res = self.widedigit(0) * b + res = self.widedigit(0) * abs(b) carry = res >> SHIFT if carry: return rbigint([_store_digit(res & MASK), _store_digit(carry)], self.sign * (-1 if b < 0 else 1), 2) @@ -730,6 +730,12 @@ return div @jit.elidable + def int_truediv(self, other): + # XXX: Not specialized. Just use regular truediv for now. + div = _bigint_true_divide(self, rbigint.fromint(other)) + return div + + @jit.elidable def floordiv(self, other): if self.sign == 1 and other.numdigits() == 1 and other.sign == 1: digit = other.digit(0) @@ -748,6 +754,10 @@ @jit.elidable def int_floordiv(self, other): + + if other == 0: + raise ZeroDivisionError("long division or modulo by zero") + digit = abs(other) if self.sign == 1 and other > 0: if digit == 1: @@ -878,22 +888,22 @@ @jit.elidable def int_divmod(v, w): """ Divmod with int """ - if v.sign != (-1 if w < 0 else 1): - # TODO, fix. + + if w == 0: + raise ZeroDivisionError("long division or modulo by zero") + + wsign = (-1 if w < 0 else 1) + if v.sign != wsign: + # Divrem1 doesn't deal with the sign difference. Instead of having yet another copy, + # Just fallback. return v.divmod(rbigint.fromint(w)) + div, mod = _divrem1(v, abs(w)) - if v.sign != (-1 if w < 0 else 1): - mod = rbigint.fromint(mod) - mod.sign = -1 if w < 0 else 1 - mod = mod.int_add(w) - - if div.sign == 0: - return ONENEGATIVERBIGINT, mod - div = div.int_add(1) - else: - mod = rbigint.fromint(mod) - mod.sign = -1 if w < 0 else 1 - div.sign = v.sign * (-1 if w < 0 else 1) + mod = rbigint.fromint(mod) + + mod.sign = wsign + div.sign = v.sign * wsign + return div, mod @jit.elidable @@ -1045,7 +1055,7 @@ if self.sign == 0: return ONENEGATIVERBIGINT - ret = self.add(ONERBIGINT) + ret = self.int_add(1) ret.sign = -ret.sign return ret From noreply at buildbot.pypy.org Mon Aug 5 02:13:34 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 5 Aug 2013 02:13:34 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed some now unused code (was for ootypesystem) Message-ID: <20130805001334.A347D1C013B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65943:0193d7e63652 Date: 2013-08-04 17:12 -0700 http://bitbucket.org/pypy/pypy/changeset/0193d7e63652/ Log: Removed some now unused code (was for ootypesystem) diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -61,27 +61,3 @@ sandboxsafe=True, llimpl=getattr(ll_math, method_name)) -# ___________________________ -# os.path functions - -from rpython.tool.sourcetools import func_with_new_name -import os.path - -# os.path.join is RPython, but we don't want to compile it directly -# because it's platform dependant. This is ok for lltype where the -# execution platform is the same as the translation platform, but not -# for ootype where the executable produced by some backends (e.g. CLI, -# JVM) are expected to run everywhere. 
Thus, we register it as an -# external function, but we provide a clone for lltype using -# func_with_new_name. - -path_functions = [ - ('join', [ll_os.str0, ll_os.str0], ll_os.str0), - ('dirname', [ll_os.str0], ll_os.str0), - ] - -for name, args, res in path_functions: - func = getattr(os.path, name) - llimpl = func_with_new_name(func, name) - register_external(func, args, res, 'll_os_path.ll_%s' % name, - llimpl=llimpl, sandboxsafe=True) diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -50,7 +50,7 @@ if not hasattr(os, 'statvfs'): py.test.skip('posix specific function') try: - expected = os.statvfs('.') + os.statvfs('.') except OSError, e: py.test.skip("the underlying os.statvfs() failed: %s" % e) getllimpl(os.statvfs)('.') @@ -59,7 +59,7 @@ if not hasattr(os, 'fstatvfs'): py.test.skip('posix specific function') try: - expected = os.fstatvfs(0) + os.fstatvfs(0) except OSError, e: py.test.skip("the underlying os.fstatvfs() failed: %s" % e) getllimpl(os.fstatvfs)(0) @@ -87,7 +87,7 @@ assert data == posix._getfullpathname(stuff) # the most intriguing failure of ntpath.py should not repeat, here: assert not data.endswith(stuff) - + def test_getcwd(): data = getllimpl(os.getcwd)() assert data == os.getcwd() @@ -104,8 +104,8 @@ # the ctypes call seems not to work in the Wing debugger return assert str(buf.value).lower() == pwd.lower() - # ctypes returns the drive letter in uppercase, - # os.getcwd does not, + # ctypes returns the drive letter in uppercase, + # os.getcwd does not, # but there may be uppercase in os.getcwd path pwd = os.getcwd() @@ -298,11 +298,10 @@ def setup_class(cls): if not hasattr(os, 'ttyname'): py.test.skip("no ttyname") - + def test_ttyname(self): def f(): import os - import py from rpython.rtyper.test.test_llinterp import interpret def ll_to_string(s): diff --git a/rpython/rtyper/module/test/test_ll_os_path.py b/rpython/rtyper/module/test/test_ll_os_path.py --- a/rpython/rtyper/module/test/test_ll_os_path.py +++ b/rpython/rtyper/module/test/test_ll_os_path.py @@ -3,10 +3,10 @@ import sys, os from rpython.rtyper.lltypesystem.module.ll_os_path import Implementation as impl -from rpython.rtyper.module.support import ll_strcpy from rpython.rtyper.test.test_llinterp import interpret from rpython.tool.udir import udir + def test_exists(): filename = impl.to_rstr(str(py.path.local(__file__))) assert impl.ll_os_path_exists(filename) == True From noreply at buildbot.pypy.org Mon Aug 5 02:16:11 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 5 Aug 2013 02:16:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Dead links Message-ID: <20130805001611.B14291C013B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65944:009a2cacc095 Date: 2013-08-04 17:15 -0700 http://bitbucket.org/pypy/pypy/changeset/009a2cacc095/ Log: Dead links diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. 
_`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py From noreply at buildbot.pypy.org Mon Aug 5 14:27:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Aug 2013 14:27:06 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: Rename this attribute Message-ID: <20130805122706.347971C013B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: kill-gen-store-back-in Changeset: r65945:27dfed4f2dff Date: 2013-08-05 14:26 +0200 http://bitbucket.org/pypy/pypy/changeset/27dfed4f2dff/ Log: Rename this attribute diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -484,7 +484,7 @@ class OptVirtualize(optimizer.Optimization): "Virtualize objects until they escape." - _last_guard_not_forced = None + _last_guard_not_forced_2 = None def new(self): return OptVirtualize() @@ -530,11 +530,11 @@ self.emit_operation(op) def optimize_GUARD_NOT_FORCED_2(self, op): - self._last_guard_not_forced = op + self._last_guard_not_forced_2 = op def optimize_FINISH(self, op): - if self._last_guard_not_forced is not None: - guard_op = self._last_guard_not_forced + if self._last_guard_not_forced_2 is not None: + guard_op = self._last_guard_not_forced_2 self.emit_operation(op) guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) i = len(self.optimizer._newoperations) - 1 From noreply at buildbot.pypy.org Mon Aug 5 14:43:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Aug 2013 14:43:48 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: Kill bogus line Message-ID: <20130805124348.49A081C01B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: kill-gen-store-back-in Changeset: r65946:e8567fc5a488 Date: 2013-08-05 14:43 +0200 http://bitbucket.org/pypy/pypy/changeset/e8567fc5a488/ Log: Kill bogus line diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2553,8 +2553,6 @@ virtualizable) self.virtualizable_boxes.append(virtualizable_box) - vinfo = self.jitdriver_sd.virtualizable_info - def gen_store_back_in_vable(self, box): vinfo = self.jitdriver_sd.virtualizable_info if vinfo is not None: From noreply at buildbot.pypy.org Mon Aug 5 14:46:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Aug 2013 14:46:41 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: Revert these changes, not needed any more Message-ID: <20130805124641.DE0181C01B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: kill-gen-store-back-in Changeset: r65947:2e8522488faa Date: 2013-08-05 14:45 +0200 http://bitbucket.org/pypy/pypy/changeset/2e8522488faa/ Log: Revert these changes, not needed any more diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -158,8 +158,7 @@ return 
rop._MALLOC_FIRST <= self.getopnum() <= rop._MALLOC_LAST def can_malloc(self): - return (rop._CANMALLOC_FIRST <= self.getopnum() <= rop._CANMALLOC_LAST - or self.is_call()) + return self.is_call() or self.is_malloc() def is_call(self): return rop._CALL_FIRST <= self.getopnum() <= rop._CALL_LAST @@ -479,7 +478,6 @@ 'RAW_LOAD/2d', 'GETFIELD_GC/1d', 'GETFIELD_RAW/1d', - '_CANMALLOC_FIRST', '_MALLOC_FIRST', 'NEW/0d', 'NEW_WITH_VTABLE/1', @@ -487,7 +485,6 @@ 'NEWSTR/1', 'NEWUNICODE/1', '_MALLOC_LAST', - '_CANMALLOC_LAST', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', From noreply at buildbot.pypy.org Mon Aug 5 22:41:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Aug 2013 22:41:45 +0200 (CEST) Subject: [pypy-commit] jitviewer default: Add a note Message-ID: <20130805204145.2108D1C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r228:7307df41949f Date: 2013-08-05 22:41 +0200 http://bitbucket.org/pypy/jitviewer/changeset/7307df41949f/ Log: Add a note diff --git a/README b/README --- a/README +++ b/README @@ -19,7 +19,9 @@ python setup.py develop It also requires pypy to be importable (as in source code), you can do this -by setting your ``PYTHONPATH`` enviromental variable. +by setting your ``PYTHONPATH`` enviroment variable. Make sure that this +source code is (roughly) the same version as the binary pypy that produced +the log file. Finally, run it: From noreply at buildbot.pypy.org Tue Aug 6 09:27:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 09:27:42 +0200 (CEST) Subject: [pypy-commit] pypy default: issue #1577: maybe? fix refcounting issues with ssl sockets Message-ID: <20130806072742.14F7B1C01B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65948:d8124d5c9c00 Date: 2013-08-06 09:26 +0200 http://bitbucket.org/pypy/pypy/changeset/d8124d5c9c00/ Log: issue #1577: maybe? fix refcounting issues with ssl sockets diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). 
+ sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS From noreply at buildbot.pypy.org Tue Aug 6 10:33:39 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 10:33:39 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: merge default (I hope) Message-ID: <20130806083339.3269D1C346F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65949:0155f66199d4 Date: 2013-08-06 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/0155f66199d4/ Log: merge default (I hope) diff too long, truncating to 2000 out of 2984 lines diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). 
+ sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1305,7 +1305,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -58,3 +58,11 @@ .. branch: foldable-getarrayitem-indexerror Constant-fold reading out of constant tuples in PyPy. +.. branch: mro-reorder-numpypy-str +No longer delegate numpy string_ methods to space.StringObject, in numpy +this works by kind of by accident. Support for merging the refactor-str-types +branch + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. 
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -592,11 +592,15 @@ def get(self, w_cppinstance, w_pycppclass): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) self.converter.to_memory(self.space, w_cppinstance, w_value, offset) return self.space.w_None diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -426,6 +426,11 @@ # mostly for the benefit of the CINT backend, which treats std as special gbl.std = make_cppnamespace(None, "std", None, False) + # install a type for enums to refer to + # TODO: this is correct for C++98, not for C++11 and in general there will + # be the same issue for all typedef'd builtin types + setattr(gbl, 'unsigned int', int) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -105,6 +105,13 @@ raises(IndexError, c.m_float_array.__getitem__, self.N) raises(IndexError, c.m_double_array.__getitem__, self.N) + # can not access an instance member on the class + raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, getattr, cppyy_test_data, 'm_int') + + assert not hasattr(cppyy_test_data, 'm_bool') + assert not hasattr(cppyy_test_data, 'm_int') + c.destruct() def test03_instance_data_write_access(self): @@ -428,12 +435,17 @@ c = cppyy_test_data() assert isinstance(c, cppyy_test_data) - # TODO: test that the enum is accessible as a type + # test that the enum is accessible as a type + assert cppyy_test_data.what assert cppyy_test_data.kNothing == 6 assert cppyy_test_data.kSomething == 111 assert cppyy_test_data.kLots == 42 + assert cppyy_test_data.what(cppyy_test_data.kNothing) == cppyy_test_data.kNothing + assert cppyy_test_data.what(6) == cppyy_test_data.kNothing + # TODO: only allow instantiations with correct values (C++11) + assert c.get_enum() == cppyy_test_data.kNothing assert c.m_enum == cppyy_test_data.kNothing @@ -455,6 +467,7 @@ assert cppyy_test_data.s_enum == cppyy_test_data.kSomething # global enums + assert gbl.fruit # test type accessible assert gbl.kApple == 78 assert gbl.kBanana == 29 assert gbl.kCitrus == 34 diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -379,12 +379,14 @@ return self class W_CharacterBox(W_FlexibleBox): - pass + def convert_to(self, dtype): + # XXX assert dtype is str type + return self + class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype - arg = space.str_w(space.str(w_arg)) arr = 
VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -684,12 +686,12 @@ __module__ = "numpypy", ) -W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), +W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), __module__ = "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -234,6 +234,9 @@ def is_record_type(self): return self.fields is not None + def is_str_type(self): + return self.num == 18 + def is_str_or_unicode(self): return (self.num == 18 or self.num == 19) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -328,14 +328,19 @@ w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - if (w_lhs.get_dtype().is_flexible_type() or \ - w_rhs.get_dtype().is_flexible_type()): + w_ldtype = w_lhs.get_dtype() + w_rdtype = w_rhs.get_dtype() + if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + self.comparison_func: + pass + elif (w_ldtype.is_flexible_type() or \ + w_rdtype.is_flexible_type()): raise OperationError(space.w_TypeError, space.wrap( 'unsupported operand dtypes %s and %s for "%s"' % \ - (w_rhs.get_dtype().get_name(), w_lhs.get_dtype().get_name(), + (w_rdtype.get_name(), w_ldtype.get_name(), self.name))) calc_dtype = find_binop_result_dtype(space, - w_lhs.get_dtype(), w_rhs.get_dtype(), + w_ldtype, w_rdtype, int_only=self.int_only, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools, diff --git a/pypy/module/micronumpy/stdobjspace.py b/pypy/module/micronumpy/stdobjspace.py deleted file mode 100644 --- a/pypy/module/micronumpy/stdobjspace.py +++ /dev/null @@ -1,11 +0,0 @@ - -from pypy.objspace.std import stringobject -from pypy.module.micronumpy import interp_boxes - -def delegate_stringbox2stringobj(space, w_box): - return space.wrap(w_box.dtype.itemtype.to_str(w_box)) - -def register_delegates(typeorder): - typeorder[interp_boxes.W_StringBox] = [ - (stringobject.W_StringObject, delegate_stringbox2stringobj), - ] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -740,6 +740,7 @@ class AppTestStrUnicodeDtypes(BaseNumpyAppTest): def test_str_unicode(self): + skip('numpypy differs from numpy') from numpypy import str_, unicode_, character, flexible, generic assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2755,6 +2755,19 @@ assert a[2] == 'ab' raises(TypeError, a, 'sum') raises(TypeError, 'a+a') + b = array(['abcdefg', 'ab', 'cd']) + assert a[2] == b[1] + assert bool(a[1]) + c = array(['ab','cdefg','hi','jk']) + # not implemented yet + 
#c[0] += c[3] + #assert c[0] == 'abjk' + + def test_to_str(self): + from numpypy import array + a = array(['abc','abc', 'def', 'ab'], 'S3') + b = array(['mnopqr','abcdef', 'ab', 'cd']) + assert b[1] != a[1] def test_string_scalar(self): from numpypy import array @@ -2766,8 +2779,7 @@ assert str(a.dtype) == '|S1' a = array('x', dtype='c') assert str(a.dtype) == '|S1' - # XXX can sort flexible types, why not comparison? - #assert a == 'x' + assert a == 'x' def test_flexible_repr(self): from numpypy import array diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1689,6 +1689,22 @@ def get_size(self): return self.size +def str_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v1): + return func(self, self.to_str(v1)) + return dispatcher + +def str_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.to_str(v1), + self.to_str(v2) + ) + return dispatcher class StringType(BaseType, BaseStringType): T = lltype.Char @@ -1696,6 +1712,8 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.interp_dtype import new_string_dtype + if isinstance(w_item, interp_boxes.W_StringBox): + return w_item arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1705,6 +1723,7 @@ @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) + # XXX simplify to range(box.dtype.get_size()) ? for k in range(min(self.size, box.arr.size-offset)): arr.storage[k + i] = box.arr.storage[k + offset] @@ -1718,7 +1737,7 @@ builder = StringBuilder() assert isinstance(item, interp_boxes.W_StringBox) i = item.ofs - end = i+self.size + end = i + item.dtype.get_size() while i < end: assert isinstance(item.arr.storage[i], str) if item.arr.storage[i] == '\x00': @@ -1734,10 +1753,53 @@ builder.append("'") return builder.build() - # XXX move to base class when UnicodeType is supported + # XXX move the rest of this to base class when UnicodeType is supported def to_builtin_type(self, space, box): return space.wrap(self.to_str(box)) + @str_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @str_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @str_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @str_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @str_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @str_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + @str_binary_op + def logical_and(self, v1, v2): + return bool(v1) and bool(v2) + + @str_binary_op + def logical_or(self, v1, v2): + return bool(v1) or bool(v2) + + @str_unary_op + def logical_not(self, v): + return not bool(v) + + @str_binary_op + def logical_xor(self, v1, v2): + return bool(v1) ^ bool(v2) + + def bool(self, v): + return bool(self.to_str(v)) + def build_and_convert(self, space, mydtype, box): assert isinstance(box, interp_boxes.W_GenericBox) if box.get_dtype(space).is_str_or_unicode(): @@ -1753,6 +1815,13 @@ arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + class VoidType(BaseType, BaseStringType): T = lltype.Char @@ -1798,12 +1867,6 @@ return W_NDimArray(implementation) NonNativeVoidType = 
VoidType -NonNativeStringType = StringType - -class UnicodeType(BaseType, BaseStringType): - T = lltype.UniChar - -NonNativeUnicodeType = UnicodeType class RecordType(BaseType): diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Tests for _sqlite3.py""" import pytest, sys @@ -222,3 +223,8 @@ cur.execute("create table test(a)") cur.executemany("insert into test values (?)", [[1], [2], [3]]) assert cur.lastrowid is None + +def test_issue1573(con): + cur = con.cursor() + cur.execute(u'SELECT 1 as méil') + assert cur.description[0][0] == u"méil".encode('utf-8') diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -133,10 +133,6 @@ # when trying to dispatch multimethods. # XXX build these lists a bit more automatically later - if config.objspace.usemodules.micronumpy: - from pypy.module.micronumpy.stdobjspace import register_delegates - register_delegates(self.typeorder) - self.typeorder[boolobject.W_BoolObject] += [ (intobject.W_IntObject, boolobject.delegate_Bool2IntObject), (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. 
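
A usage note on the _sqlite3.py and test_sqlite3.py hunks above: with the .decode('utf-8') call removed, the column names exposed in cursor.description stay UTF-8 encoded byte strings rather than unicode, which is exactly what test_issue1573 checks. A standalone sketch of the expected behaviour (assumes an in-memory database; not part of the patch):

    # -*- coding: utf-8 -*-
    import sqlite3

    con = sqlite3.connect(':memory:')
    cur = con.cursor()
    cur.execute(u'SELECT 1 as méil')
    # the column name is reported as a UTF-8 byte string
    assert cur.description[0][0] == u'méil'.encode('utf-8')
    con.close()
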
diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -59,7 +59,7 @@ t.buildannotator().build_types(function, [int] * len(args), main_entry_point=True) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() warmrunnerdesc = WarmRunnerDesc(t, translate_support_code=True, CPUClass=self.CPUClass, **kwds) diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -21,9 +21,9 @@ self.callcontrol = CallControl(cpu, jitdrivers_sd) self._seen_files = set() - def transform_func_to_jitcode(self, func, values, type_system='lltype'): + def transform_func_to_jitcode(self, func, values): """For testing.""" - rtyper = support.annotate(func, values, type_system=type_system) + rtyper = support.annotate(func, values) graph = rtyper.annotator.translator.graphs[0] jitcode = JitCode("test") self.transform_graph_to_jitcode(graph, jitcode, True) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -35,7 +35,7 @@ return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, - type_system="lltype", translationoptions={}): + translationoptions={}): # build the normal ll graphs for ll_function t = TranslationContext() for key, value in translationoptions.items(): @@ -44,7 +44,7 @@ a = t.buildannotator(policy=annpolicy) argtypes = getargtypes(a, values) a.build_types(func, argtypes, main_entry_point=True) - rtyper = t.buildrtyper(type_system = type_system) + rtyper = t.buildrtyper() rtyper.specialize() #if inline: # auto_inlining(t, threshold=inline) diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -125,8 +125,8 @@ class TestFlatten: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def encoding_test(self, func, args, expected, diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -13,8 +13,8 @@ class TestRegAlloc: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def check_assembler(self, graph, expected, transform=False, diff --git a/rpython/jit/metainterp/jitexc.py b/rpython/jit/metainterp/jitexc.py --- a/rpython/jit/metainterp/jitexc.py +++ b/rpython/jit/metainterp/jitexc.py @@ -62,7 +62,7 @@ def _get_standard_error(rtyper, Class): - exdata = rtyper.getexceptiondata() + exdata = rtyper.exceptiondata clsdef = rtyper.annotator.bookkeeper.getuniqueclassdef(Class) evalue = exdata.get_standard_ll_exc_instance(rtyper, clsdef) return evalue diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ 
b/rpython/jit/metainterp/test/support.py @@ -12,7 +12,7 @@ from rpython.translator.backendopt.all import backend_optimizations -def _get_jitcodes(testself, CPUClass, func, values, type_system, +def _get_jitcodes(testself, CPUClass, func, values, supports_floats=True, supports_longlong=False, supports_singlefloats=False, @@ -50,7 +50,7 @@ FakeWarmRunnerState.enable_opts = {} func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system, + rtyper = support.annotate(func, values, translationoptions=translationoptions) graphs = rtyper.annotator.translator.graphs testself.all_graphs = graphs @@ -210,7 +210,7 @@ def interp_operations(self, f, args, **kwds): # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + _get_jitcodes(self, self.CPUClass, f, args, **kwds) # try to run it with blackhole.py result1 = _run_with_blackhole(self, args) # try to run it with pyjitpl.py diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1074,7 +1074,7 @@ if getattr(graph, 'func', None) is f] init_graph = t._graphof(Frame.__init__.im_func) - deref = t.rtyper.type_system_deref + deref = t.rtyper.type_system.deref def direct_calls(graph): return [deref(op.args[0].value)._callable.func_name diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -27,7 +27,7 @@ x1 = vref() # jit_force_virtual virtual_ref_finish(vref, x) # - _get_jitcodes(self, self.CPUClass, fn, [], self.type_system) + _get_jitcodes(self, self.CPUClass, fn, []) graph = self.all_graphs[0] assert graph.name == 'fn' self.vrefinfo.replace_force_virtual_with_call([graph]) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -105,7 +105,7 @@ self.minimalgctransformer = None def get_lltype_of_exception_value(self): - exceptiondata = self.translator.rtyper.getexceptiondata() + exceptiondata = self.translator.rtyper.exceptiondata return exceptiondata.lltype_of_exception_value def need_minimal_transform(self, graph): @@ -479,11 +479,11 @@ flags = hop.spaceop.args[1].value flavor = flags['flavor'] meth = getattr(self, 'gct_fv_%s_malloc' % flavor, None) - assert meth, "%s has no support for malloc with flavor %r" % (self, flavor) + assert meth, "%s has no support for malloc with flavor %r" % (self, flavor) c_size = rmodel.inputconst(lltype.Signed, llmemory.sizeof(TYPE)) v_raw = meth(hop, flags, TYPE, c_size) hop.cast_result(v_raw) - + def gct_fv_raw_malloc(self, hop, flags, TYPE, c_size): v_raw = hop.genop("direct_call", [self.raw_malloc_fixedsize_ptr, c_size], resulttype=llmemory.Address) @@ -506,7 +506,7 @@ flags.update(add_flags) flavor = flags['flavor'] meth = getattr(self, 'gct_fv_%s_malloc_varsize' % flavor, None) - assert meth, "%s has no support for malloc_varsize with flavor %r" % (self, flavor) + assert meth, "%s has no support for malloc_varsize with flavor %r" % (self, flavor) return self.varsize_malloc_helper(hop, flags, meth, []) def gct_malloc_nonmovable(self, *args, **kwds): diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py 
@@ -127,8 +127,8 @@ return None def specialize_call(self, hop): + from rpython.rtyper.lltypesystem.rstr import string_repr fn = self.instance - string_repr = hop.rtyper.type_system.rstr.string_repr vlist = hop.inputargs(string_repr) hop.exception_cannot_occur() t = hop.rtyper.annotator.translator @@ -190,7 +190,7 @@ def compute_result_annotation(self): return None - + def specialize_call(self, hop): hop.exception_cannot_occur() return hop.genop('debug_flush', []) @@ -278,7 +278,7 @@ from rpython.annotator.annrpython import log log.WARNING('make_sure_not_resized called, but has no effect since list_comprehension is off') return s_arg - + def specialize_call(self, hop): hop.exception_cannot_occur() return hop.inputarg(hop.args_r[0], arg=0) @@ -294,7 +294,7 @@ class DictMarkEntry(ExtRegistryEntry): _about_ = mark_dict_non_null - + def compute_result_annotation(self, s_dict): from rpython.annotator.model import SomeDict diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -362,7 +362,8 @@ return SomeString() def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbuilder.stringbuilder_repr + from rpython.rtyper.lltypesystem.rbuilder import stringbuilder_repr + return stringbuilder_repr def rtyper_makekey(self): return self.__class__, @@ -398,7 +399,8 @@ return SomeUnicodeString() def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbuilder.unicodebuilder_repr + from rpython.rtyper.lltypesystem.rbuilder import unicodebuilder_repr + return unicodebuilder_repr def rtyper_makekey(self): return self.__class__, diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -7,6 +7,7 @@ from rpython.annotator.policy import AnnotatorPolicy from rpython.annotator.signature import Sig from rpython.annotator.specialize import flatten_star_args +from rpython.rtyper.normalizecalls import perform_normalizations from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.flowspace.model import Constant from rpython.rlib.objectmodel import specialize @@ -73,16 +74,6 @@ return LowLevelAnnotatorPolicy.lowlevelspecialize(funcdesc, args_s, {}) default_specialize = staticmethod(default_specialize) - def specialize__ts(pol, funcdesc, args_s, ref): - ts = pol.rtyper.type_system - ref = ref.split('.') - x = ts - for part in ref: - x = getattr(x, part) - bk = pol.rtyper.annotator.bookkeeper - funcdesc2 = bk.getdesc(x) - return pol.default_specialize(funcdesc2, args_s) - def specialize__semierased(funcdesc, args_s): a2l = annmodel.annotation_to_lltype l2a = annmodel.lltype_to_annotation @@ -261,7 +252,7 @@ rtyper = self.rtyper translator = rtyper.annotator.translator original_graph_count = len(translator.graphs) - rtyper.type_system.perform_normalizations(rtyper) + perform_normalizations(rtyper) for r in self.delayedreprs: r.set_setup_delayed(False) rtyper.call_all_setups() diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -137,7 +137,7 @@ return self.holders def _emit(self, repr, hop): - assert isinstance(repr, rtuple.AbstractTupleRepr) + assert isinstance(repr, rtuple.TupleRepr) tupleitems_v = [] for h in self.holders: v = h.emit(repr.items_r[len(tupleitems_v)], hop) diff --git a/rpython/rtyper/exceptiondata.py b/rpython/rtyper/exceptiondata.py --- a/rpython/rtyper/exceptiondata.py +++ b/rpython/rtyper/exceptiondata.py @@ -1,34 +1,22 @@ from 
rpython.annotator import model as annmodel from rpython.rlib import rstackovf from rpython.rtyper import rclass - +from rpython.rtyper.lltypesystem.rclass import (ll_issubclass, ll_type, + ll_cast_to_object) # the exceptions that can be implicitely raised by some operations -standardexceptions = { - TypeError : True, - OverflowError : True, - ValueError : True, - ZeroDivisionError: True, - MemoryError : True, - IOError : True, - OSError : True, - StopIteration : True, - KeyError : True, - IndexError : True, - AssertionError : True, - RuntimeError : True, - UnicodeDecodeError: True, - UnicodeEncodeError: True, - NotImplementedError: True, - rstackovf._StackOverflow: True, - } +standardexceptions = set([TypeError, OverflowError, ValueError, + ZeroDivisionError, MemoryError, IOError, OSError, StopIteration, KeyError, + IndexError, AssertionError, RuntimeError, UnicodeDecodeError, + UnicodeEncodeError, NotImplementedError, rstackovf._StackOverflow]) class UnknownException(Exception): pass -class AbstractExceptionData: +class ExceptionData(object): """Public information for the code generators to help with exceptions.""" + standardexceptions = standardexceptions def __init__(self, rtyper): @@ -63,10 +51,10 @@ return helper_fn def get_standard_ll_exc_instance(self, rtyper, clsdef): - rclass = rtyper.type_system.rclass - r_inst = rclass.getinstancerepr(rtyper, clsdef) + from rpython.rtyper.lltypesystem.rclass import getinstancerepr + r_inst = getinstancerepr(rtyper, clsdef) example = r_inst.get_reusable_prebuilt_instance() - example = self.cast_exception(self.lltype_of_exception_value, example) + example = ll_cast_to_object(example) return example def get_standard_ll_exc_instance_by_class(self, exceptionclass): @@ -75,3 +63,21 @@ clsdef = self.rtyper.annotator.bookkeeper.getuniqueclassdef( exceptionclass) return self.get_standard_ll_exc_instance(self.rtyper, clsdef) + + def make_helpers(self, rtyper): + # create helper functionptrs + self.fn_exception_match = self.make_exception_matcher(rtyper) + self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper) + self.fn_raise_OSError = self.make_raise_OSError(rtyper) + + def make_exception_matcher(self, rtyper): + # ll_exception_matcher(real_exception_vtable, match_exception_vtable) + s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type) + helper_fn = rtyper.annotate_helper_fn(ll_issubclass, [s_typeptr, s_typeptr]) + return helper_fn + + def make_type_of_exc_inst(self, rtyper): + # ll_type_of_exc_inst(exception_instance) -> exception_vtable + s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) + helper_fn = rtyper.annotate_helper_fn(ll_type, [s_excinst]) + return helper_fn diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -61,27 +61,3 @@ sandboxsafe=True, llimpl=getattr(ll_math, method_name)) -# ___________________________ -# os.path functions - -from rpython.tool.sourcetools import func_with_new_name -import os.path - -# os.path.join is RPython, but we don't want to compile it directly -# because it's platform dependant. This is ok for lltype where the -# execution platform is the same as the translation platform, but not -# for ootype where the executable produced by some backends (e.g. CLI, -# JVM) are expected to run everywhere. Thus, we register it as an -# external function, but we provide a clone for lltype using -# func_with_new_name. 
- -path_functions = [ - ('join', [ll_os.str0, ll_os.str0], ll_os.str0), - ('dirname', [ll_os.str0], ll_os.str0), - ] - -for name, args, res in path_functions: - func = getattr(os.path, name) - llimpl = func_with_new_name(func, name) - register_external(func, args, res, 'll_os_path.ll_%s' % name, - llimpl=llimpl, sandboxsafe=True) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -286,7 +286,7 @@ rtyper = self.llinterpreter.typer bk = rtyper.annotator.bookkeeper classdef = bk.getuniqueclassdef(rstackovf._StackOverflow) - exdata = rtyper.getexceptiondata() + exdata = rtyper.exceptiondata evalue = exdata.get_standard_ll_exc_instance(rtyper, classdef) etype = exdata.fn_type_of_exc_inst(evalue) e = LLException(etype, evalue) @@ -335,7 +335,7 @@ elif catch_exception: link = block.exits[0] if e: - exdata = self.llinterpreter.typer.getexceptiondata() + exdata = self.llinterpreter.typer.exceptiondata cls = e.args[0] inst = e.args[1] for link in block.exits[1:]: @@ -440,7 +440,7 @@ else: extraargs = () typer = self.llinterpreter.typer - exdata = typer.getexceptiondata() + exdata = typer.exceptiondata if isinstance(exc, OSError): self.op_direct_call(exdata.fn_raise_OSError, exc.errno) assert False, "op_direct_call above should have raised" diff --git a/rpython/rtyper/lltypesystem/exceptiondata.py b/rpython/rtyper/lltypesystem/exceptiondata.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/exceptiondata.py +++ /dev/null @@ -1,31 +0,0 @@ -from rpython.annotator import model as annmodel -from rpython.rtyper.lltypesystem import rclass -from rpython.rtyper.lltypesystem.lltype import (Array, malloc, Ptr, FuncType, - functionptr, Signed) -from rpython.rtyper.exceptiondata import AbstractExceptionData -from rpython.annotator.classdef import FORCE_ATTRIBUTES_INTO_CLASSES - - -class ExceptionData(AbstractExceptionData): - """Public information for the code generators to help with exceptions.""" - - def make_helpers(self, rtyper): - # create helper functionptrs - self.fn_exception_match = self.make_exception_matcher(rtyper) - self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper) - self.fn_raise_OSError = self.make_raise_OSError(rtyper) - - def make_exception_matcher(self, rtyper): - # ll_exception_matcher(real_exception_vtable, match_exception_vtable) - s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type) - helper_fn = rtyper.annotate_helper_fn(rclass.ll_issubclass, [s_typeptr, s_typeptr]) - return helper_fn - - def make_type_of_exc_inst(self, rtyper): - # ll_type_of_exc_inst(exception_instance) -> exception_vtable - s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) - helper_fn = rtyper.annotate_helper_fn(rclass.ll_type, [s_excinst]) - return helper_fn - - def cast_exception(self, TYPE, value): - return rclass.ll_cast_to_object(value) diff --git a/rpython/rtyper/lltypesystem/ll_str.py b/rpython/rtyper/lltypesystem/ll_str.py --- a/rpython/rtyper/lltypesystem/ll_str.py +++ b/rpython/rtyper/lltypesystem/ll_str.py @@ -1,14 +1,9 @@ from rpython.rtyper.lltypesystem.lltype import GcArray, Array, Char, malloc -from rpython.rtyper.annlowlevel import llstr from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib import jit CHAR_ARRAY = GcArray(Char) - at jit.elidable -def ll_int_str(repr, i): - return ll_int2dec(i) - def ll_unsigned(i): if isinstance(i, r_longlong) or isinstance(i, r_ulonglong): return r_ulonglong(i) @@ -47,7 +42,7 @@ hex_chars = malloc(Array(Char), 
16, immortal=True) for i in range(16): - hex_chars[i] = "%x"%i + hex_chars[i] = "%x" % i @jit.elidable def ll_int2hex(i, addPrefix): @@ -122,8 +117,3 @@ result.chars[j] = temp[len-j-1] j += 1 return result - - at jit.elidable -def ll_float_str(repr, f): - from rpython.rlib.rfloat import formatd - return llstr(formatd(f, 'f', 6)) diff --git a/rpython/rtyper/lltypesystem/rbuiltin.py b/rpython/rtyper/lltypesystem/rbuiltin.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rbuiltin.py +++ /dev/null @@ -1,90 +0,0 @@ -from rpython.annotator import model as annmodel -from rpython.rlib import objectmodel -from rpython.rtyper.lltypesystem import lltype, rclass -from rpython.rtyper.lltypesystem.rdict import rtype_r_dict -from rpython.rtyper.rmodel import TyperError - - -def rtype_builtin_isinstance(hop): - hop.exception_cannot_occur() - if hop.s_result.is_constant(): - return hop.inputconst(lltype.Bool, hop.s_result.const) - - if hop.args_s[1].is_constant() and hop.args_s[1].const == list: - if hop.args_s[0].knowntype != list: - raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None") - rlist = hop.args_r[0] - vlist = hop.inputarg(rlist, arg=0) - cnone = hop.inputconst(rlist, None) - return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool) - - assert isinstance(hop.args_r[0], rclass.InstanceRepr) - return hop.args_r[0].rtype_isinstance(hop) - -def ll_instantiate(typeptr): # NB. used by rpbc.ClassesPBCRepr as well - my_instantiate = typeptr.instantiate - return my_instantiate() - -def rtype_instantiate(hop): - hop.exception_cannot_occur() - s_class = hop.args_s[0] - assert isinstance(s_class, annmodel.SomePBC) - if len(s_class.descriptions) != 1: - # instantiate() on a variable class - vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) - v_inst = hop.gendirectcall(ll_instantiate, vtypeptr) - return hop.genop('cast_pointer', [v_inst], # v_type implicit in r_result - resulttype = hop.r_result.lowleveltype) - - classdef = s_class.any_description().getuniqueclassdef() - return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) - -def rtype_builtin_hasattr(hop): - hop.exception_cannot_occur() - if hop.s_result.is_constant(): - return hop.inputconst(lltype.Bool, hop.s_result.const) - - raise TyperError("hasattr is only suported on a constant") - -BUILTIN_TYPER = {} -BUILTIN_TYPER[objectmodel.instantiate] = rtype_instantiate -BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance -BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr -BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict - -# _________________________________________________________________ -# weakrefs - -import weakref -from rpython.rtyper.lltypesystem import llmemory - -def rtype_weakref_create(hop): - # Note: this code also works for the RPython-level calls 'weakref.ref(x)'. 
- vlist = hop.inputargs(hop.args_r[0]) - hop.exception_cannot_occur() - return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr) - -def rtype_weakref_deref(hop): - c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) - assert v_wref.concretetype == llmemory.WeakRefPtr - hop.exception_cannot_occur() - return hop.genop('weakref_deref', [v_wref], resulttype=c_ptrtype.value) - -def rtype_cast_ptr_to_weakrefptr(hop): - vlist = hop.inputargs(hop.args_r[0]) - hop.exception_cannot_occur() - return hop.genop('cast_ptr_to_weakrefptr', vlist, - resulttype=llmemory.WeakRefPtr) - -def rtype_cast_weakrefptr_to_ptr(hop): - c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) - assert v_wref.concretetype == llmemory.WeakRefPtr - hop.exception_cannot_occur() - return hop.genop('cast_weakrefptr_to_ptr', [v_wref], - resulttype=c_ptrtype.value) - -BUILTIN_TYPER[weakref.ref] = rtype_weakref_create -BUILTIN_TYPER[llmemory.weakref_create] = rtype_weakref_create -BUILTIN_TYPER[llmemory.weakref_deref] = rtype_weakref_deref -BUILTIN_TYPER[llmemory.cast_ptr_to_weakrefptr] = rtype_cast_ptr_to_weakrefptr -BUILTIN_TYPER[llmemory.cast_weakrefptr_to_ptr] = rtype_cast_weakrefptr_to_ptr diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -317,7 +317,7 @@ def ll_str2bytearray(str): from rpython.rtyper.lltypesystem.rbytearray import BYTEARRAY - + lgt = len(str.chars) b = malloc(BYTEARRAY, lgt) for i in range(lgt): @@ -974,7 +974,7 @@ argsiter = iter(sourcevarsrepr) - InstanceRepr = hop.rtyper.type_system.rclass.InstanceRepr + from rpython.rtyper.lltypesystem.rclass import InstanceRepr for i, thing in enumerate(things): if isinstance(thing, tuple): code = thing[0] @@ -1007,7 +1007,6 @@ else: raise TyperError("%%%s is not RPython" % (code,)) else: - from rpython.rtyper.lltypesystem.rstr import string_repr, unicode_repr if is_unicode: vchunk = inputconst(unicode_repr, thing) else: diff --git a/rpython/rtyper/lltypesystem/rtuple.py b/rpython/rtyper/lltypesystem/rtuple.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rtuple.py +++ /dev/null @@ -1,113 +0,0 @@ -from rpython.tool.pairtype import pairtype -from rpython.rtyper.rmodel import inputconst -from rpython.rtyper.rtuple import AbstractTupleRepr, AbstractTupleIteratorRepr -from rpython.rtyper.lltypesystem.lltype import \ - Ptr, GcStruct, Void, Signed, malloc, typeOf, nullptr -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE -from rpython.rtyper.lltypesystem import rstr - -# ____________________________________________________________ -# -# Concrete implementation of RPython tuples: -# -# struct tuple { -# type0 item0; -# type1 item1; -# type2 item2; -# ... 
-# } - -class TupleRepr(AbstractTupleRepr): - rstr_ll = rstr.LLHelpers - - def __init__(self, rtyper, items_r): - AbstractTupleRepr.__init__(self, rtyper, items_r) - self.lowleveltype = TUPLE_TYPE(self.lltypes) - - def newtuple(cls, llops, r_tuple, items_v): - # items_v should have the lowleveltype of the internal reprs - assert len(r_tuple.items_r) == len(items_v) - for r_item, v_item in zip(r_tuple.items_r, items_v): - assert r_item.lowleveltype == v_item.concretetype - # - if len(r_tuple.items_r) == 0: - return inputconst(Void, ()) # a Void empty tuple - c1 = inputconst(Void, r_tuple.lowleveltype.TO) - cflags = inputconst(Void, {'flavor': 'gc'}) - v_result = llops.genop('malloc', [c1, cflags], - resulttype = r_tuple.lowleveltype) - for i in range(len(r_tuple.items_r)): - cname = inputconst(Void, r_tuple.fieldnames[i]) - llops.genop('setfield', [v_result, cname, items_v[i]]) - return v_result - newtuple = classmethod(newtuple) - - def instantiate(self): - if len(self.items_r) == 0: - return dum_empty_tuple # PBC placeholder for an empty tuple - else: - return malloc(self.lowleveltype.TO) - - def rtype_bltn_list(self, hop): - from rpython.rtyper.lltypesystem import rlist - nitems = len(self.items_r) - vtup = hop.inputarg(self, 0) - LIST = hop.r_result.lowleveltype.TO - cno = inputconst(Signed, nitems) - hop.exception_is_here() - vlist = hop.gendirectcall(LIST.ll_newlist, cno) - v_func = hop.inputconst(Void, rlist.dum_nocheck) - for index in range(nitems): - name = self.fieldnames[index] - ritem = self.items_r[index] - cname = hop.inputconst(Void, name) - vitem = hop.genop('getfield', [vtup, cname], resulttype = ritem) - vitem = hop.llops.convertvar(vitem, ritem, hop.r_result.item_repr) - cindex = inputconst(Signed, index) - hop.gendirectcall(rlist.ll_setitem_nonneg, v_func, vlist, cindex, vitem) - return vlist - - def getitem_internal(self, llops, v_tuple, index): - """Return the index'th item, in internal repr.""" - name = self.fieldnames[index] - llresult = self.lltypes[index] - cname = inputconst(Void, name) - return llops.genop('getfield', [v_tuple, cname], resulttype = llresult) - - -def rtype_newtuple(hop): - return TupleRepr._rtype_newtuple(hop) - -newtuple = TupleRepr.newtuple - -def dum_empty_tuple(): pass - - -# ____________________________________________________________ -# -# Iteration. - -class Length1TupleIteratorRepr(AbstractTupleIteratorRepr): - - def __init__(self, r_tuple): - self.r_tuple = r_tuple - self.lowleveltype = Ptr(GcStruct('tuple1iter', - ('tuple', r_tuple.lowleveltype))) - self.ll_tupleiter = ll_tupleiter - self.ll_tuplenext = ll_tuplenext - -TupleRepr.IteratorRepr = Length1TupleIteratorRepr - -def ll_tupleiter(ITERPTR, tuple): - iter = malloc(ITERPTR.TO) - iter.tuple = tuple - return iter - -def ll_tuplenext(iter): - # for iterating over length 1 tuples only! - t = iter.tuple - if t: - iter.tuple = nullptr(typeOf(t).TO) - return t.item0 - else: - raise StopIteration diff --git a/rpython/rtyper/lltypesystem/rtupletype.py b/rpython/rtyper/lltypesystem/rtupletype.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rtupletype.py +++ /dev/null @@ -1,15 +0,0 @@ -# Helper to build the lowleveltype corresponding to an RPython tuple. -# This is not in rtuple.py so that it can be imported without bringing -# the whole rtyper in. 
- -from rpython.rtyper.lltypesystem.lltype import Void, Ptr, GcStruct - - -def TUPLE_TYPE(field_lltypes): - if len(field_lltypes) == 0: - return Void # empty tuple - else: - fields = [('item%d' % i, TYPE) for i, TYPE in enumerate(field_lltypes)] - kwds = {'hints': {'immutable': True, - 'noidentity': True}} - return Ptr(GcStruct('tuple%d' % len(field_lltypes), *fields, **kwds)) diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -13,7 +13,7 @@ from rpython.rtyper.annlowlevel import hlstr from rpython.rtyper.extfunc import extdef from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE +from rpython.rtyper.rtuple import TUPLE_TYPE from rpython.rtyper.tool import rffi_platform as platform from rpython.tool.pairtype import pairtype from rpython.tool.sourcetools import func_renamer diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -50,7 +50,7 @@ if not hasattr(os, 'statvfs'): py.test.skip('posix specific function') try: - expected = os.statvfs('.') + os.statvfs('.') except OSError, e: py.test.skip("the underlying os.statvfs() failed: %s" % e) getllimpl(os.statvfs)('.') @@ -59,7 +59,7 @@ if not hasattr(os, 'fstatvfs'): py.test.skip('posix specific function') try: - expected = os.fstatvfs(0) + os.fstatvfs(0) except OSError, e: py.test.skip("the underlying os.fstatvfs() failed: %s" % e) getllimpl(os.fstatvfs)(0) @@ -87,7 +87,7 @@ assert data == posix._getfullpathname(stuff) # the most intriguing failure of ntpath.py should not repeat, here: assert not data.endswith(stuff) - + def test_getcwd(): data = getllimpl(os.getcwd)() assert data == os.getcwd() @@ -104,8 +104,8 @@ # the ctypes call seems not to work in the Wing debugger return assert str(buf.value).lower() == pwd.lower() - # ctypes returns the drive letter in uppercase, - # os.getcwd does not, + # ctypes returns the drive letter in uppercase, + # os.getcwd does not, # but there may be uppercase in os.getcwd path pwd = os.getcwd() @@ -298,11 +298,10 @@ def setup_class(cls): if not hasattr(os, 'ttyname'): py.test.skip("no ttyname") - + def test_ttyname(self): def f(): import os - import py from rpython.rtyper.test.test_llinterp import interpret def ll_to_string(s): diff --git a/rpython/rtyper/module/test/test_ll_os_path.py b/rpython/rtyper/module/test/test_ll_os_path.py --- a/rpython/rtyper/module/test/test_ll_os_path.py +++ b/rpython/rtyper/module/test/test_ll_os_path.py @@ -3,10 +3,10 @@ import sys, os from rpython.rtyper.lltypesystem.module.ll_os_path import Implementation as impl -from rpython.rtyper.module.support import ll_strcpy from rpython.rtyper.test.test_llinterp import interpret from rpython.tool.udir import udir + def test_exists(): filename = impl.to_rstr(str(py.path.local(__file__))) assert impl.ll_os_path_exists(filename) == True diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -3,7 +3,8 @@ from rpython.rlib import rarithmetic, objectmodel from rpython.rtyper import raddress, rptr, extregistry, rrange from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass +from rpython.rtyper.lltypesystem.rdict import 
rtype_r_dict from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype @@ -52,14 +53,14 @@ raise TyperError("**kwds call not implemented") if arguments.w_stararg is not None: # expand the *arg in-place -- it must be a tuple - from rpython.rtyper.rtuple import AbstractTupleRepr + from rpython.rtyper.rtuple import TupleRepr if arguments.w_stararg != hop.nb_args - 3: raise TyperError("call pattern too complex") hop.nb_args -= 1 v_tuple = hop.args_v.pop() s_tuple = hop.args_s.pop() r_tuple = hop.args_r.pop() - if not isinstance(r_tuple, AbstractTupleRepr): + if not isinstance(r_tuple, TupleRepr): raise TyperError("*arg must be a tuple") for i in range(len(r_tuple.items_r)): v_item = r_tuple.getitem_internal(hop.llops, v_tuple, i) @@ -92,10 +93,6 @@ return BUILTIN_TYPER[self.builtinfunc] except (KeyError, TypeError): pass - try: - return rtyper.type_system.rbuiltin.BUILTIN_TYPER[self.builtinfunc] - except (KeyError, TypeError): - pass if extregistry.is_registered(self.builtinfunc): entry = extregistry.lookup(self.builtinfunc) return entry.specialize_call @@ -691,3 +688,86 @@ BUILTIN_TYPER[llmemory.cast_adr_to_ptr] = rtype_cast_adr_to_ptr BUILTIN_TYPER[llmemory.cast_adr_to_int] = rtype_cast_adr_to_int BUILTIN_TYPER[llmemory.cast_int_to_adr] = rtype_cast_int_to_adr + +def rtype_builtin_isinstance(hop): + hop.exception_cannot_occur() + if hop.s_result.is_constant(): + return hop.inputconst(lltype.Bool, hop.s_result.const) + + if hop.args_s[1].is_constant() and hop.args_s[1].const == list: + if hop.args_s[0].knowntype != list: + raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None") + rlist = hop.args_r[0] + vlist = hop.inputarg(rlist, arg=0) + cnone = hop.inputconst(rlist, None) + return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool) + + assert isinstance(hop.args_r[0], rclass.InstanceRepr) + return hop.args_r[0].rtype_isinstance(hop) + +def ll_instantiate(typeptr): # NB. used by rpbc.ClassesPBCRepr as well + my_instantiate = typeptr.instantiate + return my_instantiate() + +def rtype_instantiate(hop): + hop.exception_cannot_occur() + s_class = hop.args_s[0] + assert isinstance(s_class, annmodel.SomePBC) + if len(s_class.descriptions) != 1: + # instantiate() on a variable class + vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) + v_inst = hop.gendirectcall(ll_instantiate, vtypeptr) + return hop.genop('cast_pointer', [v_inst], # v_type implicit in r_result + resulttype = hop.r_result.lowleveltype) + + classdef = s_class.any_description().getuniqueclassdef() + return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) + +def rtype_builtin_hasattr(hop): + hop.exception_cannot_occur() + if hop.s_result.is_constant(): + return hop.inputconst(lltype.Bool, hop.s_result.const) + + raise TyperError("hasattr is only suported on a constant") + +BUILTIN_TYPER[objectmodel.instantiate] = rtype_instantiate +BUILTIN_TYPER[isinstance] = rtype_builtin_isinstance +BUILTIN_TYPER[hasattr] = rtype_builtin_hasattr +BUILTIN_TYPER[objectmodel.r_dict] = rtype_r_dict + +# _________________________________________________________________ +# weakrefs + +import weakref +from rpython.rtyper.lltypesystem import llmemory + +def rtype_weakref_create(hop): + # Note: this code also works for the RPython-level calls 'weakref.ref(x)'. 
+ vlist = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr) + +def rtype_weakref_deref(hop): + c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) + assert v_wref.concretetype == llmemory.WeakRefPtr + hop.exception_cannot_occur() + return hop.genop('weakref_deref', [v_wref], resulttype=c_ptrtype.value) + +def rtype_cast_ptr_to_weakrefptr(hop): + vlist = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + return hop.genop('cast_ptr_to_weakrefptr', vlist, + resulttype=llmemory.WeakRefPtr) + +def rtype_cast_weakrefptr_to_ptr(hop): + c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) + assert v_wref.concretetype == llmemory.WeakRefPtr + hop.exception_cannot_occur() + return hop.genop('cast_weakrefptr_to_ptr', [v_wref], + resulttype=c_ptrtype.value) + +BUILTIN_TYPER[weakref.ref] = rtype_weakref_create +BUILTIN_TYPER[llmemory.weakref_create] = rtype_weakref_create +BUILTIN_TYPER[llmemory.weakref_deref] = rtype_weakref_deref +BUILTIN_TYPER[llmemory.cast_ptr_to_weakrefptr] = rtype_cast_ptr_to_weakrefptr +BUILTIN_TYPER[llmemory.cast_weakrefptr_to_ptr] = rtype_cast_weakrefptr_to_ptr diff --git a/rpython/rtyper/rbytearray.py b/rpython/rtyper/rbytearray.py --- a/rpython/rtyper/rbytearray.py +++ b/rpython/rtyper/rbytearray.py @@ -57,4 +57,5 @@ return self.__class__, def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rbytearray.bytearray_repr + from rpython.rtyper.lltypesystem.rbytearray import bytearray_repr + return bytearray_repr diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -1,9 +1,11 @@ import types +from rpython.flowspace.model import Constant from rpython.annotator import description, model as annmodel from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Void from rpython.rtyper.rmodel import Repr, getgcflavor, inputconst +from rpython.rlib.objectmodel import UnboxedValue class FieldListAccessor(object): @@ -52,7 +54,8 @@ try: result = rtyper.class_reprs[classdef] except KeyError: - result = rtyper.type_system.rclass.ClassRepr(rtyper, classdef) + from rpython.rtyper.lltypesystem.rclass import ClassRepr + result = ClassRepr(rtyper, classdef) rtyper.class_reprs[classdef] = result rtyper.add_pendingsetup(result) return result @@ -73,8 +76,7 @@ def buildinstancerepr(rtyper, classdef, gcflavor='gc'): - from rpython.rlib.objectmodel import UnboxedValue - from rpython.flowspace.model import Constant + from rpython.rtyper.rvirtualizable2 import VirtualizableInstanceRepr if classdef is None: unboxed = [] @@ -91,8 +93,8 @@ if virtualizable: assert len(unboxed) == 0 assert gcflavor == 'gc' - return rtyper.type_system.rvirtualizable.VirtualizableInstanceRepr(rtyper, classdef) - elif usetagging and rtyper.type_system.name == 'lltypesystem': + return VirtualizableInstanceRepr(rtyper, classdef) + elif usetagging: # the UnboxedValue class and its parent classes need a # special repr for their instances if len(unboxed) != 1: @@ -102,7 +104,8 @@ from rpython.rtyper.lltypesystem import rtagged return rtagged.TaggedInstanceRepr(rtyper, classdef, unboxed[0]) else: - return rtyper.type_system.rclass.InstanceRepr(rtyper, classdef, gcflavor) + from rpython.rtyper.lltypesystem.rclass import InstanceRepr + return InstanceRepr(rtyper, classdef, gcflavor) class MissingRTypeAttribute(TyperError): diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- 
a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -5,23 +5,20 @@ class __extend__(annmodel.SomeDict): def rtyper_makerepr(self, rtyper): - dictkey = self.dictdef.dictkey + from rpython.rtyper.lltypesystem.rdict import DictRepr + dictkey = self.dictdef.dictkey dictvalue = self.dictdef.dictvalue - s_key = dictkey .s_value - s_value = dictvalue.s_value + s_key = dictkey.s_value + s_value = dictvalue.s_value force_non_null = self.dictdef.force_non_null if dictkey.custom_eq_hash: custom_eq_hash = lambda: (rtyper.getrepr(dictkey.s_rdict_eqfn), rtyper.getrepr(dictkey.s_rdict_hashfn)) else: custom_eq_hash = None - return rtyper.type_system.rdict.DictRepr(rtyper, - lambda: rtyper.getrepr(s_key), - lambda: rtyper.getrepr(s_value), - dictkey, - dictvalue, - custom_eq_hash, - force_non_null) + return DictRepr(rtyper, lambda: rtyper.getrepr(s_key), + lambda: rtyper.getrepr(s_value), dictkey, dictvalue, + custom_eq_hash, force_non_null) def rtyper_makekey(self): self.dictdef.dictkey .dont_change_any_more = True @@ -29,7 +26,6 @@ return (self.__class__, self.dictdef.dictkey, self.dictdef.dictvalue) - class AbstractDictRepr(rmodel.Repr): def pickrepr(self, item_repr): @@ -41,7 +37,8 @@ pickkeyrepr = pickrepr def compact_repr(self): - return 'DictR %s %s' % (self.key_repr.compact_repr(), self.value_repr.compact_repr()) + return 'DictR %s %s' % (self.key_repr.compact_repr(), + self.value_repr.compact_repr()) def recast_value(self, llops, v): return llops.convertvar(v, self.value_repr, self.external_value_repr) @@ -51,10 +48,11 @@ def rtype_newdict(hop): + from rpython.rtyper.lltypesystem.rdict import ll_newdict hop.inputargs() # no arguments expected r_dict = hop.r_result cDICT = hop.inputconst(lltype.Void, r_dict.DICT) - v_result = hop.gendirectcall(hop.rtyper.type_system.rdict.ll_newdict, cDICT) + v_result = hop.gendirectcall(ll_newdict, cDICT) return v_result diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -1,6 +1,9 @@ from rpython.annotator import model as annmodel from rpython.rlib.objectmodel import _hash_float from rpython.rlib.rarithmetic import base_int +from rpython.rlib.rfloat import formatd +from rpython.rlib import jit +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, SignedLongLong, UnsignedLongLong, Bool, Float) @@ -74,8 +77,8 @@ class __extend__(pairtype(AbstractStringRepr, FloatRepr)): def rtype_mod(_, hop): - rstr = hop.rtyper.type_system.rstr - return rstr.do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) + from rpython.rtyper.lltypesystem.rstr import do_stringformat + return do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) #Helpers FloatRepr,FloatRepr @@ -87,7 +90,6 @@ vlist = hop.inputargs(Float, Float) return hop.genop('float_'+func, vlist, resulttype=Bool) -# class __extend__(FloatRepr): @@ -134,11 +136,9 @@ hop.exception_cannot_occur() return vlist[0] - # version picked by specialisation based on which - # type system rtyping is using, from .ll_str module + @jit.elidable def ll_str(self, f): - pass - ll_str._annspecialcase_ = "specialize:ts('ll_str.ll_float_str')" + return llstr(formatd(f, 'f', 6)) # # _________________________ Conversions _________________________ diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -2,7 +2,7 @@ from rpython.annotator import model as annmodel from rpython.flowspace.operation 
import op_appendices -from rpython.rlib import objectmodel +from rpython.rlib import objectmodel, jit from rpython.rlib.rarithmetic import intmask, r_int, r_longlong from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, Bool, Float, @@ -365,25 +365,24 @@ hop.exception_cannot_occur() return vlist[0] - # version picked by specialisation based on which - # type system rtyping is using, from .ll_str module + @jit.elidable def ll_str(self, i): - raise NotImplementedError - ll_str._annspecialcase_ = "specialize:ts('ll_str.ll_int_str')" + from rpython.rtyper.lltypesystem.ll_str import ll_int2dec + return ll_int2dec(i) def rtype_hex(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2hex self = self.as_int varg = hop.inputarg(self, 0) true = inputconst(Bool, True) - fn = hop.rtyper.type_system.ll_str.ll_int2hex - return hop.gendirectcall(fn, varg, true) + return hop.gendirectcall(ll_int2hex, varg, true) def rtype_oct(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2oct self = self.as_int varg = hop.inputarg(self, 0) true = inputconst(Bool, True) - fn = hop.rtyper.type_system.ll_str.ll_int2oct - return hop.gendirectcall(fn, varg, true) + return hop.gendirectcall(ll_int2oct, varg, true) def ll_hash_int(n): return intmask(n) diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -41,17 +41,18 @@ listitem = self.listdef.listitem s_value = listitem.s_value if (listitem.range_step is not None and not listitem.mutated and - not isinstance(s_value, annmodel.SomeImpossibleValue)): - return rtyper.type_system.rrange.RangeRepr(listitem.range_step) + not isinstance(s_value, annmodel.SomeImpossibleValue)): + from rpython.rtyper.lltypesystem.rrange import RangeRepr + return RangeRepr(listitem.range_step) else: # cannot do the rtyper.getrepr() call immediately, for the case # of recursive structures -- i.e. 
if the listdef contains itself - rlist = rtyper.type_system.rlist + from rpython.rtyper.lltypesystem.rlist import ListRepr, FixedSizeListRepr item_repr = lambda: rtyper.getrepr(listitem.s_value) if self.listdef.listitem.resized: - return rlist.ListRepr(rtyper, item_repr, listitem) + return ListRepr(rtyper, item_repr, listitem) else: - return rlist.FixedSizeListRepr(rtyper, item_repr, listitem) + return FixedSizeListRepr(rtyper, item_repr, listitem) def rtyper_makekey(self): self.listdef.listitem.dont_change_any_more = True @@ -334,12 +335,12 @@ def rtype_newlist(hop, v_sizehint=None): + from rpython.rtyper.lltypesystem.rlist import newlist nb_args = hop.nb_args r_list = hop.r_result r_listitem = r_list.item_repr items_v = [hop.inputarg(r_listitem, arg=i) for i in range(nb_args)] - return hop.rtyper.type_system.rlist.newlist(hop.llops, r_list, items_v, - v_sizehint=v_sizehint) + return newlist(hop.llops, r_list, items_v, v_sizehint=v_sizehint) def rtype_alloc_and_set(hop): r_list = hop.r_result @@ -377,10 +378,10 @@ return v_lst1 def rtype_extend_with_str_slice((r_lst1, r_str2), hop): + from rpython.rtyper.lltypesystem.rstr import string_repr if r_lst1.item_repr.lowleveltype not in (Char, UniChar): raise TyperError('"lst += string" only supported with a list ' 'of chars or unichars') - string_repr = r_lst1.rtyper.type_system.rstr.string_repr v_lst1 = hop.inputarg(r_lst1, arg=0) v_str2 = hop.inputarg(string_repr, arg=3) kind, vlist = hop.decompose_slice_args() @@ -393,10 +394,10 @@ class __extend__(pairtype(AbstractListRepr, AbstractCharRepr)): def rtype_extend_with_char_count((r_lst1, r_chr2), hop): + from rpython.rtyper.lltypesystem.rstr import char_repr if r_lst1.item_repr.lowleveltype not in (Char, UniChar): raise TyperError('"lst += string" only supported with a list ' 'of chars or unichars') - char_repr = r_lst1.rtyper.type_system.rstr.char_repr v_lst1, v_chr, v_count = hop.inputargs(r_lst1, char_repr, Signed) hop.gendirectcall(ll_extend_with_char_count, v_lst1, v_chr, v_count) return v_lst1 diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -12,8 +12,7 @@ def small_cand(rtyper, s_pbc): - if 1 < len(s_pbc.descriptions) < rtyper.getconfig().translation.withsmallfuncsets and \ - hasattr(rtyper.type_system.rpbc, 'SmallFunctionSetPBCRepr'): + if 1 < len(s_pbc.descriptions) < rtyper.getconfig().translation.withsmallfuncsets: callfamily = s_pbc.any_description().getcallfamily() concretetable, uniquerows = get_concrete_calltable(rtyper, callfamily) if len(uniquerows) == 1 and (not s_pbc.subset_of or small_cand(rtyper, s_pbc.subset_of)): @@ -22,6 +21,9 @@ class __extend__(annmodel.SomePBC): def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rpbc import (FunctionsPBCRepr, + SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr, + MethodOfFrozenPBCRepr) if self.isNone(): return none_frozen_pbc_repr kind = self.getKind() @@ -32,20 +34,20 @@ if sample.overridden: getRepr = OverriddenFunctionPBCRepr else: - getRepr = rtyper.type_system.rpbc.FunctionsPBCRepr + getRepr = FunctionsPBCRepr if small_cand(rtyper, self): - getRepr = rtyper.type_system.rpbc.SmallFunctionSetPBCRepr + getRepr = SmallFunctionSetPBCRepr else: getRepr = getFrozenPBCRepr elif issubclass(kind, description.ClassDesc): # user classes - getRepr = rtyper.type_system.rpbc.ClassesPBCRepr + getRepr = ClassesPBCRepr elif issubclass(kind, description.MethodDesc): - getRepr = rtyper.type_system.rpbc.MethodsPBCRepr + getRepr = MethodsPBCRepr elif 
issubclass(kind, description.FrozenDesc): getRepr = getFrozenPBCRepr elif issubclass(kind, description.MethodOfFrozenDesc): - getRepr = rtyper.type_system.rpbc.MethodOfFrozenPBCRepr + getRepr = MethodOfFrozenPBCRepr else: raise TyperError("unexpected PBC kind %r" % (kind,)) @@ -350,6 +352,8 @@ return rtype_call_specialcase(hop) def getFrozenPBCRepr(rtyper, s_pbc): + from rpython.rtyper.lltypesystem.rpbc import ( + MultipleUnrelatedFrozenPBCRepr, MultipleFrozenPBCRepr) descs = list(s_pbc.descriptions) assert len(descs) >= 1 if len(descs) == 1 and not s_pbc.can_be_None: @@ -362,15 +366,13 @@ try: return rtyper.pbc_reprs['unrelated'] except KeyError: - rpbc = rtyper.type_system.rpbc - result = rpbc.MultipleUnrelatedFrozenPBCRepr(rtyper) + result = MultipleUnrelatedFrozenPBCRepr(rtyper) rtyper.pbc_reprs['unrelated'] = result return result try: return rtyper.pbc_reprs[access] except KeyError: - result = rtyper.type_system.rpbc.MultipleFrozenPBCRepr(rtyper, - access) + result = MultipleFrozenPBCRepr(rtyper, access) rtyper.pbc_reprs[access] = result rtyper.add_pendingsetup(result) return result @@ -612,9 +614,10 @@ return inputconst(Void, None) def rtype_is_((robj1, rnone2), hop): + from rpython.rtyper.lltypesystem.rpbc import rtype_is_None if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) - return hop.rtyper.type_system.rpbc.rtype_is_None(robj1, rnone2, hop) + return rtype_is_None(robj1, rnone2, hop) class __extend__(pairtype(NoneFrozenPBCRepr, Repr)): @@ -622,10 +625,10 @@ return inputconst(r_to, None) def rtype_is_((rnone1, robj2), hop): + from rpython.rtyper.lltypesystem.rpbc import rtype_is_None if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) - return hop.rtyper.type_system.rpbc.rtype_is_None( - robj2, rnone1, hop, pos=1) + return rtype_is_None(robj2, rnone1, hop, pos=1) # ____________________________________________________________ diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -4,7 +4,6 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Bool, Void, UniChar from rpython.rtyper.rmodel import IntegerRepr, IteratorRepr, inputconst, Repr -from rpython.rtyper.rtuple import AbstractTupleRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name from rpython.tool.staticmethods import StaticMethods @@ -88,26 +87,33 @@ class __extend__(annmodel.SomeString): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.string_repr + from rpython.rtyper.lltypesystem.rstr import string_repr + return string_repr + def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeUnicodeString): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.unicode_repr + from rpython.rtyper.lltypesystem.rstr import unicode_repr + return unicode_repr def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeChar): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.char_repr + from rpython.rtyper.lltypesystem.rstr import char_repr + return char_repr + def rtyper_makekey(self): return self.__class__, class __extend__(annmodel.SomeUnicodeCodePoint): def rtyper_makerepr(self, rtyper): - return rtyper.type_system.rstr.unichar_repr + from rpython.rtyper.lltypesystem.rstr import unichar_repr + return unichar_repr + def rtyper_makekey(self): return self.__class__, @@ -271,12 +277,14 @@ raise NotImplementedError def 
rtype_method_join(self, hop): + from rpython.rtyper.lltypesystem.rlist import BaseListRepr + from rpython.rtyper.lltypesystem.rstr import char_repr, unichar_repr hop.exception_cannot_occur() rstr = hop.args_r[0] if hop.s_result.is_constant(): return inputconst(rstr.repr, hop.s_result.const) r_lst = hop.args_r[1] - if not isinstance(r_lst, hop.rtyper.type_system.rlist.BaseListRepr): + if not isinstance(r_lst, BaseListRepr): raise TyperError("string.join of non-list: %r" % r_lst) v_str, v_lst = hop.inputargs(rstr.repr, r_lst) v_length, v_items = self._list_length_items(hop, v_lst, r_lst.lowleveltype) @@ -284,8 +292,8 @@ if hop.args_s[0].is_constant() and hop.args_s[0].const == '': if r_lst.item_repr == rstr.repr: llfn = self.ll.ll_join_strs - elif (r_lst.item_repr == hop.rtyper.type_system.rstr.char_repr or - r_lst.item_repr == hop.rtyper.type_system.rstr.unichar_repr): + elif (r_lst.item_repr == char_repr or + r_lst.item_repr == unichar_repr): v_tp = hop.inputconst(Void, self.lowleveltype) return hop.gendirectcall(self.ll.ll_join_chars, v_length, v_items, v_tp) @@ -555,18 +563,6 @@ hop.exception_cannot_occur() return hop.gendirectcall(r_str.ll.ll_contains, v_str, v_chr) -class __extend__(pairtype(AbstractStringRepr, AbstractTupleRepr)): - def rtype_mod((r_str, r_tuple), hop): - r_tuple = hop.args_r[1] - v_tuple = hop.args_v[1] - - sourcevars = [] - for i, r_arg in enumerate(r_tuple.external_items_r): - v_item = r_tuple.getitem(hop.llops, v_tuple, i) - sourcevars.append((v_item, r_arg)) - - return r_str.ll.do_stringformat(hop, sourcevars) - class __extend__(AbstractCharRepr): def ll_str(self, ch): @@ -655,8 +651,8 @@ #Helper functions for comparisons def _rtype_compare_template(hop, func): - rstr = hop.rtyper.type_system.rstr - vlist = hop.inputargs(rstr.char_repr, rstr.char_repr) + from rpython.rtyper.lltypesystem.rstr import char_repr + vlist = hop.inputargs(char_repr, char_repr) return hop.genop('char_' + func, vlist, resulttype=Bool) class __extend__(AbstractUniCharRepr): @@ -677,8 +673,8 @@ get_ll_fasthash_function = get_ll_hash_function def rtype_ord(_, hop): - rstr = hop.rtyper.type_system.rstr - vlist = hop.inputargs(rstr.unichar_repr) + from rpython.rtyper.lltypesystem.rstr import unichar_repr + vlist = hop.inputargs(unichar_repr) return hop.genop('cast_unichar_to_int', vlist, resulttype=Signed) From noreply at buildbot.pypy.org Tue Aug 6 10:36:47 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 10:36:47 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: update the comment Message-ID: <20130806083647.6BF071C346F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65950:4a5f91e93b7d Date: 2013-08-06 10:36 +0200 http://bitbucket.org/pypy/pypy/changeset/4a5f91e93b7d/ Log: update the comment diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -176,8 +176,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases + # it used to say self.last_exception = None + # 
this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -259,7 +261,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -329,7 +331,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -339,7 +341,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -354,7 +356,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -388,7 +390,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -475,7 +477,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -489,7 +491,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -521,7 +523,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -552,12 +554,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -608,10 +610,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." 
return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -637,8 +639,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -648,7 +650,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -666,7 +668,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) From noreply at buildbot.pypy.org Tue Aug 6 10:38:20 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 10:38:20 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: Debugging leftover Message-ID: <20130806083820.757B61C346F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65951:c749bd50ed3d Date: 2013-08-06 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/c749bd50ed3d/ Log: Debugging leftover diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -13,7 +13,7 @@ class CodeWriter(object): callcontrol = None # for tests - debug = True + debug = False def __init__(self, cpu=None, jitdrivers_sd=[]): self.cpu = cpu From noreply at buildbot.pypy.org Tue Aug 6 10:39:50 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 10:39:50 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: Kill the leftover from the existance of the FORCE_VIRTUALIZABLE operation Message-ID: <20130806083950.2B45A1C00F4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65952:2cb1b6cfb53f Date: 2013-08-06 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/2cb1b6cfb53f/ Log: Kill the leftover from the existance of the FORCE_VIRTUALIZABLE operation diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -143,7 +143,6 @@ self.heap_cache.clear() self.heap_array_cache.clear() - self.nonstandard_virtualizables.clear() def is_class_known(self, box): return box in self.known_class_boxes From noreply at buildbot.pypy.org Tue Aug 6 10:43:42 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 10:43:42 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: Clean up. force_descr is no longer used. test_runner does not have to have Message-ID: <20130806084342.A720E1C00F4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65953:ef0a7113ac43 Date: 2013-08-06 10:43 +0200 http://bitbucket.org/pypy/pypy/changeset/ef0a7113ac43/ Log: Clean up. force_descr is no longer used. 
test_runner does not have to have force_virtualizable test, since it's a normal cond_call these days diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -593,12 +593,11 @@ _TYPE = llmemory.GCREF def __init__(self, latest_descr, values, - last_exception=None, saved_data=None, force_descr=None): + last_exception=None, saved_data=None): self._latest_descr = latest_descr self._values = values self._last_exception = last_exception self._saved_data = saved_data - self.force_descr = force_descr class LLFrame(object): @@ -704,24 +703,15 @@ values = [value for value in values if value is not None] raise Jump(target, values) else: - if self.force_guard_op is not None: - force_descr = self.force_guard_op.getdescr() - else: - force_descr = None raise ExecutionFinished(LLDeadFrame(descr, values, self.last_exception, - saved_data, force_descr)) + saved_data)) def execute_force_spill(self, _, arg): pass def execute_finish(self, descr, *args): - if self.force_guard_op is not None: - force_descr = self.force_guard_op.getdescr() - else: - force_descr = None - raise ExecutionFinished(LLDeadFrame(descr, args, - force_descr=force_descr)) + raise ExecutionFinished(LLDeadFrame(descr, args)) def execute_label(self, descr, *args): argboxes = self.current_op.getarglist() diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4210,37 +4210,6 @@ assert descr.identifier == 42 assert not self.cpu.grab_exc_value(frame) - def test_force_virtualizable(self): - - class FakeVinfo(object): - pass - - def clear_vable_token(token): - lltype.cast_opaque_ptr(lltype.Ptr(S), token).x = 18 - - FUNC = lltype.FuncType([llmemory.GCREF], lltype.Void) - clear_vable_ptr = llhelper(lltype.Ptr(FUNC), clear_vable_token) - S = lltype.GcStruct('x', ('x', lltype.Signed)) - - pdescr = self.cpu.fielddescrof(S, 'x') - pdescr.vinfo = FakeVinfo() - pdescr.vinfo.clear_vable_token = clear_vable_token - pdescr.vinfo.clear_vable_ptr = clear_vable_ptr - pdescr.vinfo.clear_vable_descr = self.cpu.calldescrof(FUNC, FUNC.ARGS, - FUNC.RESULT, EffectInfo.LEAST_GENERAL) - loop = parse(""" - [p0] - force_virtualizable(p0, descr=pdescr) - i1 = getfield_gc(p0, descr=pdescr) - finish(i1) - """, namespace={'pdescr': pdescr}) - looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - s = lltype.malloc(S) - s.x = 13 - frame = self.cpu.execute_token(looptoken, lltype.cast_opaque_ptr(llmemory.GCREF, s)) - assert self.cpu.get_int_value(frame, 0) == 18 - def test_setarrayitem_raw_short(self): # setarrayitem_raw(140737353744432, 0, 30583, descr=) A = rffi.CArray(rffi.SHORT) From noreply at buildbot.pypy.org Tue Aug 6 10:56:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 10:56:40 +0200 (CEST) Subject: [pypy-commit] pypy default: issue #1568 Message-ID: <20130806085640.06B7E1C013B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65954:867a673cccfb Date: 2013-08-06 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/867a673cccfb/ Log: issue #1568 Trying to change socket.py to explicitly crash if some other lib messes around with internal implementation details --- more specifically, if they build a _socketobject() or _fileobject() with a custom object. 
The goal is to require these libraries to provide the correct fix by adding _reuse() and _drop() methods on their custom objects. diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -179,12 +179,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). + self._sock = _sock def send(self, data, flags=0): @@ -216,9 +225,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +288,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. 
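As a rough illustration of the contract spelled out in the comment above, a library's custom socket-like object has to carry its own reference counter. The class below is a hypothetical sketch: only the _reuse()/_drop() behaviour follows this patch, while the class name, the wrapped-socket attribute and the attribute delegation are invented for the example.

    class RefCountedSocketWrapper(object):
        # Hypothetical stand-in for a custom object that a library
        # passes to socket._socketobject() or socket._fileobject().
        def __init__(self, real_sock):
            self._real_sock = real_sock
            self._refcount = 0               # explicit counter, starts at 0

        def _reuse(self):
            self._refcount += 1              # one more wrapper holds this object

        def _drop(self):
            self._refcount -= 1
            if self._refcount < 1:           # dropped back to zero,
                self._real_sock.close()      # so really close now

        def __getattr__(self, name):         # delegate everything else
            return getattr(self._real_sock, name)

The mock objects patched into the test suite further down follow the same contract with no-op _reuse()/_drop() methods, and the ssl.py fix keeps its counter in _makefile_refs.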
+ sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +331,10 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: - s._drop() if self._close: self._sock.close() self._sock = None + s._drop() def __del__(self): try: From noreply at buildbot.pypy.org Tue Aug 6 10:57:24 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 10:57:24 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: fix the merge Message-ID: <20130806085724.21FEA1C013B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65955:132a6ea70bca Date: 2013-08-06 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/132a6ea70bca/ Log: fix the merge diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -76,7 +76,7 @@ def buildinstancerepr(rtyper, classdef, gcflavor='gc'): - from rpython.rtyper.rvirtualizable2 import VirtualizableInstanceRepr + from rpython.rtyper.rvirtualizable import VirtualizableInstanceRepr if classdef is None: unboxed = [] From noreply at buildbot.pypy.org Tue Aug 6 11:17:36 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 6 Aug 2013 11:17:36 +0200 (CEST) Subject: [pypy-commit] pypy default: put the name of the driver in the location string, in case get_printable_location is not set Message-ID: <20130806091736.7373F1C01B7@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65956:cb457dcf368c Date: 2013-08-06 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/cb457dcf368c/ Log: put the name of the driver in the location string, in case get_printable_location is not set diff --git a/rpython/jit/metainterp/test/test_warmstate.py b/rpython/jit/metainterp/test/test_warmstate.py --- a/rpython/jit/metainterp/test/test_warmstate.py +++ b/rpython/jit/metainterp/test/test_warmstate.py @@ -185,7 +185,7 @@ state.jit_getter = jit_getter state.make_jitdriver_callbacks() res = state.get_location_str([ConstInt(5), constfloat(42.5)]) - assert res == '(no jitdriver.get_printable_location!)' + assert res == '(: no get_printable_location)' def test_make_jitdriver_callbacks_3(): def get_location(x, y): diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -600,7 +600,12 @@ # get_location_ptr = self.jitdriver_sd._get_printable_location_ptr if get_location_ptr is None: - missing = '(no jitdriver.get_printable_location!)' + jitdriver = self.jitdriver_sd.jitdriver + if self.jitdriver_sd.jitdriver: + drivername = jitdriver.name + else: + drivername = '' + missing = '(%s: no get_printable_location)' % drivername def get_location_str(greenkey): return missing else: From noreply at buildbot.pypy.org Tue Aug 6 11:17:37 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 6 Aug 2013 11:17:37 +0200 (CEST) Subject: [pypy-commit] pypy default: add a name to all the JitDriver which lacked it Message-ID: <20130806091737.A598C1C01CC@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65957:175f992157ba Date: 2013-08-06 11:16 +0200 http://bitbucket.org/pypy/pypy/changeset/175f992157ba/ Log: add a name to all the JitDriver which lacked it diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -56,7 +56,7 @@ def descr_typecode(space, self): 
return space.wrap(self.typecode) -arr_eq_driver = jit.JitDriver(greens = ['comp_func'], reds = 'auto') +arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto') EQ, NE, LT, LE, GT, GE = range(6) def compare_arrays(space, arr1, arr2, comp_op): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -155,7 +155,8 @@ obj_iter.next() return cur_value -reduce_cum_driver = jit.JitDriver(greens = ['shapelen', 'func', 'dtype'], +reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', + greens = ['shapelen', 'func', 'dtype'], reds = 'auto') def compute_reduce_cumultative(obj, out, calc_dtype, func, identity): @@ -529,8 +530,9 @@ val_arr.descr_getitem(space, w_idx)) iter.next() -byteswap_driver = jit.JitDriver(greens = ['dtype'], - reds = 'auto') +byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', + greens = ['dtype'], + reds = 'auto') def byteswap(from_, to): dtype = from_.dtype @@ -542,8 +544,9 @@ to_iter.next() from_iter.next() -choose_driver = jit.JitDriver(greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') +choose_driver = jit.JitDriver(name='numpy_choose_driver', + greens = ['shapelen', 'mode', 'dtype'], + reds = 'auto') def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -572,8 +575,9 @@ out_iter.next() arr_iter.next() -clip_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +clip_driver = jit.JitDriver(name='numpy_clip_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def clip(space, arr, shape, min, max, out): arr_iter = arr.create_iter(shape) @@ -597,8 +601,9 @@ out_iter.next() min_iter.next() -round_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +round_driver = jit.JitDriver(name='numpy_round_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def round(space, arr, dtype, shape, decimals, out): arr_iter = arr.create_iter(shape) @@ -612,7 +617,8 @@ arr_iter.next() out_iter.next() -diagonal_simple_driver = jit.JitDriver(greens = ['axis1', 'axis2'], +diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', + greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): From noreply at buildbot.pypy.org Tue Aug 6 11:17:38 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 6 Aug 2013 11:17:38 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130806091738.E274D1C033D@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65958:fb2eccc0e1e0 Date: 2013-08-06 11:16 +0200 http://bitbucket.org/pypy/pypy/changeset/fb2eccc0e1e0/ Log: merge heads diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -179,12 +179,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). 
These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). + self._sock = _sock def send(self, data, flags=0): @@ -216,9 +225,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +288,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +331,10 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: - s._drop() if self._close: self._sock.close() self._sock = None + s._drop() def __del__(self): try: From noreply at buildbot.pypy.org Tue Aug 6 11:29:16 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 11:29:16 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: fix this test (if rewrite runs before virtualize) Message-ID: <20130806092916.073441C01B7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65959:56723750c93d Date: 2013-08-06 11:28 +0200 http://bitbucket.org/pypy/pypy/changeset/56723750c93d/ Log: fix this test (if rewrite runs before virtualize) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -682,6 +682,11 @@ self.do_RAW_MALLOC_VARSIZE_CHAR(op) elif effectinfo.oopspecindex == EffectInfo.OS_RAW_FREE: self.do_RAW_FREE(op) + elif effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + # we might end up having CALL here instead of COND_CALL + value = self.getvalue(op.getarg(1)) + if value.is_virtual(): + return else: self.emit_operation(op) From noreply at buildbot.pypy.org Tue Aug 6 11:38:05 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 11:38:05 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: fix the test Message-ID: <20130806093805.343881C01B7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65960:beb44d5c43d0 Date: 2013-08-06 11:37 +0200 http://bitbucket.org/pypy/pypy/changeset/beb44d5c43d0/ Log: fix the test diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -794,7 +794,7 @@ return frame.thing.val res = self.meta_interp(main, [0], inline=True) - self.check_resops(cond_call=2) + self.check_resops(call=0, 
cond_call=0) # got removed by optimization assert res == main(0) def test_directly_call_assembler_virtualizable_reset_token(self): From noreply at buildbot.pypy.org Tue Aug 6 11:47:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 11:47:40 +0200 (CEST) Subject: [pypy-commit] pypy default: Skip the curses test when running on top of a plain CPython and CFFI is Message-ID: <20130806094740.28E591C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65961:1b7efda3796a Date: 2013-08-06 11:47 +0200 http://bitbucket.org/pypy/pypy/changeset/1b7efda3796a/ Log: Skip the curses test when running on top of a plain CPython and CFFI is not installed, or a wrong version. diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py --- a/pypy/module/test_lib_pypy/test_curses.py +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -1,6 +1,15 @@ +import pytest + +# Check that lib_pypy.cffi finds the correct version of _cffi_backend. +# Otherwise, the test is skipped. It should never be skipped when run +# with "pypy py.test -A". +try: + from lib_pypy import cffi; cffi.FFI() +except (ImportError, AssertionError), e: + pytest.skip("no cffi module or wrong version (%s)" % (e,)) + from lib_pypy import _curses -import pytest lib = _curses.lib From noreply at buildbot.pypy.org Tue Aug 6 11:54:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 11:54:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Clarify the meaning of this constant Message-ID: <20130806095450.232A81C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65962:d128eac00db1 Date: 2013-08-06 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/d128eac00db1/ Log: Clarify the meaning of this constant diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -124,7 +124,7 @@ # note that GCFLAG_CARDS_SET is the most significant bit of a byte: # this is required for the JIT (x86) -TID_MASK = (first_gcflag << 8) - 1 +_GCFLAG_FIRST_UNUSED = first_gcflag << 8 # the first unused bit FORWARDSTUB = lltype.GcStruct('forwarding_stub', @@ -944,7 +944,7 @@ ll_assert(tid == -42, "bogus header for young obj") else: ll_assert(bool(tid), "bogus header (1)") - ll_assert(tid & ~TID_MASK == 0, "bogus header (2)") + ll_assert(tid & -_GCFLAG_FIRST_UNUSED == 0, "bogus header (2)") return result def get_forwarding_address(self, obj): From noreply at buildbot.pypy.org Tue Aug 6 12:09:43 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Tue, 6 Aug 2013 12:09:43 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: Added branch for incremental-gc Message-ID: <20130806100943.3225D1C0134@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r65963:3e89eeb73ed5 Date: 2013-08-06 22:04 +1200 http://bitbucket.org/pypy/pypy/changeset/3e89eeb73ed5/ Log: Added branch for incremental-gc From noreply at buildbot.pypy.org Tue Aug 6 12:24:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 12:24:57 +0200 (CEST) Subject: [pypy-commit] pypy default: rmarshal: support dicts Message-ID: <20130806102457.715721C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65964:c6a81f72894e Date: 2013-08-06 12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/c6a81f72894e/ Log: rmarshal: support dicts diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ 
b/rpython/rlib/rmarshal.py @@ -77,6 +77,7 @@ TYPE_STRING = 's' TYPE_TUPLE = '(' TYPE_LIST = '[' +TYPE_DICT = '{' dumpers = [] loaders = [] @@ -302,6 +303,12 @@ loader.pos = pos + 1 return loader.buf[pos] +def peekchr(loader): + pos = loader.pos + while pos >= len(loader.buf): + loader.need_more_data() + return loader.buf[pos] + def readlong(loader): a = ord(readchr(loader)) b = ord(readchr(loader)) @@ -398,6 +405,51 @@ add_loader(s_list, load_list_or_none) +class __extend__(pairtype(MTag, annmodel.SomeDict)): + + def install_marshaller((tag, s_dict)): + def dump_dict_or_none(buf, x): + if x is None: + dump_none(buf, x) + else: + buf.append(TYPE_DICT) + for key, value in x.items(): + keydumper(buf, key) + valuedumper(buf, value) + buf.append('0') # end of dict + + keydumper = get_marshaller(s_dict.dictdef.dictkey.s_value) + valuedumper = get_marshaller(s_dict.dictdef.dictvalue.s_value) + if (s_dict.dictdef.dictkey.dont_change_any_more or + s_dict.dictdef.dictvalue.dont_change_any_more): + s_general_dict = s_dict + else: + s_key = get_dumper_annotation(keydumper) + s_value = get_dumper_annotation(valuedumper) + s_general_dict = annotation({s_key: s_value}) + add_dumper(s_general_dict, dump_dict_or_none) + + def install_unmarshaller((tag, s_dict)): + def load_dict_or_none(loader): + t = readchr(loader) + if t == TYPE_DICT: + result = {} + while peekchr(loader) != '0': + key = keyloader(loader) + value = valueloader(loader) + result[key] = value + readchr(loader) # consume the final '0' + return result + elif t == TYPE_NONE: + return None + else: + raise ValueError("expected a dict or None") + + keyloader = get_loader(s_dict.dictdef.dictkey.s_value) + valueloader = get_loader(s_dict.dictdef.dictvalue.s_value) + add_loader(s_dict, load_dict_or_none) + + class __extend__(pairtype(MTag, annmodel.SomeTuple)): def install_marshaller((tag, s_tuple)): diff --git a/rpython/rlib/test/test_rmarshal.py b/rpython/rlib/test/test_rmarshal.py --- a/rpython/rlib/test/test_rmarshal.py +++ b/rpython/rlib/test/test_rmarshal.py @@ -9,6 +9,7 @@ [int], annmodel.SomeString(can_be_None=True), annmodel.s_None, + {int: int}, ] @@ -58,6 +59,10 @@ get_marshaller((int, float, (str, ())))(buf, (7, -1.5, ("foo", ()))) assert marshal.loads(''.join(buf)) == (7, -1.5, ("foo", ())) + buf = [] + get_marshaller({int: str})(buf, {2: "foo", -3: "bar"}) + assert marshal.loads(''.join(buf)) == {2: "foo", -3: "bar"} + for typ in types_that_can_be_none: buf = [] get_marshaller(typ)(buf, None) @@ -111,6 +116,11 @@ res = get_unmarshaller((int, (str, ())))(buf) assert res == (7, ("foo", ())) + buf = ('{i\xfb\xff\xff\xffs\x03\x00\x00\x00bar' + 'i\x06\x00\x00\x00s\x00\x00\x00\x000') + res = get_unmarshaller({int: str})(buf) + assert res == {-5: "bar", 6: ""} + for typ in types_that_can_be_none: buf = 'N' assert get_unmarshaller(typ)(buf) is None From noreply at buildbot.pypy.org Tue Aug 6 12:49:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 12:49:41 +0200 (CEST) Subject: [pypy-commit] pypy default: fixes Message-ID: <20130806104941.778ED1C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65965:105d5feef576 Date: 2013-08-06 12:49 +0200 http://bitbucket.org/pypy/pypy/changeset/105d5feef576/ Log: fixes diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 +165,8 @@ # All _delegate_methods must also be initialized here. 
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. The @@ -331,10 +333,11 @@ self.flush() finally: s = self._sock - if self._close: - self._sock.close() self._sock = None - s._drop() + if s is not None: + s._drop() + if self._close: + s.close() def __del__(self): try: From noreply at buildbot.pypy.org Tue Aug 6 12:55:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 12:55:20 +0200 (CEST) Subject: [pypy-commit] pypy default: Carefully prevent overflows from passing silently Message-ID: <20130806105520.255341C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65966:163681440d6c Date: 2013-08-06 12:54 +0200 http://bitbucket.org/pypy/pypy/changeset/163681440d6c/ Log: Carefully prevent overflows from passing silently diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -6,7 +6,7 @@ from rpython.annotator.signature import annotation from rpython.annotator.listdef import ListDef, TooLateForChange from rpython.tool.pairtype import pair, pairtype -from rpython.rlib.rarithmetic import r_longlong, intmask, LONG_BIT +from rpython.rlib.rarithmetic import r_longlong, intmask, LONG_BIT, ovfcheck from rpython.rlib.rfloat import formatd, rstring_to_float from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rstring import assert_str0 @@ -289,7 +289,10 @@ if count < 0: raise ValueError("negative count") pos = loader.pos - end = pos + count + try: + end = ovfcheck(pos + count) + except OverflowError: + raise ValueError("cannot decode count: value too big") while end > len(loader.buf): loader.need_more_data() loader.pos = end From noreply at buildbot.pypy.org Tue Aug 6 13:41:16 2013 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 6 Aug 2013 13:41:16 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: proper import of cpyext before use Message-ID: <20130806114116.A11511C00F4@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r65967:a0e5fb5c0db0 Date: 2013-08-06 03:43 -0700 http://bitbucket.org/pypy/pypy/changeset/a0e5fb5c0db0/ Log: proper import of cpyext before use diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -570,6 +570,7 @@ def free_argument(self, space, arg, call_local): if hasattr(space, "fake"): raise NotImplementedError + space.getbuiltinmodule("cpyext") from pypy.module.cpyext.pyobject import Py_DecRef, PyObject Py_DecRef(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0])) From noreply at buildbot.pypy.org Tue Aug 6 13:41:18 2013 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 6 Aug 2013 13:41:18 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: consistent usemodules to prevent cpyext initialization ordering issues Message-ID: <20130806114118.239141C3634@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r65968:15ed4866c0da Date: 2013-08-06 04:40 -0700 http://bitbucket.org/pypy/pypy/changeset/15ed4866c0da/ Log: consistent usemodules to prevent cpyext initialization ordering issues diff --git a/pypy/module/cppyy/test/test_aclassloader.py b/pypy/module/cppyy/test/test_aclassloader.py --- a/pypy/module/cppyy/test/test_aclassloader.py +++ b/pypy/module/cppyy/test/test_aclassloader.py @@ -12,7 +12,7 @@ 
class AppTestACLASSLOADER: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.space.appexec([], """(): diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -15,7 +15,7 @@ raise OSError("'make' failed (see stderr)") class AppTestADVANCEDCPP: - spaceconfig = dict(usemodules=['cppyy', 'array']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.wrap(test_dct) diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py --- a/pypy/module/cppyy/test/test_cint.py +++ b/pypy/module/cppyy/test/test_cint.py @@ -18,7 +18,7 @@ raise OSError("'make' failed (see stderr)") class AppTestCINT: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def test01_globals(self): """Test the availability of ROOT globals""" @@ -96,7 +96,7 @@ class AppTestCINTPYTHONIZATIONS: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def test01_strings(self): """Test TString/TObjString compatibility""" @@ -139,7 +139,7 @@ class AppTestCINTTTREE: - spaceconfig = dict(usemodules=['cppyy', 'array', '_rawffi', '_cffi_backend']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools', '_cffi_backend']) def setup_class(cls): cls.w_N = cls.space.wrap(5) @@ -385,7 +385,7 @@ class AppTestCINTREGRESSION: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) # these are tests that at some point in the past resulted in failures on # PyROOT; kept here to confirm no regression from PyROOT @@ -405,7 +405,7 @@ class AppTestSURPLUS: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) # these are tests that were historically exercised on ROOT classes and # have twins on custom classes; kept here just in case differences crop diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -28,7 +28,7 @@ class AppTestCPPYY: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_example01, cls.w_payload = cls.space.unpackiterable(cls.space.appexec([], """(): diff --git a/pypy/module/cppyy/test/test_crossing.py b/pypy/module/cppyy/test/test_crossing.py --- a/pypy/module/cppyy/test/test_crossing.py +++ b/pypy/module/cppyy/test/test_crossing.py @@ -65,8 +65,9 @@ return str(pydname) class AppTestCrossing(AppTestCpythonExtensionBase): - spaceconfig = dict(usemodules=['cpyext', 'cppyy', 'thread', '_rawffi', '_ffi', - 'array', 'itertools', 'rctime', 'binascii']) + #spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', + # 'array', 'rctime', 'binascii', 'itertools', '_ffi', 'cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools', 'cpyext']) def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) @@ -74,7 +75,7 @@ # to allow the generated extension module be loaded first) cls.w_test_dct = cls.space.wrap(test_dct) cls.w_pre_imports = cls.space.appexec([], """(): - import cppyy, ctypes""") # 
prevents leak-checking complaints on ctypes + import cppyy, cpyext, ctypes""") # prevents leak-checking complaints on ctypes def setup_method(self, func): AppTestCpythonExtensionBase.setup_method.im_func(self, func) diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -12,7 +12,7 @@ raise OSError("'make' failed (see stderr)") class AppTestDATATYPES: - spaceconfig = dict(usemodules=['cppyy', 'array', '_rawffi']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_N = cls.space.wrap(5) # should be imported from the dictionary diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -12,7 +12,7 @@ raise OSError("'make' failed (see stderr)") class AppTestFRAGILE: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.wrap(test_dct) diff --git a/pypy/module/cppyy/test/test_operators.py b/pypy/module/cppyy/test/test_operators.py --- a/pypy/module/cppyy/test/test_operators.py +++ b/pypy/module/cppyy/test/test_operators.py @@ -12,7 +12,7 @@ raise OSError("'make' failed (see stderr)") class AppTestOPERATORS: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_N = cls.space.wrap(5) # should be imported from the dictionary diff --git a/pypy/module/cppyy/test/test_overloads.py b/pypy/module/cppyy/test/test_overloads.py --- a/pypy/module/cppyy/test/test_overloads.py +++ b/pypy/module/cppyy/test/test_overloads.py @@ -12,7 +12,7 @@ raise OSError("'make' failed (see stderr)") class AppTestOVERLOADS: - spaceconfig = dict(usemodules=['cppyy', 'array']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): env = os.environ diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -16,7 +16,7 @@ raise OSError("'make' failed (see stderr)") class AppTestPYTHONIFY: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.wrap(test_dct) @@ -325,7 +325,7 @@ class AppTestPYTHONIFY_UI: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.wrap(test_dct) diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -12,7 +12,7 @@ raise OSError("'make' failed (see stderr)") class AppTestSTLVECTOR: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_N = cls.space.wrap(13) @@ -200,7 +200,7 @@ class AppTestSTLSTRING: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.wrap(test_dct) @@ -280,7 +280,7 @@ class AppTestSTLLIST: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig 
= dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_N = cls.space.wrap(13) @@ -336,13 +336,13 @@ class AppTestSTLMAP: - spaceconfig = dict(usemodules=['cppyy', 'itertools']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_N = cls.space.wrap(13) cls.w_test_dct = cls.space.wrap(test_dct) cls.w_stlstring = cls.space.appexec([], """(): - import cppyy, math, sys + import cppyy return cppyy.load_reflection_info(%r)""" % (test_dct, )) def test01_builtin_map_type(self): @@ -445,7 +445,7 @@ class AppTestSTLITERATOR: - spaceconfig = dict(usemodules=['cppyy', 'itertools']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.wrap(test_dct) diff --git a/pypy/module/cppyy/test/test_streams.py b/pypy/module/cppyy/test/test_streams.py --- a/pypy/module/cppyy/test/test_streams.py +++ b/pypy/module/cppyy/test/test_streams.py @@ -12,7 +12,7 @@ raise OSError("'make' failed (see stderr)") class AppTestSTDStreams: - spaceconfig = dict(usemodules=['cppyy']) + spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): cls.w_test_dct = cls.space.wrap(test_dct) From noreply at buildbot.pypy.org Tue Aug 6 14:15:29 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 14:15:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for tests: add _reuse and _drop methods in mock low-level socket objects Message-ID: <20130806121529.405681C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65969:c30185912227 Date: 2013-08-06 14:14 +0200 http://bitbucket.org/pypy/pypy/changeset/c30185912227/ Log: Fix for tests: add _reuse and _drop methods in mock low-level socket objects diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1066,6 +1066,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1321,7 +1324,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1193,6 +1193,8 @@ # out of socket._fileobject() and into a base class. r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) From noreply at buildbot.pypy.org Tue Aug 6 14:19:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 14:19:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix, shown by test_ftplib. 
Message-ID: <20130806121947.C73401C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65970:93abdc5a9e3d Date: 2013-08-06 14:19 +0200 http://bitbucket.org/pypy/pypy/changeset/93abdc5a9e3d/ Log: Fix, shown by test_ftplib. diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -358,11 +358,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, From noreply at buildbot.pypy.org Tue Aug 6 14:54:31 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 6 Aug 2013 14:54:31 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Changed the fail primitive to better print in case of DNU and not halting (allowing for endless loops. Message-ID: <20130806125431.71F701C00F4@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r509:b2990d16a4e8 Date: 2013-07-20 23:16 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b2990d16a4e8/ Log: Changed the fail primitive to better print in case of DNU and not halting (allowing for endless loops. Increased the initial priority for the injected process in old images to 8(/9). diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -369,10 +369,11 @@ print '' print s_frame.print_stack() w_message = s_frame.peek(0) - print w_message.as_repr_string() + print ("%s" % w_message).replace('\r', '\n') + print ("%s" % s_frame.peek(1)).replace('\r', '\n') if isinstance(w_message, model.W_PointersObject): - print w_message._vars - raise Exit('Probably Debugger called...') + print ('%s' % w_message._vars).replace('\r', '\n') + # raise Exit('Probably Debugger called...') raise PrimitiveFailedError() # ___________________________________________________________________________ @@ -634,7 +635,6 @@ w_bitmap = w_dest_form.fetch(space, 0) assert isinstance(w_bitmap, model.W_DisplayBitmap) w_bitmap.flush_to_screen() - return w_rcvr # try: # s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) @@ -645,8 +645,8 @@ # assert isinstance(w_bitmap, model.W_DisplayBitmap) # w_bitmap.flush_to_screen() - # # in case we return normally, we have to restore the removed w_rcvr - # return w_rcvr + # in case we return normally, we have to restore the removed w_rcvr + return w_rcvr @expose_primitive(BE_CURSOR) def func(interp, s_frame, argcount): diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -34,6 +34,8 @@ # Priorities below 10 are not allowed in newer versions of Squeak. 
if interp.image.version.has_closures: priority = max(11, priority) + else: + priority = 7 w_benchmark_proc.store(space, 2, space.wrap_int(priority)) # make process eligible for scheduling From noreply at buildbot.pypy.org Tue Aug 6 14:54:32 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 6 Aug 2013 14:54:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: changed benchmarks Message-ID: <20130806125432.EAF921C00F4@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r510:cf38c4375aee Date: 2013-07-20 23:16 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/cf38c4375aee/ Log: changed benchmarks diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index 4bb8ed8fd0cdcee3a381df2e4ad05a6af5337096..c7fd015aa69e9491f224dd71a879f226a3958e69 GIT binary patch [cut] From noreply at buildbot.pypy.org Tue Aug 6 14:54:34 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 6 Aug 2013 14:54:34 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added benchmarksMixedRules to the image as a bitblt-only benchmark Message-ID: <20130806125434.7614E1C00F4@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r511:085d4f0b0d98 Date: 2013-07-24 09:22 +0000 http://bitbucket.org/pypy/lang-smalltalk/changeset/085d4f0b0d98/ Log: added benchmarksMixedRules to the image as a bitblt-only benchmark diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index c7fd015aa69e9491f224dd71a879f226a3958e69..57de1563eabae878b457a2a9b7bc8961ca646eab GIT binary patch [cut] From noreply at buildbot.pypy.org Tue Aug 6 14:54:35 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 6 Aug 2013 14:54:35 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: adde benchmarkFill to the minibluebookdebug image, allowing for the FILL primitive to be benchmarked (145) Message-ID: <20130806125435.D42101C00F4@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r512:00d58cffb79d Date: 2013-08-06 14:53 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/00d58cffb79d/ Log: adde benchmarkFill to the minibluebookdebug image, allowing for the FILL primitive to be benchmarked (145) fixed the fill primitive for word-arrays diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index 57de1563eabae878b457a2a9b7bc8961ca646eab..ca00384ffd476a836212366990611cb4ce6b79cf GIT binary patch [cut] diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1057,7 +1057,7 @@ raise PrimitiveFailedError for i in xrange(w_arg.size()): w_arg.setchar(i, chr(new_value)) - elif isinstance(w_arg, model.W_PointersObject) or isinstance(w_arg, model.W_DisplayBitmap): + elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): for i in xrange(w_arg.size()): w_arg.setword(i, new_value) else: From noreply at buildbot.pypy.org Tue Aug 6 17:53:26 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 17:53:26 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: fix test_pypy_c Message-ID: <20130806155326.716901C0134@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65971:fdaa8624e272 Date: 2013-08-06 17:52 +0200 http://bitbucket.org/pypy/pypy/changeset/fdaa8624e272/ Log: fix test_pypy_c diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- 
a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) From noreply at buildbot.pypy.org Tue Aug 6 17:55:30 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 17:55:30 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: close to-be-merged branch Message-ID: <20130806155530.B20621C0134@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65972:bed146c0627e Date: 2013-08-06 17:53 +0200 http://bitbucket.org/pypy/pypy/changeset/bed146c0627e/ Log: close to-be-merged branch From noreply at buildbot.pypy.org Tue Aug 6 17:55:32 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 17:55:32 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge kill-gen-store-back-in. Message-ID: <20130806155532.CDE011C0134@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65973:51204985cc75 Date: 2013-08-06 17:54 +0200 http://bitbucket.org/pypy/pypy/changeset/51204985cc75/ Log: Merge kill-gen-store-back-in. This branch kills gen_store_back_in_virtualizable in pyjitpl.py. Should improve performance on non-generator like code. Additionally it removes some of the implicit assumptions about the virtualizables and tries to document them at least a little. diff too long, truncating to 2000 out of 2487 lines diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. _Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. + +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. Frames +can escape and they can also be allocated already at the moment we enter the +JIT. In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. 
They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). + +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. + It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -176,9 +176,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -260,7 +261,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). 
@@ -330,7 +331,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -340,7 +341,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -355,7 +356,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -389,7 +390,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -476,7 +477,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -490,7 +491,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -522,7 +523,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -553,12 +554,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -609,10 +610,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." 
return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -638,8 +639,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -649,7 +650,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -667,7 +668,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,18 +12,18 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap -PyFrame._virtualizable2_ = ['last_instr', 'pycode', - 'valuestackdepth', 'locals_stack_w[*]', - 'cells[*]', - 'last_exception', - 'lastblock', - 'is_being_profiled', - 'w_globals', - 'w_f_trace', - ] +PyFrame._virtualizable_ = ['last_instr', 'pycode', + 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', + 'last_exception', + 'lastblock', + 'is_being_profiled', + 'w_globals', + 'w_f_trace', + ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] @@ -73,7 +73,13 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: + self.last_exception = None return self.popvalue() def jump_absolute(self, jumpto, ec): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) 
i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -14,9 +14,9 @@ return lltype.nullptr(T) interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLtypeCPU + from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now - apply_jit(interp, graph, LLtypeCPU) + apply_jit(interp, graph, LLGraphCPU) def apply_jit(interp, graph, CPUClass): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3088,7 +3088,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class B(A): def meth(self): return self @@ -3128,7 +3128,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class I: pass diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -959,16 +959,6 @@ pmc.B_offs(self.mc.currpos(), c.EQ) return pos - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - tmploc = self._regalloc.get_scratch_reg(INT) - self.mov_loc_loc(vloc, r.ip) - self.mc.MOV_ri(tmploc.value, 0) - self.mc.STR_ri(tmploc.value, r.ip.value, ofs) - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from (tmploc, 0) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -101,6 +101,9 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) + def get_vinfo(self): + return self.vinfo + def __repr__(self): return 'FieldDescr(%r, %r)' % (self.S, self.fieldname) @@ -170,7 +173,7 @@ translate_support_code = False is_llgraph = True - def __init__(self, rtyper, stats=None, *ignored_args, **ignored_kwds): + def __init__(self, rtyper, stats=None, *ignored_args, **kwds): model.AbstractCPU.__init__(self) self.rtyper = rtyper self.llinterp = LLInterpreter(rtyper) @@ -178,6 +181,7 @@ class MiniStats: pass self.stats = stats or MiniStats() + self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): clt = model.CompiledLoopToken(self, looptoken.number) @@ -316,6 +320,8 @@ except KeyError: descr = FieldDescr(S, fieldname) self.descrs[key] = descr + if self.vinfo_for_tests is not None: + descr.vinfo = self.vinfo_for_tests return descr def arraydescrof(self, A): @@ -600,6 +606,7 @@ forced_deadframe = None overflow_flag = False last_exception = None + force_guard_op = None def __init__(self, cpu, argboxes, args): self.env = {} @@ -766,6 +773,8 @@ if self.forced_deadframe is not None: saved_data = self.forced_deadframe._saved_data self.fail_guard(descr, saved_data) + self.force_guard_op = self.current_op + execute_guard_not_forced_2 = execute_guard_not_forced def execute_guard_not_invalidated(self, descr): if self.lltrace.invalid: @@ -887,7 +896,6 @@ # res = CALL assembler_call_helper(pframe) # jmp @done # @fastpath: 
- # RESET_VABLE # res = GETFIELD(pframe, 'result') # @done: # @@ -907,25 +915,17 @@ vable = lltype.nullptr(llmemory.GCREF.TO) # # Emulate the fast path - def reset_vable(jd, vable): - if jd.index_of_virtualizable != -1: - fielddescr = jd.vable_token_descr - NULL = lltype.nullptr(llmemory.GCREF.TO) - self.cpu.bh_setfield_gc(vable, NULL, fielddescr) + # faildescr = self.cpu.get_latest_descr(pframe) if faildescr == self.cpu.done_with_this_frame_descr_int: - reset_vable(jd, vable) return self.cpu.get_int_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_ref: - reset_vable(jd, vable) return self.cpu.get_ref_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_float: - reset_vable(jd, vable) return self.cpu.get_float_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_void: - reset_vable(jd, vable) return None - # + assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: result = assembler_helper_ptr(pframe, vable) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt) + ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -24,6 +24,7 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): + assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr self.failargs = failargs @@ -232,11 +233,8 @@ jmp_location = self._call_assembler_patch_je(result_loc, je_location) - # Path B: fast path. Must load the return value, and reset the token + # Path B: fast path. 
Must load the return value - # Reset the vable token --- XXX really too much special logic here:-( - if jd.index_of_virtualizable >= 0: - self._call_assembler_reset_vtoken(jd, vloc) # self._call_assembler_load_result(op, result_loc) # diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -76,7 +76,13 @@ FLAG_STRUCT = 'X' FLAG_VOID = 'V' -class FieldDescr(AbstractDescr): +class ArrayOrFieldDescr(AbstractDescr): + vinfo = None + + def get_vinfo(self): + return self.vinfo + +class FieldDescr(ArrayOrFieldDescr): name = '' offset = 0 # help translation field_size = 0 @@ -150,12 +156,13 @@ # ____________________________________________________________ # ArrayDescrs -class ArrayDescr(AbstractDescr): +class ArrayDescr(ArrayOrFieldDescr): tid = 0 basesize = 0 # workaround for the annotator itemsize = 0 lendescr = None flag = '\x00' + vinfo = None def __init__(self, basesize, itemsize, lendescr, flag): self.basesize = basesize diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -23,7 +23,7 @@ # - floats neg and abs class Frame(object): - _virtualizable2_ = ['i'] + _virtualizable_ = ['i'] def __init__(self, i): self.i = i @@ -98,7 +98,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -6,7 +6,7 @@ total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 - total_freed_bridges = 0 + total_freed_bridges = 0 # for heaptracker # _all_size_descrs_with_vtable = None @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -294,7 +294,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -79,6 +79,7 @@ allblocks) self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.pending_guard_tokens = None @@ -1846,7 +1847,11 @@ self.mov(fail_descr_loc, RawEbpLoc(ofs)) arglist = op.getarglist() if arglist and arglist[0].type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(1) # rax + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -1991,15 +1996,6 @@ # return jmp_location - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - vtoken_ofs = 
fielddescr.offset - self.mc.MOV(edx, vloc) # we know vloc is on the current frame - self.mc.MOV_mi((edx.value, vtoken_ofs), 0) - # in the line above, TOKEN_NONE = 0 - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from the dead frame's value index 0 @@ -2326,6 +2322,15 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_location - 1, chr(offset)) + def store_force_descr(self, op, fail_locs, frame_depth): + guard_token = self.implement_guard_recovery(op.opnum, + op.getdescr(), + op.getfailargs(), + fail_locs, frame_depth) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(0, guard_token) + def force_token(self, reg): # XXX kill me assert isinstance(reg, RegLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -22,7 +22,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint @@ -1332,6 +1332,13 @@ #if jump_op is not None and jump_op.getdescr() is descr: # self._compute_hint_frame_locations_from_descr(descr) + def consider_guard_not_forced_2(self, op): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = [self.loc(v) for v in op.getfailargs()] + self.assembler.store_force_descr(op, fail_locs, + self.fm.get_frame_depth()) + self.possibly_free_vars(op.getfailargs()) + def consider_keepalive(self, op): pass diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -83,6 +83,7 @@ OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 + OS_JIT_FORCE_VIRTUALIZABLE = 121 # for debugging: _OS_CANRAISE = set([ @@ -165,6 +166,9 @@ EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) +EffectInfo.LEAST_GENERAL = EffectInfo([], [], [], [], + EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + can_invalidate=False) def effectinfo_from_writeanalyze(effects, cpu, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -521,6 +521,8 @@ op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], op.result) return [SpaceOperation('-live-', [], None), op1, None] + if hints.get('force_virtualizable'): + return SpaceOperation('hint_force_virtualizable', [op.args[0]], None) else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) diff --git a/rpython/jit/codewriter/test/test_policy.py b/rpython/jit/codewriter/test/test_policy.py --- a/rpython/jit/codewriter/test/test_policy.py +++ b/rpython/jit/codewriter/test/test_policy.py @@ -131,7 +131,7 @@ def test_access_directly_but_not_seen(): class X: - _virtualizable2_ = ["a"] + _virtualizable_ = ["a"] def h(x, y): w = 0 for i in range(y): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1320,6 +1320,10 @@ from 
rpython.jit.metainterp import quasiimmut quasiimmut.do_force_quasi_immutable(cpu, struct, mutatefielddescr) + @arguments("r") + def bhimpl_hint_force_virtualizable(r): + pass + @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -708,6 +708,8 @@ rstack._stack_criticalcode_start() try: deadframe = cpu.force(token) + # this should set descr to ResumeGuardForceDescr, if it + # was not that already faildescr = cpu.get_latest_descr(deadframe) assert isinstance(faildescr, ResumeGuardForcedDescr) faildescr.handle_async_forcing(deadframe) @@ -715,12 +717,18 @@ rstack._stack_criticalcode_stop() def handle_async_forcing(self, deadframe): - from rpython.jit.metainterp.resume import force_from_resumedata + from rpython.jit.metainterp.resume import (force_from_resumedata, + AlreadyForced) metainterp_sd = self.metainterp_sd vinfo = self.jitdriver_sd.virtualizable_info ginfo = self.jitdriver_sd.greenfield_info - all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, - vinfo, ginfo) + # there is some chance that this is already forced. In this case + # the virtualizable would have a token = NULL + try: + all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, + vinfo, ginfo) + except AlreadyForced: + return # The virtualizable data was stored on the real virtualizable above. # Handle all_virtuals: keep them for later blackholing from the # future failure of the GUARD_NOT_FORCED diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -151,6 +151,8 @@ descr_ptr = cpu.ts.cast_to_baseclass(descr_gcref) return cast_base_ptr_to_instance(AbstractDescr, descr_ptr) + def get_vinfo(self): + raise NotImplementedError class AbstractFailDescr(AbstractDescr): index = -1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5101,6 +5101,15 @@ } self.optimize_loop(ops, expected, call_pure_results) + def test_guard_not_forced_2_virtual(self): + ops = """ + [i0] + p0 = new_array(3, descr=arraydescr) + guard_not_forced_2() [p0] + finish(p0) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7086,6 +7086,19 @@ """ self.optimize_loop(ops, expected) + def test_force_virtualizable_virtual(self): + ops = """ + [i0] + p1 = new_with_vtable(ConstClass(node_vtable)) + cond_call(1, 123, p1, descr=clear_vable) + jump(i0) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + def test_setgetfield_counter(self): ops = """ [p1] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -254,12 +254,19 @@ asmdescr = LoopToken() # it can be whatever, it's not a 
descr though from rpython.jit.metainterp.virtualref import VirtualRefInfo + class FakeWarmRunnerDesc: pass FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced + FUNC = lltype.FuncType([], lltype.Void) + ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + can_invalidate=False, + oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) + clear_vable = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei) + jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -484,6 +484,8 @@ class OptVirtualize(optimizer.Optimization): "Virtualize objects until they escape." + _last_guard_not_forced_2 = None + def new(self): return OptVirtualize() @@ -527,6 +529,20 @@ return self.emit_operation(op) + def optimize_GUARD_NOT_FORCED_2(self, op): + self._last_guard_not_forced_2 = op + + def optimize_FINISH(self, op): + if self._last_guard_not_forced_2 is not None: + guard_op = self._last_guard_not_forced_2 + self.emit_operation(op) + guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) + i = len(self.optimizer._newoperations) - 1 + assert i >= 0 + self.optimizer._newoperations.insert(i, guard_op) + else: + self.emit_operation(op) + def optimize_CALL_MAY_FORCE(self, op): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex @@ -535,6 +551,15 @@ return self.emit_operation(op) + def optimize_COND_CALL(self, op): + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + value = self.getvalue(op.getarg(2)) + if value.is_virtual(): + return + self.emit_operation(op) + def optimize_VIRTUAL_REF(self, op): # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -657,6 +682,11 @@ self.do_RAW_MALLOC_VARSIZE_CHAR(op) elif effectinfo.oopspecindex == EffectInfo.OS_RAW_FREE: self.do_RAW_FREE(op) + elif effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + # we might end up having CALL here instead of COND_CALL + value = self.getvalue(op.getarg(1)) + if value.is_virtual(): + return else: self.emit_operation(op) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -18,7 +18,7 @@ from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.lltypesystem import lltype, rclass +from rpython.rtyper.lltypesystem import lltype, rclass, rffi @@ -313,7 +313,7 @@ opnum = rop.GUARD_TRUE else: opnum = rop.GUARD_FALSE - self.generate_guard(opnum, box) + self.metainterp.generate_guard(opnum, box) if not switchcase: self.pc = target @@ -341,10 +341,12 @@ value = box.nonnull() if value: if not self.metainterp.heapcache.is_class_known(box): - self.generate_guard(rop.GUARD_NONNULL, box, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_NONNULL, box, + resumepc=orgpc) else: if not isinstance(box, Const): - self.generate_guard(rop.GUARD_ISNULL, box, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_ISNULL, box, + resumepc=orgpc) 
promoted_box = box.constbox() self.metainterp.replace_box(box, promoted_box) return value @@ -604,7 +606,7 @@ def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info if (ginfo is not None and fielddescr in ginfo.green_field_descrs - and not self._nonstandard_virtualizable(pc, box)): + and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't # record any operation resbox = executor.execute(self.metainterp.cpu, self.metainterp, @@ -672,6 +674,10 @@ opimpl_raw_load_i = _opimpl_raw_load opimpl_raw_load_f = _opimpl_raw_load + @arguments("box") + def opimpl_hint_force_virtualizable(self, box): + self.metainterp.gen_store_back_in_vable(box) + @arguments("box", "descr", "descr", "orgpc") def opimpl_record_quasiimmut_field(self, box, fielddescr, mutatefielddescr, orgpc): @@ -680,7 +686,8 @@ descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], None, descr=descr) - self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_NOT_INVALIDATED, + resumepc=orgpc) @arguments("box", "descr", "orgpc") def opimpl_jit_force_quasi_immutable(self, box, mutatefielddescr, orgpc): @@ -699,28 +706,46 @@ do_force_quasi_immutable(self.metainterp.cpu, box.getref_base(), mutatefielddescr) raise SwitchToBlackhole(Counters.ABORT_FORCE_QUASIIMMUT) - self.generate_guard(rop.GUARD_ISNULL, mutatebox, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_ISNULL, mutatebox, + resumepc=orgpc) - def _nonstandard_virtualizable(self, pc, box): + def _nonstandard_virtualizable(self, pc, box, fielddescr): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] - if (self.metainterp.jitdriver_sd.virtualizable_info is None and - self.metainterp.jitdriver_sd.greenfield_info is None): - return True # can occur in case of multiple JITs - standard_box = self.metainterp.virtualizable_boxes[-1] - if standard_box is box: - return False if self.metainterp.heapcache.is_nonstandard_virtualizable(box): return True - eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, - box, standard_box) - eqbox = self.implement_guard_value(eqbox, pc) - isstandard = eqbox.getint() - if isstandard: - self.metainterp.replace_box(box, standard_box) - else: - self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) - return not isstandard + if box is self.metainterp.forced_virtualizable: + self.metainterp.forced_virtualizable = None + if (self.metainterp.jitdriver_sd.virtualizable_info is not None or + self.metainterp.jitdriver_sd.greenfield_info is not None): + standard_box = self.metainterp.virtualizable_boxes[-1] + if standard_box is box: + return False + vinfo = self.metainterp.jitdriver_sd.virtualizable_info + if vinfo is fielddescr.get_vinfo(): + eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, + box, standard_box) + eqbox = self.implement_guard_value(eqbox, pc) + isstandard = eqbox.getint() + if isstandard: + self.metainterp.replace_box(box, standard_box) + return False + if not self.metainterp.heapcache.is_unescaped(box): + self.emit_force_virtualizable(fielddescr, box) + self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) + return True + + def emit_force_virtualizable(self, fielddescr, box): + vinfo = fielddescr.get_vinfo() + token_descr = vinfo.vable_token_descr + mi = self.metainterp + tokenbox 
= mi.execute_and_record(rop.GETFIELD_GC, token_descr, box) + condbox = mi.execute_and_record(rop.PTR_NE, None, tokenbox, + history.CONST_NULL) + funcbox = ConstInt(rffi.cast(lltype.Signed, vinfo.clear_vable_ptr)) + calldescr = vinfo.clear_vable_descr + self.execute_varargs(rop.COND_CALL, [condbox, funcbox, box], + calldescr, False, False) def _get_virtualizable_field_index(self, fielddescr): # Get the index of a fielddescr. Must only be called for @@ -730,7 +755,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_vable(self, box, fielddescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fielddescr): return self._opimpl_getfield_gc_any(box, fielddescr) self.metainterp.check_synchronized_virtualizable() index = self._get_virtualizable_field_index(fielddescr) @@ -742,7 +767,7 @@ @arguments("box", "box", "descr", "orgpc") def _opimpl_setfield_vable(self, box, valuebox, fielddescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fielddescr): return self._opimpl_setfield_gc_any(box, valuebox, fielddescr) index = self._get_virtualizable_field_index(fielddescr) self.metainterp.virtualizable_boxes[index] = valuebox @@ -772,7 +797,7 @@ @arguments("box", "box", "descr", "descr", "orgpc") def _opimpl_getarrayitem_vable(self, box, indexbox, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) return self._opimpl_getarrayitem_gc_any(arraybox, indexbox, adescr) self.metainterp.check_synchronized_virtualizable() @@ -786,7 +811,7 @@ @arguments("box", "box", "box", "descr", "descr", "orgpc") def _opimpl_setarrayitem_vable(self, box, indexbox, valuebox, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) self._opimpl_setarrayitem_gc_any(arraybox, indexbox, valuebox, adescr) @@ -802,7 +827,7 @@ @arguments("box", "descr", "descr", "orgpc") def opimpl_arraylen_vable(self, box, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) return self.opimpl_arraylen_gc(arraybox, adescr) vinfo = self.metainterp.jitdriver_sd.virtualizable_info @@ -958,8 +983,9 @@ promoted_box = resbox.constbox() # This is GUARD_VALUE because GUARD_TRUE assumes the existance # of a label when computing resumepc - self.generate_guard(rop.GUARD_VALUE, resbox, [promoted_box], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_VALUE, resbox, + [promoted_box], + resumepc=orgpc) self.metainterp.replace_box(box, constbox) return constbox @@ -971,7 +997,8 @@ def opimpl_guard_class(self, box, orgpc): clsbox = self.cls_of_box(box) if not self.metainterp.heapcache.is_class_known(box): - self.generate_guard(rop.GUARD_CLASS, box, [clsbox], resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_CLASS, box, [clsbox], + resumepc=orgpc) self.metainterp.heapcache.class_now_known(box) return clsbox @@ -989,7 +1016,7 @@ def opimpl_jit_merge_point(self, jdindex, greenboxes, jcposition, redboxes, orgpc): resumedescr = compile.ResumeAtPositionDescr() - self.capture_resumedata(resumedescr, orgpc) + self.metainterp.capture_resumedata(resumedescr, orgpc) any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] @@ 
-1071,8 +1098,8 @@ # xxx hack if not self.metainterp.heapcache.is_class_known(exc_value_box): clsbox = self.cls_of_box(exc_value_box) - self.generate_guard(rop.GUARD_CLASS, exc_value_box, [clsbox], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_CLASS, exc_value_box, + [clsbox], resumepc=orgpc) self.metainterp.class_of_last_exc_is_const = True self.metainterp.last_exc_value_box = exc_value_box self.metainterp.popframe() @@ -1271,43 +1298,6 @@ except ChangeFrame: pass - def generate_guard(self, opnum, box=None, extraargs=[], resumepc=-1): - if isinstance(box, Const): # no need for a guard - return - metainterp = self.metainterp - if box is not None: - moreargs = [box] + extraargs - else: - moreargs = list(extraargs) - metainterp_sd = metainterp.staticdata - if opnum == rop.GUARD_NOT_FORCED: - resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, - metainterp.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = compile.ResumeGuardNotInvalidated() - else: - resumedescr = compile.ResumeGuardDescr() - guard_op = metainterp.history.record(opnum, moreargs, None, - descr=resumedescr) - self.capture_resumedata(resumedescr, resumepc) - self.metainterp.staticdata.profiler.count_ops(opnum, Counters.GUARDS) - # count - metainterp.attach_debug_info(guard_op) - return guard_op - - def capture_resumedata(self, resumedescr, resumepc=-1): - metainterp = self.metainterp - virtualizable_boxes = None - if (metainterp.jitdriver_sd.virtualizable_info is not None or - metainterp.jitdriver_sd.greenfield_info is not None): - virtualizable_boxes = metainterp.virtualizable_boxes - saved_pc = self.pc - if resumepc >= 0: - self.pc = resumepc - resume.capture_resumedata(metainterp.framestack, virtualizable_boxes, - metainterp.virtualref_boxes, resumedescr) - self.pc = saved_pc - def implement_guard_value(self, box, orgpc): """Promote the given Box into a Const. 
Note: be careful, it's a bit unclear what occurs if a single opcode needs to generate @@ -1316,8 +1306,8 @@ return box # no promotion needed, already a Const else: promoted_box = box.constbox() - self.generate_guard(rop.GUARD_VALUE, box, [promoted_box], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_VALUE, box, [promoted_box], + resumepc=orgpc) self.metainterp.replace_box(box, promoted_box) return promoted_box @@ -1411,7 +1401,7 @@ if resbox is not None: self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call() - self.generate_guard(rop.GUARD_NOT_FORCED, None) + self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) if vablebox is not None: self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) self.metainterp.handle_possible_exception() @@ -1660,6 +1650,7 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None + self.forced_virtualizable = None self.partial_trace = None self.retracing_from = -1 self.call_pure_results = args_dict_box() @@ -1783,6 +1774,45 @@ print jitcode.name raise AssertionError + def generate_guard(self, opnum, box=None, extraargs=[], resumepc=-1): + if isinstance(box, Const): # no need for a guard + return + if box is not None: + moreargs = [box] + extraargs + else: + moreargs = list(extraargs) + metainterp_sd = self.staticdata + if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: + resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, + self.jitdriver_sd) + elif opnum == rop.GUARD_NOT_INVALIDATED: + resumedescr = compile.ResumeGuardNotInvalidated() + else: + resumedescr = compile.ResumeGuardDescr() + guard_op = self.history.record(opnum, moreargs, None, + descr=resumedescr) + self.capture_resumedata(resumedescr, resumepc) + self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) + # count + self.attach_debug_info(guard_op) + return guard_op + + def capture_resumedata(self, resumedescr, resumepc=-1): + virtualizable_boxes = None + if (self.jitdriver_sd.virtualizable_info is not None or + self.jitdriver_sd.greenfield_info is not None): + virtualizable_boxes = self.virtualizable_boxes + saved_pc = 0 + if self.framestack: + frame = self.framestack[-1] + saved_pc = frame.pc + if resumepc >= 0: + frame.pc = resumepc + resume.capture_resumedata(self.framestack, virtualizable_boxes, + self.virtualref_boxes, resumedescr) + if self.framestack: + self.framestack[-1].pc = saved_pc + def create_empty_history(self): self.history = history.History() self.staticdata.stats.set_history(self.history) @@ -2253,8 +2283,8 @@ self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_done_with_this_frame(self, exitbox): - self.gen_store_back_in_virtualizable() # temporarily put a JUMP to a pseudo-loop + self.store_token_in_vable() sd = self.staticdata result_type = self.jitdriver_sd.result_type if result_type == history.VOID: @@ -2280,8 +2310,24 @@ if target_token is not token: compile.giveup() + def store_token_in_vable(self): + vinfo = self.jitdriver_sd.virtualizable_info + if vinfo is None: + return + vbox = self.virtualizable_boxes[-1] + if vbox is self.forced_virtualizable: + return # we already forced it by hand + force_token_box = history.BoxPtr() + # in case the force_token has not been recorded, record it here + # to make sure we know the virtualizable can be broken. 
However, the + # contents of the virtualizable should be generally correct + self.history.record(rop.FORCE_TOKEN, [], force_token_box) + self.history.record(rop.SETFIELD_GC, [vbox, force_token_box], + None, descr=vinfo.vable_token_descr) + self.generate_guard(rop.GUARD_NOT_FORCED_2, None) + def compile_exit_frame_with_exception(self, valuebox): - self.gen_store_back_in_virtualizable() + self.store_token_in_vable() sd = self.staticdata token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr self.history.record(rop.FINISH, [valuebox], None, descr=token) @@ -2420,27 +2466,25 @@ self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL def handle_possible_exception(self): - frame = self.framestack[-1] if self.last_exc_value_box is not None: exception_box = self.cpu.ts.cls_of_box(self.last_exc_value_box) - op = frame.generate_guard(rop.GUARD_EXCEPTION, - None, [exception_box]) + op = self.generate_guard(rop.GUARD_EXCEPTION, + None, [exception_box]) assert op is not None op.result = self.last_exc_value_box self.class_of_last_exc_is_const = True self.finishframe_exception() else: - frame.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) + self.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) def handle_possible_overflow_error(self): - frame = self.framestack[-1] if self.last_exc_value_box is not None: - frame.generate_guard(rop.GUARD_OVERFLOW, None) + self.generate_guard(rop.GUARD_OVERFLOW, None) assert isinstance(self.last_exc_value_box, Const) assert self.class_of_last_exc_is_const self.finishframe_exception() else: - frame.generate_guard(rop.GUARD_NO_OVERFLOW, None) + self.generate_guard(rop.GUARD_NO_OVERFLOW, None) def assert_no_exception(self): assert self.last_exc_value_box is None @@ -2467,12 +2511,13 @@ if vinfo is not None: self.virtualizable_boxes = virtualizable_boxes # just jumped away from assembler (case 4 in the comment in - # virtualizable.py) into tracing (case 2); check that vable_token - # is and stays NULL. Note the call to reset_vable_token() in - # warmstate.py. 
+ # virtualizable.py) into tracing (case 2); if we get the + # virtualizable from somewhere strange it might not be forced, + # do it virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.is_token_nonnull_gcref(virtualizable) + if vinfo.is_token_nonnull_gcref(virtualizable): + vinfo.reset_token_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # @@ -2508,11 +2553,20 @@ virtualizable) self.virtualizable_boxes.append(virtualizable_box) - def gen_store_back_in_virtualizable(self): + def gen_store_back_in_vable(self, box): vinfo = self.jitdriver_sd.virtualizable_info if vinfo is not None: # xxx only write back the fields really modified vbox = self.virtualizable_boxes[-1] + if vbox is not box: + # ignore the hint on non-standard virtualizable + # specifically, ignore it on a virtual + return + if self.forced_virtualizable is not None: + # this can happen only in strange cases, but we don't care + # it was already forced + return + self.forced_virtualizable = vbox for i in range(vinfo.num_static_extra_boxes): fieldbox = self.virtualizable_boxes[i] descr = vinfo.static_field_descrs[i] @@ -2529,6 +2583,9 @@ self.execute_and_record(rop.SETARRAYITEM_GC, descr, abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) + # we're during tracing, so we should not execute it + self.history.record(rop.SETFIELD_GC, [vbox, self.cpu.ts.CONST_NULL], + None, descr=vinfo.vable_token_descr) def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -397,6 +397,7 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set + 'GUARD_NOT_FORCED_2/0d', # same as GUARD_NOT_FORCED, but for finish() 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- @@ -488,6 +489,8 @@ 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', 'MARK_OPAQUE_PTR/1b', + # this one has no *visible* side effect, since the virtualizable + # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'SETARRAYITEM_GC/3d', diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -17,6 +17,9 @@ # because it needs to support optimize.py which encodes virtuals with # arbitrary cycles and also to compress the information +class AlreadyForced(Exception): + pass + class Snapshot(object): __slots__ = ('prev', 'boxes') @@ -51,20 +54,24 @@ def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, storage): - n = len(framestack)-1 - top = framestack[n] - _ensure_parent_resumedata(framestack, n) - frame_info_list = FrameInfo(top.parent_resumedata_frame_info_list, - top.jitcode, top.pc) - storage.rd_frame_info_list = frame_info_list - snapshot = Snapshot(top.parent_resumedata_snapshot, - top.get_list_of_active_boxes(False)) + n = len(framestack) - 1 if virtualizable_boxes is not None: boxes = virtualref_boxes + virtualizable_boxes else: boxes = virtualref_boxes[:] - snapshot = Snapshot(snapshot, boxes) - storage.rd_snapshot = snapshot + if n >= 0: + top = framestack[n] + _ensure_parent_resumedata(framestack, n) + frame_info_list = 
FrameInfo(top.parent_resumedata_frame_info_list, + top.jitcode, top.pc) + storage.rd_frame_info_list = frame_info_list + snapshot = Snapshot(top.parent_resumedata_snapshot, + top.get_list_of_active_boxes(False)) + snapshot = Snapshot(snapshot, boxes) + storage.rd_snapshot = snapshot + else: + storage.rd_frame_info_list = None + storage.rd_snapshot = Snapshot(None, boxes) # # The following is equivalent to the RPython-level declaration: @@ -1214,16 +1221,8 @@ return len(numb.nums) index = len(numb.nums) - 1 virtualizable = self.decode_ref(numb.nums[index]) - if self.resume_after_guard_not_forced == 1: - # in the middle of handle_async_forcing() - assert vinfo.is_token_nonnull_gcref(virtualizable) - vinfo.reset_token_gcref(virtualizable) - else: - # just jumped away from assembler (case 4 in the comment in - # virtualizable.py) into tracing (case 2); check that vable_token - # is and stays NULL. Note the call to reset_vable_token() in - # warmstate.py. - assert not vinfo.is_token_nonnull_gcref(virtualizable) + # just reset the token, we'll force it later + vinfo.reset_token_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -412,7 +412,6 @@ self.i8 = i8 self.i9 = i9 - def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -445,7 +444,6 @@ self.i8 = i8 self.i9 = i9 - def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -643,7 +641,7 @@ # exactly the same logic as the previous test, but with 'frame.j' # instead of just 'j' class Frame(object): - _virtualizable2_ = ['j'] + _virtualizable_ = ['j'] def __init__(self, j): self.j = j @@ -767,9 +765,9 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] - driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], + driver = JitDriver(greens = ['codeno'], reds = ['i', 's', 'frame'], virtualizables = ['frame'], get_printable_location = lambda codeno : str(codeno)) @@ -781,22 +779,26 @@ def portal(codeno, frame): i = 0 + s = 0 while i < 10: - driver.can_enter_jit(frame=frame, codeno=codeno, i=i) - driver.jit_merge_point(frame=frame, codeno=codeno, i=i) + driver.can_enter_jit(frame=frame, codeno=codeno, i=i, s=s) + driver.jit_merge_point(frame=frame, codeno=codeno, i=i, s=s) nextval = frame.thing.val if codeno == 0: subframe = Frame() subframe.thing = Thing(nextval) nextval = portal(1, subframe) + s += subframe.thing.val frame.thing = Thing(nextval + 1) i += 1 return frame.thing.val res = self.meta_interp(main, [0], inline=True) + self.check_resops(call=0, cond_call=0) # got removed by optimization assert res == main(0) def test_directly_call_assembler_virtualizable_reset_token(self): + py.test.skip("not applicable any more, I think") from rpython.rtyper.lltypesystem import lltype from rpython.rlib.debug import llinterpcall @@ -805,7 +807,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -856,7 +858,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -907,7 +909,7 @@ virtualizables = ['frame']) class Frame(object): - 
_virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, l, s): self = hint(self, access_directly=True, @@ -950,7 +952,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -344,7 +344,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, a, s): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -4,7 +4,8 @@ from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import get_translator +from rpython.jit.metainterp.warmspot import get_translator, get_stats +from rpython.jit.metainterp.resoperation import rop from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote, virtual_ref from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import hlstr @@ -26,7 +27,6 @@ return lltype_to_annotation(lltype.Void) def specialize_call(self, hop): - op = self.instance # the LLOp object that was called args_v = [hop.inputarg(hop.args_r[0], 0), hop.inputconst(lltype.Void, hop.args_v[1].value), hop.inputconst(lltype.Void, {})] @@ -46,8 +46,8 @@ ('vable_token', llmemory.GCREF), ('inst_x', lltype.Signed), ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), - hints = {'virtualizable2_accessor': FieldListAccessor()}) - XY._hints['virtualizable2_accessor'].initialize( + hints = {'virtualizable_accessor': FieldListAccessor()}) + XY._hints['virtualizable_accessor'].initialize( XY, {'inst_x': IR_IMMUTABLE, 'inst_node': IR_IMMUTABLE}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) @@ -141,11 +141,13 @@ n -= 1 def f(n): xy = self.setup() + promote_virtualizable(xy, 'inst_x') xy.inst_x = 10000 m = 10 while m > 0: g(xy, n) m -= 1 + promote_virtualizable(xy, 'inst_x') return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 @@ -200,8 +202,8 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_simple_loop(setfield_gc=1, getfield_gc=0) - self.check_resops(setfield_gc=2, getfield_gc=3) + self.check_simple_loop(setfield_gc=1, getfield_gc=0, cond_call=1) + self.check_resops(setfield_gc=2, getfield_gc=4) # ------------------------------ @@ -212,8 +214,8 @@ ('inst_x', lltype.Signed), ('inst_l1', lltype.Ptr(lltype.GcArray(lltype.Signed))), ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), - hints = {'virtualizable2_accessor': FieldListAccessor()}) - XY2._hints['virtualizable2_accessor'].initialize( + hints = {'virtualizable_accessor': FieldListAccessor()}) + XY2._hints['virtualizable_accessor'].initialize( XY2, {'inst_x': IR_IMMUTABLE, 'inst_l1': IR_IMMUTABLE_ARRAY, 'inst_l2': IR_IMMUTABLE_ARRAY}) @@ -278,6 +280,7 @@ while m > 0: g(xy2, n) m -= 1 + promote_virtualizable(xy2, 'inst_l2') return xy2.inst_l2[0] assert f(18) == 10360 res = self.meta_interp(f, 
[18]) @@ -381,7 +384,7 @@ res = self.meta_interp(f, [20], enable_opts='') assert res == expected self.check_simple_loop(setarrayitem_gc=1, setfield_gc=0, - getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=2) # ------------------------------ @@ -424,7 +427,9 @@ while m > 0: g(xy2, n) m -= 1 - return xy2.parent.inst_l2[0] + parent = xy2.parent + promote_virtualizable(parent, 'inst_l2') + return parent.inst_l2[0] assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 @@ -440,7 +445,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -469,7 +474,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, l, s): self.l = l @@ -504,7 +509,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -532,7 +537,7 @@ virtualizables = ['frame']) class BaseFrame(object): - _virtualizable2_ = ['x[*]'] + _virtualizable_ = ['x[*]'] def __init__(self, x): self.x = x @@ -563,7 +568,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -596,7 +601,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -636,7 +641,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -669,7 +674,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -706,7 +711,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class Y: pass @@ -751,7 +756,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class Y: pass @@ -801,7 +806,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class FooBarError(Exception): pass @@ -845,7 +850,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -882,7 +887,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -934,7 +939,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -971,7 +976,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -1005,7 +1010,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['stackpos', 'stack[*]'] + _virtualizable_ = ['stackpos', 'stack[*]'] def f(n): frame = Frame() @@ -1034,7 +1039,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1088,7 +1093,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1120,7 +1125,7 @@ 
virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y', 'z'] + _virtualizable_ = ['x', 'y', 'z'] def __init__(self, x, y, z=1): self = hint(self, access_directly=True) @@ -1155,7 +1160,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x[*]'] + _virtualizable_ = ['x[*]'] def __init__(self, x, y): self = hint(self, access_directly=True, @@ -1187,7 +1192,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1226,7 +1231,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -1266,7 +1271,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1310,7 +1315,7 @@ def test_inlining(self): class Frame(object): - _virtualizable2_ = ['x', 'next'] + _virtualizable_ = ['x', 'next'] def __init__(self, x): self = hint(self, access_directly=True) @@ -1345,7 +1350,7 @@ def test_guard_failure_in_inlined_function(self): class Frame(object): - _virtualizable2_ = ['n', 'next'] + _virtualizable_ = ['n', 'next'] def __init__(self, n): self = hint(self, access_directly=True) @@ -1399,7 +1404,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -1450,7 +1455,7 @@ ) class Frame(object): - _virtualizable2_ = ['x'] + _virtualizable_ = ['x'] def main(n): f = Frame() @@ -1469,6 +1474,113 @@ "int_add": 2, "jump": 1 }) + def test_frame_nonstandard_no_virtualizable(self): + + driver1 = JitDriver(greens=[], reds=['i', 's', 'frame']) + driver2 = JitDriver(greens=[], reds=['frame'], + virtualizables=['frame']) + + class Frame(object): + _virtualizable_ = ['x'] + + def g(frame): + driver2.jit_merge_point(frame=frame) + frame.x += 1 + return frame + + def f(): + i = 0 + s = 0 + frame = Frame() + frame.x = 0 + g(frame) + while i < 10: + driver1.jit_merge_point(frame=frame, s=s, i=i) + frame = g(frame) + s += frame.x + i += 1 + return s + + def main(): + res = 0 + for i in range(10): + res += f() + return res + + res = self.meta_interp(main, []) + assert res == main() + + def test_two_virtualizables_mixed(self): + driver1 = JitDriver(greens=[], reds=['i', 's', 'frame', + 'subframe']) + driver2 = JitDriver(greens=[], reds=['subframe'], + virtualizables=['subframe']) + + class Frame(object): + _virtualizable_ = ['x'] + + class SubFrame(object): + _virtualizable_ = ['x'] + + def g(subframe): + driver2.jit_merge_point(subframe=subframe) + subframe.x += 1 + + def f(): + i = 0 + frame = Frame() + frame.x = 0 + subframe = SubFrame() + subframe.x = 0 + s = 0 + while i < 10: + driver1.jit_merge_point(frame=frame, subframe=subframe, i=i, + s=s) + g(subframe) + s += subframe.x + i += 1 + return s + + res = self.meta_interp(f, []) + assert res == f() + + def test_force_virtualizable_by_hint(self): + class Frame(object): + _virtualizable_ = ['x'] + + driver = JitDriver(greens = [], reds = ['i', 'frame'], + virtualizables = ['frame']) + + def f(frame, i): + while i > 0: + driver.jit_merge_point(i=i, frame=frame) + i -= 1 + frame.x += 1 + hint(frame, force_virtualizable=True) + + def main(): + frame = Frame() + frame.x = 0 + s = 0 + for i in range(20): + f(frame, 4) + s += frame.x + return s 
+ + r = self.meta_interp(main, []) + assert r == main() + # fish the bridge + loop = get_stats().get_all_loops()[0] + d = loop.operations[-3].getdescr() + bridge = getattr(d, '_llgraph_bridge', None) + if bridge is not None: + l = [op for op in + bridge.operations if op.getopnum() == rop.SETFIELD_GC] + assert "'inst_x'" in str(l[0].getdescr().realdescrref()) + assert len(l) == 2 # vable token set to null + l = [op for op in bridge.operations if + op.getopnum() == rop.GUARD_NOT_FORCED_2] + assert len(l) == 0 class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py --- a/rpython/jit/metainterp/virtualizable.py +++ b/rpython/jit/metainterp/virtualizable.py @@ -1,8 +1,9 @@ +from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp import history from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem From noreply at buildbot.pypy.org Tue Aug 6 17:57:26 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 17:57:26 +0200 (CEST) Subject: [pypy-commit] pypy default: document missing branches Message-ID: <20130806155726.AA8521C0134@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65974:83155329906e Date: 2013-08-06 17:56 +0200 http://bitbucket.org/pypy/pypy/changeset/83155329906e/ Log: document missing branches diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -66,3 +66,10 @@ .. branch: kill-typesystem Remove the "type system" abstraction, now that there is only ever one kind of type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. branch: reflex-support From noreply at buildbot.pypy.org Tue Aug 6 18:27:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 18:27:30 +0200 (CEST) Subject: [pypy-commit] pypy r15-for-shadowstack: Another attempt: use r15 for the shadowstack and for exception signalling. Message-ID: <20130806162730.D722F1C013B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: r15-for-shadowstack Changeset: r65975:98fdd551c43a Date: 2013-08-06 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/98fdd551c43a/ Log: Another attempt: use r15 for the shadowstack and for exception signalling. 
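To make the idea concrete before the diffs that follow: the r15-for-shadowstack changesets below keep each function's GC roots in a small array in its own C frame, reachable through a dedicated pypy_r15 variable whose low 3 bits encode (number of roots - 1) — the array addresses are 8-byte aligned, so those bits are free — and whose slot 0 links back to the caller's chunk, with (char *)-1 marking the stack bottom. The sketch here is only a plain-Python toy of that encoding, not part of any changeset: lists stand in for stack memory, small integer ids for addresses, and the names ToyShadowStack, SENTINEL and obj_a/obj_b/obj_c are invented for the example.

    SENTINEL = -1     # what entrypoint.c stores in pypy_r15 at the stack bottom

    class ToyShadowStack(object):
        def __init__(self):
            self.r15 = SENTINEL
            self._chunks = []              # chunk id -> [link, root, root, ...]

        def push(self, roots):             # roughly what shadowstack_push emits
            assert 1 <= len(roots) <= 8    # one chunk holds at most 8 roots
            chunk = [self.r15] + list(roots)   # slot 0 links to the caller
            self._chunks.append(chunk)
            chunk_id = len(self._chunks) - 1
            self.r15 = (chunk_id << 3) | (len(roots) - 1)
            return chunk_id

        def pop(self, chunk_id):           # roughly what shadowstack_pop emits
            chunk = self._chunks[chunk_id]
            self.r15 = chunk[0]            # restore the caller's r15
            return chunk[1:]               # reload the (possibly moved) roots

        def walk(self, callback):          # the rewritten walk_stack_roots() logic
            r15 = self.r15
            while r15 != SENTINEL:
                n = (r15 & 7) + 1          # low 3 bits hold (number of roots - 1)
                chunk = self._chunks[r15 >> 3]
                for i in range(n, 0, -1):
                    callback(chunk, i)     # the real code passes slot addresses
                r15 = chunk[0]             # follow the link to the caller's chunk

    if __name__ == '__main__':
        ss = ToyShadowStack()
        outer = ss.push(['obj_a', 'obj_b'])
        inner = ss.push(['obj_c'])
        seen = []
        ss.walk(lambda chunk, i: seen.append(chunk[i]))
        assert seen == ['obj_c', 'obj_b', 'obj_a']
        ss.pop(inner)
        ss.pop(outer)
        assert ss.r15 == SENTINEL

The same decoding — mask off the low 3 bits, read that many slots, then follow slot 0 — is what the new walk_stack_roots() in shadowstack.py does on real addresses in the changesets below.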
From noreply at buildbot.pypy.org Tue Aug 6 18:27:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 18:27:32 +0200 (CEST) Subject: [pypy-commit] pypy r15-for-shadowstack: in-progress, targetgcbench works Message-ID: <20130806162732.187B21C01B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: r15-for-shadowstack Changeset: r65976:7262c0519909 Date: 2013-08-06 16:47 +0200 http://bitbucket.org/pypy/pypy/changeset/7262c0519909/ Log: in-progress, targetgcbench works diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -784,6 +784,7 @@ self._gc_adr_of_gc_attr(hop, 'nursery_top') def _gc_adr_of_gcdata_attr(self, hop, attrname): + xxxxxxxxxxxxxx op = hop.spaceop ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, 'inst_' + attrname) diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -4,7 +4,8 @@ from rpython.rlib import rgc from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import llhelper -from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr) from rpython.rtyper.rbuiltin import gen_cast @@ -12,14 +13,7 @@ class ShadowStackFrameworkGCTransformer(BaseFrameworkGCTransformer): def annotate_walker_functions(self, getfn): - self.incr_stack_ptr = getfn(self.root_walker.incr_stack, - [annmodel.SomeInteger()], - annmodel.SomeAddress(), - inline = True) - self.decr_stack_ptr = getfn(self.root_walker.decr_stack, - [annmodel.SomeInteger()], - annmodel.SomeAddress(), - inline = True) + pass def build_root_walker(self): return ShadowStackRootWalker(self) @@ -27,30 +21,13 @@ def push_roots(self, hop, keep_current_args=False): livevars = self.get_livevars_for_roots(hop, keep_current_args) self.num_pushs += len(livevars) - if not livevars: - return [] - c_len = rmodel.inputconst(lltype.Signed, len(livevars) ) - base_addr = hop.genop("direct_call", [self.incr_stack_ptr, c_len ], - resulttype=llmemory.Address) - for k,var in enumerate(livevars): - c_k = rmodel.inputconst(lltype.Signed, k * sizeofaddr) - v_adr = gen_cast(hop.llops, llmemory.Address, var) - hop.genop("raw_store", [base_addr, c_k, v_adr]) + hop.genop("shadowstack_push", list(livevars)) # may be 0 return livevars def pop_roots(self, hop, livevars): if not livevars: return - c_len = rmodel.inputconst(lltype.Signed, len(livevars) ) - base_addr = hop.genop("direct_call", [self.decr_stack_ptr, c_len ], - resulttype=llmemory.Address) - if self.gcdata.gc.moving_gc: - # for moving collectors, reload the roots into the local variables - for k,var in enumerate(livevars): - c_k = rmodel.inputconst(lltype.Signed, k * sizeofaddr) - v_newaddr = hop.genop("raw_load", [base_addr, c_k], - resulttype=llmemory.Address) - hop.genop("gc_reload_possibly_moved", [v_newaddr, var]) + hop.genop("shadowstack_pop", list(livevars)) class ShadowStackRootWalker(BaseRootWalker): @@ -59,53 +36,21 @@ # NB. 
'self' is frozen, but we can use self.gcdata to store state gcdata = self.gcdata - def incr_stack(n): - top = gcdata.root_stack_top - gcdata.root_stack_top = top + n*sizeofaddr - return top - self.incr_stack = incr_stack - - def decr_stack(n): - top = gcdata.root_stack_top - n*sizeofaddr - gcdata.root_stack_top = top - return top - self.decr_stack = decr_stack - - root_iterator = get_root_iterator(gctransformer) - def walk_stack_root(callback, start, end): - root_iterator.setcontext(NonConstant(llmemory.NULL)) - gc = self.gc - addr = end - while True: - addr = root_iterator.nextleft(gc, start, addr) - if addr == llmemory.NULL: - return - callback(gc, addr) - self.rootstackhook = walk_stack_root - - self.shadow_stack_pool = ShadowStackPool(gcdata) - rsd = gctransformer.root_stack_depth - if rsd is not None: - self.shadow_stack_pool.root_stack_depth = rsd - - def push_stack(self, addr): - top = self.incr_stack(1) - top.address[0] = addr - - def pop_stack(self): - top = self.decr_stack(1) - return top.address[0] - - def setup_root_walker(self): - self.shadow_stack_pool.initial_setup() - BaseRootWalker.setup_root_walker(self) - def walk_stack_roots(self, collect_stack_root): - gcdata = self.gcdata - self.rootstackhook(collect_stack_root, - gcdata.root_stack_base, gcdata.root_stack_top) + WORD = llmemory.sizeof(llmemory.Address) + r15 = llop.shadowstack_r15(lltype.Signed) + gc = self.gc + while r15 != -1: + n = (r15 & 7) + 1 + r15 &= ~7 + while n > 0: + addr = rffi.cast(llmemory.Address, r15 + n * WORD) + collect_stack_root(gc, addr) + n -= 1 + r15 = rffi.cast(llmemory.Address, r15).signed[0] def need_thread_support(self, gctransformer, getfn): + xxxxxxxxx from rpython.rlib import rthread # xxx fish gcdata = self.gcdata # the interfacing between the threads and the GC is done via @@ -216,6 +161,7 @@ minimal_transform=False) def need_stacklet_support(self, gctransformer, getfn): + xxxxxxxxxxxxx shadow_stack_pool = self.shadow_stack_pool SHADOWSTACKREF = get_shadowstackref(self, gctransformer) @@ -264,156 +210,3 @@ inline=True) # ____________________________________________________________ - -class ShadowStackPool(object): - """Manages a pool of shadowstacks. The MAX most recently used - shadowstacks are fully allocated and can be directly jumped into. - The rest are stored in a more virtual-memory-friendly way, i.e. - with just the right amount malloced. Before they can run, they - must be copied into a full shadowstack. XXX NOT IMPLEMENTED SO FAR! - """ - _alloc_flavor_ = "raw" - root_stack_depth = 163840 - - #MAX = 20 not implemented yet - - def __init__(self, gcdata): - self.unused_full_stack = llmemory.NULL - self.gcdata = gcdata - - def initial_setup(self): - self._prepare_unused_stack() - self.start_fresh_new_state() - - def allocate(self, SHADOWSTACKREF): - """Allocate an empty SHADOWSTACKREF object.""" - return lltype.malloc(SHADOWSTACKREF, zero=True) - - def save_current_state_away(self, shadowstackref, ncontext): - """Save the current state away into 'shadowstackref'. - This either works, or raise MemoryError and nothing is done. - To do a switch, first call save_current_state_away() or - forget_current_state(), and then call restore_state_from() - or start_fresh_new_state(). 
- """ - self._prepare_unused_stack() - shadowstackref.base = self.gcdata.root_stack_base - shadowstackref.top = self.gcdata.root_stack_top - shadowstackref.context = ncontext - ll_assert(shadowstackref.base <= shadowstackref.top, - "save_current_state_away: broken shadowstack") - #shadowstackref.fullstack = True - # - # cannot use llop.gc_assume_young_pointers() here, because - # we are in a minimally-transformed GC helper :-/ - gc = self.gcdata.gc - if hasattr(gc.__class__, 'assume_young_pointers'): - shadowstackadr = llmemory.cast_ptr_to_adr(shadowstackref) - gc.assume_young_pointers(shadowstackadr) - # - self.gcdata.root_stack_top = llmemory.NULL # to detect missing restore - - def forget_current_state(self): - ll_assert(self.gcdata.root_stack_base == self.gcdata.root_stack_top, - "forget_current_state: shadowstack not empty!") - if self.unused_full_stack: - llmemory.raw_free(self.unused_full_stack) - self.unused_full_stack = self.gcdata.root_stack_base - self.gcdata.root_stack_top = llmemory.NULL # to detect missing restore - - def restore_state_from(self, shadowstackref): - ll_assert(bool(shadowstackref.base), "empty shadowstackref!") - ll_assert(shadowstackref.base <= shadowstackref.top, - "restore_state_from: broken shadowstack") - self.gcdata.root_stack_base = shadowstackref.base - self.gcdata.root_stack_top = shadowstackref.top - self._cleanup(shadowstackref) - - def start_fresh_new_state(self): - self.gcdata.root_stack_base = self.unused_full_stack - self.gcdata.root_stack_top = self.unused_full_stack - self.unused_full_stack = llmemory.NULL - - def _cleanup(self, shadowstackref): - shadowstackref.base = llmemory.NULL - shadowstackref.top = llmemory.NULL - shadowstackref.context = llmemory.NULL - - def _prepare_unused_stack(self): - if self.unused_full_stack == llmemory.NULL: - root_stack_size = sizeofaddr * self.root_stack_depth - self.unused_full_stack = llmemory.raw_malloc(root_stack_size) - if self.unused_full_stack == llmemory.NULL: - raise MemoryError - - -def get_root_iterator(gctransformer): - if hasattr(gctransformer, '_root_iterator'): - return gctransformer._root_iterator # if already built - class RootIterator(object): - def _freeze_(self): - return True - def setcontext(self, context): - pass - def nextleft(self, gc, start, addr): - while addr != start: - addr -= sizeofaddr - if gc.points_to_valid_gc_object(addr): - return addr - return llmemory.NULL - result = RootIterator() - gctransformer._root_iterator = result - return result - - -def get_shadowstackref(root_walker, gctransformer): - if hasattr(gctransformer, '_SHADOWSTACKREF'): - return gctransformer._SHADOWSTACKREF - - SHADOWSTACKREFPTR = lltype.Ptr(lltype.GcForwardReference()) - SHADOWSTACKREF = lltype.GcStruct('ShadowStackRef', - ('base', llmemory.Address), - ('top', llmemory.Address), - ('context', llmemory.Address), - #('fullstack', lltype.Bool), - rtti=True) - SHADOWSTACKREFPTR.TO.become(SHADOWSTACKREF) - - gc = gctransformer.gcdata.gc - root_iterator = get_root_iterator(gctransformer) - - def customtrace(obj, prev): - obj = llmemory.cast_adr_to_ptr(obj, SHADOWSTACKREFPTR) - if not prev: - root_iterator.setcontext(obj.context) - prev = obj.top - return root_iterator.nextleft(gc, obj.base, prev) - - CUSTOMTRACEFUNC = lltype.FuncType([llmemory.Address, llmemory.Address], - llmemory.Address) - customtraceptr = llhelper(lltype.Ptr(CUSTOMTRACEFUNC), customtrace) - - def shadowstack_destructor(shadowstackref): - if root_walker.stacklet_support: - from rpython.rlib import _rffi_stacklet as _c - h = 
shadowstackref.context - h = llmemory.cast_adr_to_ptr(h, _c.handle) - shadowstackref.context = llmemory.NULL - # - base = shadowstackref.base - shadowstackref.base = llmemory.NULL - shadowstackref.top = llmemory.NULL - llmemory.raw_free(base) - # - if root_walker.stacklet_support: - if h: - _c.destroy(h) - - destrptr = gctransformer.annotate_helper(shadowstack_destructor, - [SHADOWSTACKREFPTR], lltype.Void) - - lltype.attachRuntimeTypeInfo(SHADOWSTACKREF, customtraceptr=customtraceptr, - destrptr=destrptr) - - gctransformer._SHADOWSTACKREF = SHADOWSTACKREF - return SHADOWSTACKREF diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -561,6 +561,10 @@ 'debug_print_traceback': LLOp(), 'debug_nonnull_pointer': LLOp(canrun=True), + 'shadowstack_push': LLOp(), + 'shadowstack_pop': LLOp(), + 'shadowstack_r15': LLOp(), + # __________ instrumentation _________ 'instrument_count': LLOp(), } diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -204,6 +204,8 @@ def cfunction_body(self): graph = self.graph + yield 'char *shadowstack[9]; /* xxx reduce */' + yield 'shadowstack[0] = pypy_r15;' yield 'goto block0;' # to avoid a warning "this label is not used" # generate the body of each block @@ -899,4 +901,34 @@ else: return None # use the default + def OP_SHADOWSTACK_PUSH(self, op): + numvars = len(op.args) + if numvars == 0: + return 'pypy_r15 = shadowstack[0];' + else: + assert numvars <= 8 + exprs = [] + for i in range(numvars): + exprs.append('shadowstack[%d] = (char *)%s;' % ( + i + 1, self.expr(op.args[i]))) + exprs.append('pypy_r15 = ((char *)shadowstack) + %d;' % ( + numvars - 1,)) + return '\n'.join(exprs) + + def OP_SHADOWSTACK_POP(self, op): + numvars = len(op.args) + assert 1 <= numvars <= 8 + exprs = [] + for i in range(numvars-1, -1, -1): + v = op.args[i] + exprs.append('%s = (%s)shadowstack[%d];' % ( + self.expr(v), cdecl(self.lltypename(v), ''), i + 1)) + return '\n'.join(exprs) + + def OP_SHADOWSTACK_R15(self, op): + v = op.result + return '%s = (%s)pypy_r15;' % (self.expr(v), + cdecl(self.lltypename(v), '')) + + assert not USESLOTS or '__dict__' not in dir(FunctionCodeGenerator) diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -30,14 +30,18 @@ char *errmsg; int i, exitcode; RPyListOfString *list; + char *saved_r15; #ifdef PYPY_USE_ASMGCC pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif - pypy_asm_stack_bottom(); + saved_r15 = pypy_r15; + //pypy_asm_stack_bottom(); #ifdef PYPY_X86_CHECK_SSE2_DEFINED + pypy_r15 = (char *)-1; pypy_x86_check_sse2(); #endif + pypy_r15 = (char *)-1; instrument_setup(); #ifndef MS_WINDOWS @@ -49,28 +53,37 @@ } #endif + pypy_r15 = (char *)-1; errmsg = RPython_StartupCode(); if (errmsg) goto error; + pypy_r15 = (char *)-1; list = _RPyListOfString_New(argc); if (RPyExceptionOccurred()) goto memory_out; for (i=0; i Author: Armin Rigo Branch: r15-for-shadowstack Changeset: r65977:cb26b6786974 Date: 2013-08-06 18:17 +0200 http://bitbucket.org/pypy/pypy/changeset/cb26b6786974/ Log: Fixes to reserve only the space necessary, and to allow for more than 8 saved variables. The result is encouragingly good. 
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -204,8 +204,8 @@ def cfunction_body(self): graph = self.graph - yield 'char *shadowstack[9]; /* xxx reduce */' - yield 'shadowstack[0] = pypy_r15;' + for line in self.shadowstack_prologue(): + yield line yield 'goto block0;' # to avoid a warning "this label is not used" # generate the body of each block @@ -904,25 +904,30 @@ def OP_SHADOWSTACK_PUSH(self, op): numvars = len(op.args) if numvars == 0: - return 'pypy_r15 = shadowstack[0];' + return 'pypy_r15 = shadowstack1[0];' else: - assert numvars <= 8 exprs = [] + ss = 0 for i in range(numvars): - exprs.append('shadowstack[%d] = (char *)%s;' % ( - i + 1, self.expr(op.args[i]))) - exprs.append('pypy_r15 = ((char *)shadowstack) + %d;' % ( - numvars - 1,)) + if i % 8 == 0: + ss += 1 + exprs.append('shadowstack%d[%d] = (char *)%s;' % ( + ss, (i % 8) + 1, self.expr(op.args[i]))) + exprs.append('pypy_r15 = ((char *)shadowstack%d) + %d;' % ( + ss, (numvars - 1) % 8,)) return '\n'.join(exprs) def OP_SHADOWSTACK_POP(self, op): numvars = len(op.args) - assert 1 <= numvars <= 8 + assert numvars >= 1 exprs = [] - for i in range(numvars-1, -1, -1): + ss = 0 + for i in range(numvars): + if i % 8 == 0: + ss += 1 v = op.args[i] - exprs.append('%s = (%s)shadowstack[%d];' % ( - self.expr(v), cdecl(self.lltypename(v), ''), i + 1)) + exprs.append('%s = (%s)shadowstack%d[%d];' % ( + self.expr(v), cdecl(self.lltypename(v), ''), ss, (i % 8) + 1)) return '\n'.join(exprs) def OP_SHADOWSTACK_R15(self, op): @@ -930,5 +935,25 @@ return '%s = (%s)pypy_r15;' % (self.expr(v), cdecl(self.lltypename(v), '')) + def shadowstack_prologue(self): + maxlength = -1 + for block in self.graph.iterblocks(): + for op in block.operations: + if op.opname == 'shadowstack_push': + maxlength = max(maxlength, len(op.args)) + if maxlength < 0: + return + ss = 1 + ssprev = 'pypy_r15' + while True: + sslen = min(maxlength + 1, 9) + yield 'char *shadowstack%d[%d];' % (ss, sslen) + yield 'shadowstack%d[0] = %s;' % (ss, ssprev) + maxlength -= (sslen - 1) + if maxlength == 0: + break + ssprev = '((char *)shadowstack%d) + 7' % ss + ss += 1 + assert not USESLOTS or '__dict__' not in dir(FunctionCodeGenerator) From noreply at buildbot.pypy.org Tue Aug 6 19:08:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 19:08:34 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix translation without threads (hopefully) Message-ID: <20130806170834.2D8DE1C094F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65978:4c7b26f3c910 Date: 2013-08-06 19:07 +0200 http://bitbucket.org/pypy/pypy/changeset/4c7b26f3c910/ Log: Fix translation without threads (hopefully) diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,7 +3,6 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread -from pypy.module.thread import os_thread PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -45,7 +44,10 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): - os_thread.setup_threads(space) + if space.config.translation.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + #else: nothing to do @cpython_api([], rffi.INT_real, 
error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): From noreply at buildbot.pypy.org Tue Aug 6 19:08:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 19:08:35 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130806170835.C311A1C10E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65979:a5b0f9727317 Date: 2013-08-06 19:07 +0200 http://bitbucket.org/pypy/pypy/changeset/a5b0f9727317/ Log: merge heads diff too long, truncating to 2000 out of 2501 lines diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. _Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. + +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. Frames +can escape and they can also be allocated already at the moment we enter the +JIT. In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). 
+ +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. + It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -66,3 +66,10 @@ .. branch: kill-typesystem Remove the "type system" abstraction, now that there is only ever one kind of type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. branch: reflex-support diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -176,9 +176,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -260,7 +261,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -330,7 +331,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -340,7 +341,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -355,7 +356,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -389,7 +390,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -476,7 +477,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." 
if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -490,7 +491,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -522,7 +523,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -553,12 +554,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -609,10 +610,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -638,8 +639,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -649,7 +650,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -667,7 +668,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,18 +12,18 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap -PyFrame._virtualizable2_ = ['last_instr', 'pycode', - 'valuestackdepth', 'locals_stack_w[*]', - 'cells[*]', - 'last_exception', - 'lastblock', - 'is_being_profiled', - 'w_globals', - 'w_f_trace', - ] +PyFrame._virtualizable_ = ['last_instr', 'pycode', + 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', + 'last_exception', + 'lastblock', + 'is_being_profiled', + 'w_globals', + 'w_f_trace', + ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] @@ -73,7 +73,13 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: + self.last_exception = None return self.popvalue() def jump_absolute(self, jumpto, ec): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) 
- p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -14,9 +14,9 @@ return lltype.nullptr(T) interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLtypeCPU + from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now - apply_jit(interp, graph, LLtypeCPU) + apply_jit(interp, graph, LLGraphCPU) def apply_jit(interp, graph, CPUClass): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3088,7 +3088,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class B(A): def meth(self): return self @@ -3128,7 +3128,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class I: pass diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -959,16 +959,6 @@ pmc.B_offs(self.mc.currpos(), c.EQ) return pos - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - tmploc = self._regalloc.get_scratch_reg(INT) - self.mov_loc_loc(vloc, r.ip) - self.mc.MOV_ri(tmploc.value, 0) - self.mc.STR_ri(tmploc.value, r.ip.value, ofs) - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from (tmploc, 0) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -101,6 +101,9 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) + def get_vinfo(self): + return self.vinfo + def __repr__(self): return 'FieldDescr(%r, %r)' % (self.S, self.fieldname) @@ -170,7 +173,7 @@ translate_support_code = False is_llgraph = True - def __init__(self, rtyper, stats=None, *ignored_args, **ignored_kwds): + def __init__(self, rtyper, stats=None, *ignored_args, **kwds): model.AbstractCPU.__init__(self) self.rtyper = rtyper self.llinterp = LLInterpreter(rtyper) @@ -178,6 +181,7 @@ class MiniStats: pass self.stats = stats or MiniStats() + self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): clt = model.CompiledLoopToken(self, looptoken.number) @@ -316,6 +320,8 @@ except KeyError: descr = FieldDescr(S, fieldname) self.descrs[key] = descr + if self.vinfo_for_tests is not None: + descr.vinfo = 
self.vinfo_for_tests return descr def arraydescrof(self, A): @@ -600,6 +606,7 @@ forced_deadframe = None overflow_flag = False last_exception = None + force_guard_op = None def __init__(self, cpu, argboxes, args): self.env = {} @@ -766,6 +773,8 @@ if self.forced_deadframe is not None: saved_data = self.forced_deadframe._saved_data self.fail_guard(descr, saved_data) + self.force_guard_op = self.current_op + execute_guard_not_forced_2 = execute_guard_not_forced def execute_guard_not_invalidated(self, descr): if self.lltrace.invalid: @@ -887,7 +896,6 @@ # res = CALL assembler_call_helper(pframe) # jmp @done # @fastpath: - # RESET_VABLE # res = GETFIELD(pframe, 'result') # @done: # @@ -907,25 +915,17 @@ vable = lltype.nullptr(llmemory.GCREF.TO) # # Emulate the fast path - def reset_vable(jd, vable): - if jd.index_of_virtualizable != -1: - fielddescr = jd.vable_token_descr - NULL = lltype.nullptr(llmemory.GCREF.TO) - self.cpu.bh_setfield_gc(vable, NULL, fielddescr) + # faildescr = self.cpu.get_latest_descr(pframe) if faildescr == self.cpu.done_with_this_frame_descr_int: - reset_vable(jd, vable) return self.cpu.get_int_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_ref: - reset_vable(jd, vable) return self.cpu.get_ref_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_float: - reset_vable(jd, vable) return self.cpu.get_float_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_void: - reset_vable(jd, vable) return None - # + assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: result = assembler_helper_ptr(pframe, vable) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt) + ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -24,6 +24,7 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): + assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr self.failargs = failargs @@ -232,11 +233,8 @@ jmp_location = self._call_assembler_patch_je(result_loc, je_location) - # Path B: fast path. Must load the return value, and reset the token + # Path B: fast path. 
Must load the return value - # Reset the vable token --- XXX really too much special logic here:-( - if jd.index_of_virtualizable >= 0: - self._call_assembler_reset_vtoken(jd, vloc) # self._call_assembler_load_result(op, result_loc) # diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -76,7 +76,13 @@ FLAG_STRUCT = 'X' FLAG_VOID = 'V' -class FieldDescr(AbstractDescr): +class ArrayOrFieldDescr(AbstractDescr): + vinfo = None + + def get_vinfo(self): + return self.vinfo + +class FieldDescr(ArrayOrFieldDescr): name = '' offset = 0 # help translation field_size = 0 @@ -150,12 +156,13 @@ # ____________________________________________________________ # ArrayDescrs -class ArrayDescr(AbstractDescr): +class ArrayDescr(ArrayOrFieldDescr): tid = 0 basesize = 0 # workaround for the annotator itemsize = 0 lendescr = None flag = '\x00' + vinfo = None def __init__(self, basesize, itemsize, lendescr, flag): self.basesize = basesize diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -23,7 +23,7 @@ # - floats neg and abs class Frame(object): - _virtualizable2_ = ['i'] + _virtualizable_ = ['i'] def __init__(self, i): self.i = i @@ -98,7 +98,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -6,7 +6,7 @@ total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 - total_freed_bridges = 0 + total_freed_bridges = 0 # for heaptracker # _all_size_descrs_with_vtable = None @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -294,7 +294,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -79,6 +79,7 @@ allblocks) self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.pending_guard_tokens = None @@ -1846,7 +1847,11 @@ self.mov(fail_descr_loc, RawEbpLoc(ofs)) arglist = op.getarglist() if arglist and arglist[0].type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(1) # rax + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -1991,15 +1996,6 @@ # return jmp_location - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - vtoken_ofs = 
fielddescr.offset - self.mc.MOV(edx, vloc) # we know vloc is on the current frame - self.mc.MOV_mi((edx.value, vtoken_ofs), 0) - # in the line above, TOKEN_NONE = 0 - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from the dead frame's value index 0 @@ -2326,6 +2322,15 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_location - 1, chr(offset)) + def store_force_descr(self, op, fail_locs, frame_depth): + guard_token = self.implement_guard_recovery(op.opnum, + op.getdescr(), + op.getfailargs(), + fail_locs, frame_depth) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(0, guard_token) + def force_token(self, reg): # XXX kill me assert isinstance(reg, RegLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -22,7 +22,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint @@ -1332,6 +1332,13 @@ #if jump_op is not None and jump_op.getdescr() is descr: # self._compute_hint_frame_locations_from_descr(descr) + def consider_guard_not_forced_2(self, op): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = [self.loc(v) for v in op.getfailargs()] + self.assembler.store_force_descr(op, fail_locs, + self.fm.get_frame_depth()) + self.possibly_free_vars(op.getfailargs()) + def consider_keepalive(self, op): pass diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -83,6 +83,7 @@ OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 + OS_JIT_FORCE_VIRTUALIZABLE = 121 # for debugging: _OS_CANRAISE = set([ @@ -165,6 +166,9 @@ EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) +EffectInfo.LEAST_GENERAL = EffectInfo([], [], [], [], + EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + can_invalidate=False) def effectinfo_from_writeanalyze(effects, cpu, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -521,6 +521,8 @@ op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], op.result) return [SpaceOperation('-live-', [], None), op1, None] + if hints.get('force_virtualizable'): + return SpaceOperation('hint_force_virtualizable', [op.args[0]], None) else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) diff --git a/rpython/jit/codewriter/test/test_policy.py b/rpython/jit/codewriter/test/test_policy.py --- a/rpython/jit/codewriter/test/test_policy.py +++ b/rpython/jit/codewriter/test/test_policy.py @@ -131,7 +131,7 @@ def test_access_directly_but_not_seen(): class X: - _virtualizable2_ = ["a"] + _virtualizable_ = ["a"] def h(x, y): w = 0 for i in range(y): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1320,6 +1320,10 @@ from 
rpython.jit.metainterp import quasiimmut quasiimmut.do_force_quasi_immutable(cpu, struct, mutatefielddescr) + @arguments("r") + def bhimpl_hint_force_virtualizable(r): + pass + @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -708,6 +708,8 @@ rstack._stack_criticalcode_start() try: deadframe = cpu.force(token) + # this should set descr to ResumeGuardForceDescr, if it + # was not that already faildescr = cpu.get_latest_descr(deadframe) assert isinstance(faildescr, ResumeGuardForcedDescr) faildescr.handle_async_forcing(deadframe) @@ -715,12 +717,18 @@ rstack._stack_criticalcode_stop() def handle_async_forcing(self, deadframe): - from rpython.jit.metainterp.resume import force_from_resumedata + from rpython.jit.metainterp.resume import (force_from_resumedata, + AlreadyForced) metainterp_sd = self.metainterp_sd vinfo = self.jitdriver_sd.virtualizable_info ginfo = self.jitdriver_sd.greenfield_info - all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, - vinfo, ginfo) + # there is some chance that this is already forced. In this case + # the virtualizable would have a token = NULL + try: + all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, + vinfo, ginfo) + except AlreadyForced: + return # The virtualizable data was stored on the real virtualizable above. # Handle all_virtuals: keep them for later blackholing from the # future failure of the GUARD_NOT_FORCED diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -151,6 +151,8 @@ descr_ptr = cpu.ts.cast_to_baseclass(descr_gcref) return cast_base_ptr_to_instance(AbstractDescr, descr_ptr) + def get_vinfo(self): + raise NotImplementedError class AbstractFailDescr(AbstractDescr): index = -1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5101,6 +5101,15 @@ } self.optimize_loop(ops, expected, call_pure_results) + def test_guard_not_forced_2_virtual(self): + ops = """ + [i0] + p0 = new_array(3, descr=arraydescr) + guard_not_forced_2() [p0] + finish(p0) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7086,6 +7086,19 @@ """ self.optimize_loop(ops, expected) + def test_force_virtualizable_virtual(self): + ops = """ + [i0] + p1 = new_with_vtable(ConstClass(node_vtable)) + cond_call(1, 123, p1, descr=clear_vable) + jump(i0) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + def test_setgetfield_counter(self): ops = """ [p1] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -254,12 +254,19 @@ asmdescr = LoopToken() # it can be whatever, it's not a 
descr though from rpython.jit.metainterp.virtualref import VirtualRefInfo + class FakeWarmRunnerDesc: pass FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced + FUNC = lltype.FuncType([], lltype.Void) + ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + can_invalidate=False, + oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) + clear_vable = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei) + jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -484,6 +484,8 @@ class OptVirtualize(optimizer.Optimization): "Virtualize objects until they escape." + _last_guard_not_forced_2 = None + def new(self): return OptVirtualize() @@ -527,6 +529,20 @@ return self.emit_operation(op) + def optimize_GUARD_NOT_FORCED_2(self, op): + self._last_guard_not_forced_2 = op + + def optimize_FINISH(self, op): + if self._last_guard_not_forced_2 is not None: + guard_op = self._last_guard_not_forced_2 + self.emit_operation(op) + guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) + i = len(self.optimizer._newoperations) - 1 + assert i >= 0 + self.optimizer._newoperations.insert(i, guard_op) + else: + self.emit_operation(op) + def optimize_CALL_MAY_FORCE(self, op): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex @@ -535,6 +551,15 @@ return self.emit_operation(op) + def optimize_COND_CALL(self, op): + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + value = self.getvalue(op.getarg(2)) + if value.is_virtual(): + return + self.emit_operation(op) + def optimize_VIRTUAL_REF(self, op): # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -657,6 +682,11 @@ self.do_RAW_MALLOC_VARSIZE_CHAR(op) elif effectinfo.oopspecindex == EffectInfo.OS_RAW_FREE: self.do_RAW_FREE(op) + elif effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + # we might end up having CALL here instead of COND_CALL + value = self.getvalue(op.getarg(1)) + if value.is_virtual(): + return else: self.emit_operation(op) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -18,7 +18,7 @@ from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.lltypesystem import lltype, rclass +from rpython.rtyper.lltypesystem import lltype, rclass, rffi @@ -313,7 +313,7 @@ opnum = rop.GUARD_TRUE else: opnum = rop.GUARD_FALSE - self.generate_guard(opnum, box) + self.metainterp.generate_guard(opnum, box) if not switchcase: self.pc = target @@ -341,10 +341,12 @@ value = box.nonnull() if value: if not self.metainterp.heapcache.is_class_known(box): - self.generate_guard(rop.GUARD_NONNULL, box, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_NONNULL, box, + resumepc=orgpc) else: if not isinstance(box, Const): - self.generate_guard(rop.GUARD_ISNULL, box, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_ISNULL, box, + resumepc=orgpc) 
promoted_box = box.constbox() self.metainterp.replace_box(box, promoted_box) return value @@ -604,7 +606,7 @@ def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info if (ginfo is not None and fielddescr in ginfo.green_field_descrs - and not self._nonstandard_virtualizable(pc, box)): + and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't # record any operation resbox = executor.execute(self.metainterp.cpu, self.metainterp, @@ -672,6 +674,10 @@ opimpl_raw_load_i = _opimpl_raw_load opimpl_raw_load_f = _opimpl_raw_load + @arguments("box") + def opimpl_hint_force_virtualizable(self, box): + self.metainterp.gen_store_back_in_vable(box) + @arguments("box", "descr", "descr", "orgpc") def opimpl_record_quasiimmut_field(self, box, fielddescr, mutatefielddescr, orgpc): @@ -680,7 +686,8 @@ descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], None, descr=descr) - self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_NOT_INVALIDATED, + resumepc=orgpc) @arguments("box", "descr", "orgpc") def opimpl_jit_force_quasi_immutable(self, box, mutatefielddescr, orgpc): @@ -699,28 +706,46 @@ do_force_quasi_immutable(self.metainterp.cpu, box.getref_base(), mutatefielddescr) raise SwitchToBlackhole(Counters.ABORT_FORCE_QUASIIMMUT) - self.generate_guard(rop.GUARD_ISNULL, mutatebox, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_ISNULL, mutatebox, + resumepc=orgpc) - def _nonstandard_virtualizable(self, pc, box): + def _nonstandard_virtualizable(self, pc, box, fielddescr): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] - if (self.metainterp.jitdriver_sd.virtualizable_info is None and - self.metainterp.jitdriver_sd.greenfield_info is None): - return True # can occur in case of multiple JITs - standard_box = self.metainterp.virtualizable_boxes[-1] - if standard_box is box: - return False if self.metainterp.heapcache.is_nonstandard_virtualizable(box): return True - eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, - box, standard_box) - eqbox = self.implement_guard_value(eqbox, pc) - isstandard = eqbox.getint() - if isstandard: - self.metainterp.replace_box(box, standard_box) - else: - self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) - return not isstandard + if box is self.metainterp.forced_virtualizable: + self.metainterp.forced_virtualizable = None + if (self.metainterp.jitdriver_sd.virtualizable_info is not None or + self.metainterp.jitdriver_sd.greenfield_info is not None): + standard_box = self.metainterp.virtualizable_boxes[-1] + if standard_box is box: + return False + vinfo = self.metainterp.jitdriver_sd.virtualizable_info + if vinfo is fielddescr.get_vinfo(): + eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, + box, standard_box) + eqbox = self.implement_guard_value(eqbox, pc) + isstandard = eqbox.getint() + if isstandard: + self.metainterp.replace_box(box, standard_box) + return False + if not self.metainterp.heapcache.is_unescaped(box): + self.emit_force_virtualizable(fielddescr, box) + self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) + return True + + def emit_force_virtualizable(self, fielddescr, box): + vinfo = fielddescr.get_vinfo() + token_descr = vinfo.vable_token_descr + mi = self.metainterp + tokenbox 
= mi.execute_and_record(rop.GETFIELD_GC, token_descr, box) + condbox = mi.execute_and_record(rop.PTR_NE, None, tokenbox, + history.CONST_NULL) + funcbox = ConstInt(rffi.cast(lltype.Signed, vinfo.clear_vable_ptr)) + calldescr = vinfo.clear_vable_descr + self.execute_varargs(rop.COND_CALL, [condbox, funcbox, box], + calldescr, False, False) def _get_virtualizable_field_index(self, fielddescr): # Get the index of a fielddescr. Must only be called for @@ -730,7 +755,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_vable(self, box, fielddescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fielddescr): return self._opimpl_getfield_gc_any(box, fielddescr) self.metainterp.check_synchronized_virtualizable() index = self._get_virtualizable_field_index(fielddescr) @@ -742,7 +767,7 @@ @arguments("box", "box", "descr", "orgpc") def _opimpl_setfield_vable(self, box, valuebox, fielddescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fielddescr): return self._opimpl_setfield_gc_any(box, valuebox, fielddescr) index = self._get_virtualizable_field_index(fielddescr) self.metainterp.virtualizable_boxes[index] = valuebox @@ -772,7 +797,7 @@ @arguments("box", "box", "descr", "descr", "orgpc") def _opimpl_getarrayitem_vable(self, box, indexbox, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) return self._opimpl_getarrayitem_gc_any(arraybox, indexbox, adescr) self.metainterp.check_synchronized_virtualizable() @@ -786,7 +811,7 @@ @arguments("box", "box", "box", "descr", "descr", "orgpc") def _opimpl_setarrayitem_vable(self, box, indexbox, valuebox, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) self._opimpl_setarrayitem_gc_any(arraybox, indexbox, valuebox, adescr) @@ -802,7 +827,7 @@ @arguments("box", "descr", "descr", "orgpc") def opimpl_arraylen_vable(self, box, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) return self.opimpl_arraylen_gc(arraybox, adescr) vinfo = self.metainterp.jitdriver_sd.virtualizable_info @@ -958,8 +983,9 @@ promoted_box = resbox.constbox() # This is GUARD_VALUE because GUARD_TRUE assumes the existance # of a label when computing resumepc - self.generate_guard(rop.GUARD_VALUE, resbox, [promoted_box], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_VALUE, resbox, + [promoted_box], + resumepc=orgpc) self.metainterp.replace_box(box, constbox) return constbox @@ -971,7 +997,8 @@ def opimpl_guard_class(self, box, orgpc): clsbox = self.cls_of_box(box) if not self.metainterp.heapcache.is_class_known(box): - self.generate_guard(rop.GUARD_CLASS, box, [clsbox], resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_CLASS, box, [clsbox], + resumepc=orgpc) self.metainterp.heapcache.class_now_known(box) return clsbox @@ -989,7 +1016,7 @@ def opimpl_jit_merge_point(self, jdindex, greenboxes, jcposition, redboxes, orgpc): resumedescr = compile.ResumeAtPositionDescr() - self.capture_resumedata(resumedescr, orgpc) + self.metainterp.capture_resumedata(resumedescr, orgpc) any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] @@ 
-1071,8 +1098,8 @@ # xxx hack if not self.metainterp.heapcache.is_class_known(exc_value_box): clsbox = self.cls_of_box(exc_value_box) - self.generate_guard(rop.GUARD_CLASS, exc_value_box, [clsbox], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_CLASS, exc_value_box, + [clsbox], resumepc=orgpc) self.metainterp.class_of_last_exc_is_const = True self.metainterp.last_exc_value_box = exc_value_box self.metainterp.popframe() @@ -1271,43 +1298,6 @@ except ChangeFrame: pass - def generate_guard(self, opnum, box=None, extraargs=[], resumepc=-1): - if isinstance(box, Const): # no need for a guard - return - metainterp = self.metainterp - if box is not None: - moreargs = [box] + extraargs - else: - moreargs = list(extraargs) - metainterp_sd = metainterp.staticdata - if opnum == rop.GUARD_NOT_FORCED: - resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, - metainterp.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = compile.ResumeGuardNotInvalidated() - else: - resumedescr = compile.ResumeGuardDescr() - guard_op = metainterp.history.record(opnum, moreargs, None, - descr=resumedescr) - self.capture_resumedata(resumedescr, resumepc) - self.metainterp.staticdata.profiler.count_ops(opnum, Counters.GUARDS) - # count - metainterp.attach_debug_info(guard_op) - return guard_op - - def capture_resumedata(self, resumedescr, resumepc=-1): - metainterp = self.metainterp - virtualizable_boxes = None - if (metainterp.jitdriver_sd.virtualizable_info is not None or - metainterp.jitdriver_sd.greenfield_info is not None): - virtualizable_boxes = metainterp.virtualizable_boxes - saved_pc = self.pc - if resumepc >= 0: - self.pc = resumepc - resume.capture_resumedata(metainterp.framestack, virtualizable_boxes, - metainterp.virtualref_boxes, resumedescr) - self.pc = saved_pc - def implement_guard_value(self, box, orgpc): """Promote the given Box into a Const. 
Note: be careful, it's a bit unclear what occurs if a single opcode needs to generate @@ -1316,8 +1306,8 @@ return box # no promotion needed, already a Const else: promoted_box = box.constbox() - self.generate_guard(rop.GUARD_VALUE, box, [promoted_box], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_VALUE, box, [promoted_box], + resumepc=orgpc) self.metainterp.replace_box(box, promoted_box) return promoted_box @@ -1411,7 +1401,7 @@ if resbox is not None: self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call() - self.generate_guard(rop.GUARD_NOT_FORCED, None) + self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) if vablebox is not None: self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) self.metainterp.handle_possible_exception() @@ -1660,6 +1650,7 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None + self.forced_virtualizable = None self.partial_trace = None self.retracing_from = -1 self.call_pure_results = args_dict_box() @@ -1783,6 +1774,45 @@ print jitcode.name raise AssertionError + def generate_guard(self, opnum, box=None, extraargs=[], resumepc=-1): + if isinstance(box, Const): # no need for a guard + return + if box is not None: + moreargs = [box] + extraargs + else: + moreargs = list(extraargs) + metainterp_sd = self.staticdata + if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: + resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, + self.jitdriver_sd) + elif opnum == rop.GUARD_NOT_INVALIDATED: + resumedescr = compile.ResumeGuardNotInvalidated() + else: + resumedescr = compile.ResumeGuardDescr() + guard_op = self.history.record(opnum, moreargs, None, + descr=resumedescr) + self.capture_resumedata(resumedescr, resumepc) + self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) + # count + self.attach_debug_info(guard_op) + return guard_op + + def capture_resumedata(self, resumedescr, resumepc=-1): + virtualizable_boxes = None + if (self.jitdriver_sd.virtualizable_info is not None or + self.jitdriver_sd.greenfield_info is not None): + virtualizable_boxes = self.virtualizable_boxes + saved_pc = 0 + if self.framestack: + frame = self.framestack[-1] + saved_pc = frame.pc + if resumepc >= 0: + frame.pc = resumepc + resume.capture_resumedata(self.framestack, virtualizable_boxes, + self.virtualref_boxes, resumedescr) + if self.framestack: + self.framestack[-1].pc = saved_pc + def create_empty_history(self): self.history = history.History() self.staticdata.stats.set_history(self.history) @@ -2253,8 +2283,8 @@ self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_done_with_this_frame(self, exitbox): - self.gen_store_back_in_virtualizable() # temporarily put a JUMP to a pseudo-loop + self.store_token_in_vable() sd = self.staticdata result_type = self.jitdriver_sd.result_type if result_type == history.VOID: @@ -2280,8 +2310,24 @@ if target_token is not token: compile.giveup() + def store_token_in_vable(self): + vinfo = self.jitdriver_sd.virtualizable_info + if vinfo is None: + return + vbox = self.virtualizable_boxes[-1] + if vbox is self.forced_virtualizable: + return # we already forced it by hand + force_token_box = history.BoxPtr() + # in case the force_token has not been recorded, record it here + # to make sure we know the virtualizable can be broken. 
However, the + # contents of the virtualizable should be generally correct + self.history.record(rop.FORCE_TOKEN, [], force_token_box) + self.history.record(rop.SETFIELD_GC, [vbox, force_token_box], + None, descr=vinfo.vable_token_descr) + self.generate_guard(rop.GUARD_NOT_FORCED_2, None) + def compile_exit_frame_with_exception(self, valuebox): - self.gen_store_back_in_virtualizable() + self.store_token_in_vable() sd = self.staticdata token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr self.history.record(rop.FINISH, [valuebox], None, descr=token) @@ -2420,27 +2466,25 @@ self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL def handle_possible_exception(self): - frame = self.framestack[-1] if self.last_exc_value_box is not None: exception_box = self.cpu.ts.cls_of_box(self.last_exc_value_box) - op = frame.generate_guard(rop.GUARD_EXCEPTION, - None, [exception_box]) + op = self.generate_guard(rop.GUARD_EXCEPTION, + None, [exception_box]) assert op is not None op.result = self.last_exc_value_box self.class_of_last_exc_is_const = True self.finishframe_exception() else: - frame.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) + self.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) def handle_possible_overflow_error(self): - frame = self.framestack[-1] if self.last_exc_value_box is not None: - frame.generate_guard(rop.GUARD_OVERFLOW, None) + self.generate_guard(rop.GUARD_OVERFLOW, None) assert isinstance(self.last_exc_value_box, Const) assert self.class_of_last_exc_is_const self.finishframe_exception() else: - frame.generate_guard(rop.GUARD_NO_OVERFLOW, None) + self.generate_guard(rop.GUARD_NO_OVERFLOW, None) def assert_no_exception(self): assert self.last_exc_value_box is None @@ -2467,12 +2511,13 @@ if vinfo is not None: self.virtualizable_boxes = virtualizable_boxes # just jumped away from assembler (case 4 in the comment in - # virtualizable.py) into tracing (case 2); check that vable_token - # is and stays NULL. Note the call to reset_vable_token() in - # warmstate.py. 
+ # virtualizable.py) into tracing (case 2); if we get the + # virtualizable from somewhere strange it might not be forced, + # do it virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.is_token_nonnull_gcref(virtualizable) + if vinfo.is_token_nonnull_gcref(virtualizable): + vinfo.reset_token_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # @@ -2508,11 +2553,20 @@ virtualizable) self.virtualizable_boxes.append(virtualizable_box) - def gen_store_back_in_virtualizable(self): + def gen_store_back_in_vable(self, box): vinfo = self.jitdriver_sd.virtualizable_info if vinfo is not None: # xxx only write back the fields really modified vbox = self.virtualizable_boxes[-1] + if vbox is not box: + # ignore the hint on non-standard virtualizable + # specifically, ignore it on a virtual + return + if self.forced_virtualizable is not None: + # this can happen only in strange cases, but we don't care + # it was already forced + return + self.forced_virtualizable = vbox for i in range(vinfo.num_static_extra_boxes): fieldbox = self.virtualizable_boxes[i] descr = vinfo.static_field_descrs[i] @@ -2529,6 +2583,9 @@ self.execute_and_record(rop.SETARRAYITEM_GC, descr, abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) + # we're during tracing, so we should not execute it + self.history.record(rop.SETFIELD_GC, [vbox, self.cpu.ts.CONST_NULL], + None, descr=vinfo.vable_token_descr) def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -397,6 +397,7 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set + 'GUARD_NOT_FORCED_2/0d', # same as GUARD_NOT_FORCED, but for finish() 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- @@ -488,6 +489,8 @@ 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', 'MARK_OPAQUE_PTR/1b', + # this one has no *visible* side effect, since the virtualizable + # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'SETARRAYITEM_GC/3d', diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -17,6 +17,9 @@ # because it needs to support optimize.py which encodes virtuals with # arbitrary cycles and also to compress the information +class AlreadyForced(Exception): + pass + class Snapshot(object): __slots__ = ('prev', 'boxes') @@ -51,20 +54,24 @@ def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, storage): - n = len(framestack)-1 - top = framestack[n] - _ensure_parent_resumedata(framestack, n) - frame_info_list = FrameInfo(top.parent_resumedata_frame_info_list, - top.jitcode, top.pc) - storage.rd_frame_info_list = frame_info_list - snapshot = Snapshot(top.parent_resumedata_snapshot, - top.get_list_of_active_boxes(False)) + n = len(framestack) - 1 if virtualizable_boxes is not None: boxes = virtualref_boxes + virtualizable_boxes else: boxes = virtualref_boxes[:] - snapshot = Snapshot(snapshot, boxes) - storage.rd_snapshot = snapshot + if n >= 0: + top = framestack[n] + _ensure_parent_resumedata(framestack, n) + frame_info_list = 
FrameInfo(top.parent_resumedata_frame_info_list, + top.jitcode, top.pc) + storage.rd_frame_info_list = frame_info_list + snapshot = Snapshot(top.parent_resumedata_snapshot, + top.get_list_of_active_boxes(False)) + snapshot = Snapshot(snapshot, boxes) + storage.rd_snapshot = snapshot + else: + storage.rd_frame_info_list = None + storage.rd_snapshot = Snapshot(None, boxes) # # The following is equivalent to the RPython-level declaration: @@ -1214,16 +1221,8 @@ return len(numb.nums) index = len(numb.nums) - 1 virtualizable = self.decode_ref(numb.nums[index]) - if self.resume_after_guard_not_forced == 1: - # in the middle of handle_async_forcing() - assert vinfo.is_token_nonnull_gcref(virtualizable) - vinfo.reset_token_gcref(virtualizable) - else: - # just jumped away from assembler (case 4 in the comment in - # virtualizable.py) into tracing (case 2); check that vable_token - # is and stays NULL. Note the call to reset_vable_token() in - # warmstate.py. - assert not vinfo.is_token_nonnull_gcref(virtualizable) + # just reset the token, we'll force it later + vinfo.reset_token_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -412,7 +412,6 @@ self.i8 = i8 self.i9 = i9 - def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -445,7 +444,6 @@ self.i8 = i8 self.i9 = i9 - def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -643,7 +641,7 @@ # exactly the same logic as the previous test, but with 'frame.j' # instead of just 'j' class Frame(object): - _virtualizable2_ = ['j'] + _virtualizable_ = ['j'] def __init__(self, j): self.j = j @@ -767,9 +765,9 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] - driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], + driver = JitDriver(greens = ['codeno'], reds = ['i', 's', 'frame'], virtualizables = ['frame'], get_printable_location = lambda codeno : str(codeno)) @@ -781,22 +779,26 @@ def portal(codeno, frame): i = 0 + s = 0 while i < 10: - driver.can_enter_jit(frame=frame, codeno=codeno, i=i) - driver.jit_merge_point(frame=frame, codeno=codeno, i=i) + driver.can_enter_jit(frame=frame, codeno=codeno, i=i, s=s) + driver.jit_merge_point(frame=frame, codeno=codeno, i=i, s=s) nextval = frame.thing.val if codeno == 0: subframe = Frame() subframe.thing = Thing(nextval) nextval = portal(1, subframe) + s += subframe.thing.val frame.thing = Thing(nextval + 1) i += 1 return frame.thing.val res = self.meta_interp(main, [0], inline=True) + self.check_resops(call=0, cond_call=0) # got removed by optimization assert res == main(0) def test_directly_call_assembler_virtualizable_reset_token(self): + py.test.skip("not applicable any more, I think") from rpython.rtyper.lltypesystem import lltype from rpython.rlib.debug import llinterpcall @@ -805,7 +807,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -856,7 +858,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -907,7 +909,7 @@ virtualizables = ['frame']) class Frame(object): - 
_virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, l, s): self = hint(self, access_directly=True, @@ -950,7 +952,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -344,7 +344,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, a, s): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -4,7 +4,8 @@ from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import get_translator +from rpython.jit.metainterp.warmspot import get_translator, get_stats +from rpython.jit.metainterp.resoperation import rop from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote, virtual_ref from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import hlstr @@ -26,7 +27,6 @@ return lltype_to_annotation(lltype.Void) def specialize_call(self, hop): - op = self.instance # the LLOp object that was called args_v = [hop.inputarg(hop.args_r[0], 0), hop.inputconst(lltype.Void, hop.args_v[1].value), hop.inputconst(lltype.Void, {})] @@ -46,8 +46,8 @@ ('vable_token', llmemory.GCREF), ('inst_x', lltype.Signed), ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), - hints = {'virtualizable2_accessor': FieldListAccessor()}) - XY._hints['virtualizable2_accessor'].initialize( + hints = {'virtualizable_accessor': FieldListAccessor()}) + XY._hints['virtualizable_accessor'].initialize( XY, {'inst_x': IR_IMMUTABLE, 'inst_node': IR_IMMUTABLE}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) @@ -141,11 +141,13 @@ n -= 1 def f(n): xy = self.setup() + promote_virtualizable(xy, 'inst_x') xy.inst_x = 10000 m = 10 while m > 0: g(xy, n) m -= 1 + promote_virtualizable(xy, 'inst_x') return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 @@ -200,8 +202,8 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_simple_loop(setfield_gc=1, getfield_gc=0) - self.check_resops(setfield_gc=2, getfield_gc=3) + self.check_simple_loop(setfield_gc=1, getfield_gc=0, cond_call=1) + self.check_resops(setfield_gc=2, getfield_gc=4) # ------------------------------ @@ -212,8 +214,8 @@ ('inst_x', lltype.Signed), ('inst_l1', lltype.Ptr(lltype.GcArray(lltype.Signed))), ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), - hints = {'virtualizable2_accessor': FieldListAccessor()}) - XY2._hints['virtualizable2_accessor'].initialize( + hints = {'virtualizable_accessor': FieldListAccessor()}) + XY2._hints['virtualizable_accessor'].initialize( XY2, {'inst_x': IR_IMMUTABLE, 'inst_l1': IR_IMMUTABLE_ARRAY, 'inst_l2': IR_IMMUTABLE_ARRAY}) @@ -278,6 +280,7 @@ while m > 0: g(xy2, n) m -= 1 + promote_virtualizable(xy2, 'inst_l2') return xy2.inst_l2[0] assert f(18) == 10360 res = self.meta_interp(f, 
[18]) @@ -381,7 +384,7 @@ res = self.meta_interp(f, [20], enable_opts='') assert res == expected self.check_simple_loop(setarrayitem_gc=1, setfield_gc=0, - getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=2) # ------------------------------ @@ -424,7 +427,9 @@ while m > 0: g(xy2, n) m -= 1 - return xy2.parent.inst_l2[0] + parent = xy2.parent + promote_virtualizable(parent, 'inst_l2') + return parent.inst_l2[0] assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 @@ -440,7 +445,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -469,7 +474,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, l, s): self.l = l @@ -504,7 +509,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -532,7 +537,7 @@ virtualizables = ['frame']) class BaseFrame(object): - _virtualizable2_ = ['x[*]'] + _virtualizable_ = ['x[*]'] def __init__(self, x): self.x = x @@ -563,7 +568,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -596,7 +601,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -636,7 +641,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -669,7 +674,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -706,7 +711,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class Y: pass @@ -751,7 +756,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class Y: pass @@ -801,7 +806,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class FooBarError(Exception): pass @@ -845,7 +850,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -882,7 +887,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -934,7 +939,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -971,7 +976,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -1005,7 +1010,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['stackpos', 'stack[*]'] + _virtualizable_ = ['stackpos', 'stack[*]'] def f(n): frame = Frame() @@ -1034,7 +1039,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1088,7 +1093,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1120,7 +1125,7 @@ 
virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y', 'z'] + _virtualizable_ = ['x', 'y', 'z'] def __init__(self, x, y, z=1): self = hint(self, access_directly=True) @@ -1155,7 +1160,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x[*]'] + _virtualizable_ = ['x[*]'] def __init__(self, x, y): self = hint(self, access_directly=True, @@ -1187,7 +1192,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1226,7 +1231,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -1266,7 +1271,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1310,7 +1315,7 @@ def test_inlining(self): class Frame(object): - _virtualizable2_ = ['x', 'next'] + _virtualizable_ = ['x', 'next'] def __init__(self, x): self = hint(self, access_directly=True) @@ -1345,7 +1350,7 @@ def test_guard_failure_in_inlined_function(self): class Frame(object): - _virtualizable2_ = ['n', 'next'] + _virtualizable_ = ['n', 'next'] def __init__(self, n): self = hint(self, access_directly=True) @@ -1399,7 +1404,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -1450,7 +1455,7 @@ ) class Frame(object): - _virtualizable2_ = ['x'] + _virtualizable_ = ['x'] def main(n): f = Frame() @@ -1469,6 +1474,113 @@ "int_add": 2, "jump": 1 }) + def test_frame_nonstandard_no_virtualizable(self): + + driver1 = JitDriver(greens=[], reds=['i', 's', 'frame']) + driver2 = JitDriver(greens=[], reds=['frame'], + virtualizables=['frame']) + + class Frame(object): + _virtualizable_ = ['x'] + + def g(frame): + driver2.jit_merge_point(frame=frame) + frame.x += 1 + return frame + + def f(): + i = 0 + s = 0 + frame = Frame() + frame.x = 0 + g(frame) + while i < 10: + driver1.jit_merge_point(frame=frame, s=s, i=i) + frame = g(frame) + s += frame.x + i += 1 + return s + + def main(): + res = 0 + for i in range(10): + res += f() + return res + + res = self.meta_interp(main, []) + assert res == main() + + def test_two_virtualizables_mixed(self): + driver1 = JitDriver(greens=[], reds=['i', 's', 'frame', + 'subframe']) + driver2 = JitDriver(greens=[], reds=['subframe'], + virtualizables=['subframe']) + + class Frame(object): + _virtualizable_ = ['x'] + + class SubFrame(object): + _virtualizable_ = ['x'] + + def g(subframe): + driver2.jit_merge_point(subframe=subframe) + subframe.x += 1 + + def f(): + i = 0 + frame = Frame() + frame.x = 0 + subframe = SubFrame() + subframe.x = 0 + s = 0 + while i < 10: + driver1.jit_merge_point(frame=frame, subframe=subframe, i=i, + s=s) + g(subframe) + s += subframe.x + i += 1 + return s + + res = self.meta_interp(f, []) + assert res == f() + + def test_force_virtualizable_by_hint(self): + class Frame(object): + _virtualizable_ = ['x'] + + driver = JitDriver(greens = [], reds = ['i', 'frame'], + virtualizables = ['frame']) + + def f(frame, i): + while i > 0: + driver.jit_merge_point(i=i, frame=frame) + i -= 1 + frame.x += 1 + hint(frame, force_virtualizable=True) + + def main(): + frame = Frame() + frame.x = 0 + s = 0 + for i in range(20): + f(frame, 4) + s += frame.x + return s 
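The hint(frame, force_virtualizable=True) call exercised in the test above is the user-facing half of this changeset: during tracing it reaches opimpl_hint_force_virtualizable, which calls gen_store_back_in_vable and writes the virtualizable's fields back into the heap object, so code outside the jit_merge_point sees up-to-date values instead of relying on the store-back that used to happen when leaving the frame. A minimal interpreter-style sketch of the same pattern, assuming only the documented rpython.rlib.jit API (run untranslated, the hints are no-ops):

    from rpython.rlib.jit import JitDriver, hint

    class Frame(object):
        _virtualizable_ = ['x']     # new spelling; was _virtualizable2_

    driver = JitDriver(greens=[], reds=['i', 'frame'],
                       virtualizables=['frame'])

    def interp(frame, i):
        while i > 0:
            driver.jit_merge_point(i=i, frame=frame)
            i -= 1
            frame.x += 1
        # force the fields back into the heap object before it escapes to
        # the caller (same idea as the generator case in virtualizable.rst)
        hint(frame, force_virtualizable=True)
        return frame.x
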
+ + r = self.meta_interp(main, []) + assert r == main() + # fish the bridge + loop = get_stats().get_all_loops()[0] + d = loop.operations[-3].getdescr() + bridge = getattr(d, '_llgraph_bridge', None) + if bridge is not None: + l = [op for op in + bridge.operations if op.getopnum() == rop.SETFIELD_GC] + assert "'inst_x'" in str(l[0].getdescr().realdescrref()) From noreply at buildbot.pypy.org Tue Aug 6 19:09:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 19:09:07 +0200 (CEST) Subject: [pypy-commit] pypy r15-for-shadowstack: hg merge default Message-ID: <20130806170907.EB3AB1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: r15-for-shadowstack Changeset: r65980:7ead82da0adb Date: 2013-08-06 19:08 +0200 http://bitbucket.org/pypy/pypy/changeset/7ead82da0adb/ Log: hg merge default diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,7 +3,6 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread -from pypy.module.thread import os_thread PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -45,7 +44,10 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): - os_thread.setup_threads(space) + if space.config.translation.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + #else: nothing to do @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): From noreply at buildbot.pypy.org Tue Aug 6 19:24:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 19:24:39 +0200 (CEST) Subject: [pypy-commit] pypy default: pom pom pom Message-ID: <20130806172439.5327B1C094F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65981:107b3a5b1389 Date: 2013-08-06 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/107b3a5b1389/ Log: pom pom pom diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -250,7 +250,9 @@ interpreter lock must be held.""" Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) - space.threadlocals.leave_thread(space) + if space.config.translation.thread: + space.threadlocals.leave_thread(space) + #else: nothing to do space.getexecutioncontext().cleanup_cpyext_state() rthread.gc_thread_die() From noreply at buildbot.pypy.org Tue Aug 6 19:26:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 19:26:25 +0200 (CEST) Subject: [pypy-commit] pypy r15-for-shadowstack: hg merge default Message-ID: <20130806172625.F337E1C094F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: r15-for-shadowstack Changeset: r65982:c5dc72722b4d Date: 2013-08-06 19:25 +0200 http://bitbucket.org/pypy/pypy/changeset/c5dc72722b4d/ Log: hg merge default diff too long, truncating to 2000 out of 2515 lines diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. 
_Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. + +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. Frames +can escape and they can also be allocated already at the moment we enter the +JIT. In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). + +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. + It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -66,3 +66,10 @@ .. branch: kill-typesystem Remove the "type system" abstraction, now that there is only ever one kind of type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. 
branch: reflex-support diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -176,9 +176,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -260,7 +261,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -330,7 +331,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -340,7 +341,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -355,7 +356,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -389,7 +390,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -476,7 +477,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -490,7 +491,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -522,7 +523,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -553,12 +554,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -609,10 +610,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." 
return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -638,8 +639,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -649,7 +650,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -667,7 +668,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -250,7 +250,9 @@ interpreter lock must be held.""" Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) - space.threadlocals.leave_thread(space) + if space.config.translation.thread: + space.threadlocals.leave_thread(space) + #else: nothing to do space.getexecutioncontext().cleanup_cpyext_state() rthread.gc_thread_die() diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,18 +12,18 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap -PyFrame._virtualizable2_ = ['last_instr', 'pycode', - 'valuestackdepth', 'locals_stack_w[*]', - 'cells[*]', - 'last_exception', - 'lastblock', - 'is_being_profiled', - 'w_globals', - 'w_f_trace', - ] +PyFrame._virtualizable_ = ['last_instr', 'pycode', + 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', + 'last_exception', + 'lastblock', + 'is_being_profiled', + 'w_globals', + 'w_f_trace', + ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] @@ -73,7 +73,13 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: + self.last_exception = None return self.popvalue() def jump_absolute(self, jumpto, ec): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) 
i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -14,9 +14,9 @@ return lltype.nullptr(T) interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLtypeCPU + from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now - apply_jit(interp, graph, LLtypeCPU) + apply_jit(interp, graph, LLGraphCPU) def apply_jit(interp, graph, CPUClass): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3088,7 +3088,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class B(A): def meth(self): return self @@ -3128,7 +3128,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class I: pass diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -959,16 +959,6 @@ pmc.B_offs(self.mc.currpos(), c.EQ) return pos - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - tmploc = self._regalloc.get_scratch_reg(INT) - self.mov_loc_loc(vloc, r.ip) - self.mc.MOV_ri(tmploc.value, 0) - self.mc.STR_ri(tmploc.value, r.ip.value, ofs) - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from (tmploc, 0) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -101,6 +101,9 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) + def get_vinfo(self): + return self.vinfo + def __repr__(self): return 'FieldDescr(%r, %r)' % (self.S, self.fieldname) @@ -170,7 +173,7 @@ translate_support_code = False is_llgraph = True - def __init__(self, rtyper, stats=None, *ignored_args, **ignored_kwds): + def __init__(self, rtyper, stats=None, *ignored_args, **kwds): model.AbstractCPU.__init__(self) self.rtyper = rtyper self.llinterp = LLInterpreter(rtyper) @@ -178,6 +181,7 @@ class MiniStats: pass self.stats = stats or MiniStats() + self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): clt = model.CompiledLoopToken(self, looptoken.number) @@ -316,6 +320,8 @@ except KeyError: descr = FieldDescr(S, fieldname) self.descrs[key] = descr + if self.vinfo_for_tests is not None: + descr.vinfo = self.vinfo_for_tests return descr def arraydescrof(self, A): @@ -600,6 +606,7 @@ forced_deadframe = None overflow_flag = False last_exception = None + force_guard_op = None def __init__(self, cpu, argboxes, args): self.env = {} @@ -766,6 +773,8 @@ if self.forced_deadframe is not None: saved_data = self.forced_deadframe._saved_data self.fail_guard(descr, saved_data) + self.force_guard_op = self.current_op + execute_guard_not_forced_2 = execute_guard_not_forced def execute_guard_not_invalidated(self, descr): if self.lltrace.invalid: @@ -887,7 +896,6 @@ # res = CALL assembler_call_helper(pframe) # jmp @done # @fastpath: 
- # RESET_VABLE # res = GETFIELD(pframe, 'result') # @done: # @@ -907,25 +915,17 @@ vable = lltype.nullptr(llmemory.GCREF.TO) # # Emulate the fast path - def reset_vable(jd, vable): - if jd.index_of_virtualizable != -1: - fielddescr = jd.vable_token_descr - NULL = lltype.nullptr(llmemory.GCREF.TO) - self.cpu.bh_setfield_gc(vable, NULL, fielddescr) + # faildescr = self.cpu.get_latest_descr(pframe) if faildescr == self.cpu.done_with_this_frame_descr_int: - reset_vable(jd, vable) return self.cpu.get_int_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_ref: - reset_vable(jd, vable) return self.cpu.get_ref_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_float: - reset_vable(jd, vable) return self.cpu.get_float_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_void: - reset_vable(jd, vable) return None - # + assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: result = assembler_helper_ptr(pframe, vable) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt) + ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -24,6 +24,7 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): + assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr self.failargs = failargs @@ -232,11 +233,8 @@ jmp_location = self._call_assembler_patch_je(result_loc, je_location) - # Path B: fast path. Must load the return value, and reset the token + # Path B: fast path. 
Must load the return value - # Reset the vable token --- XXX really too much special logic here:-( - if jd.index_of_virtualizable >= 0: - self._call_assembler_reset_vtoken(jd, vloc) # self._call_assembler_load_result(op, result_loc) # diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -76,7 +76,13 @@ FLAG_STRUCT = 'X' FLAG_VOID = 'V' -class FieldDescr(AbstractDescr): +class ArrayOrFieldDescr(AbstractDescr): + vinfo = None + + def get_vinfo(self): + return self.vinfo + +class FieldDescr(ArrayOrFieldDescr): name = '' offset = 0 # help translation field_size = 0 @@ -150,12 +156,13 @@ # ____________________________________________________________ # ArrayDescrs -class ArrayDescr(AbstractDescr): +class ArrayDescr(ArrayOrFieldDescr): tid = 0 basesize = 0 # workaround for the annotator itemsize = 0 lendescr = None flag = '\x00' + vinfo = None def __init__(self, basesize, itemsize, lendescr, flag): self.basesize = basesize diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -23,7 +23,7 @@ # - floats neg and abs class Frame(object): - _virtualizable2_ = ['i'] + _virtualizable_ = ['i'] def __init__(self, i): self.i = i @@ -98,7 +98,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -6,7 +6,7 @@ total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 - total_freed_bridges = 0 + total_freed_bridges = 0 # for heaptracker # _all_size_descrs_with_vtable = None @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -294,7 +294,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -79,6 +79,7 @@ allblocks) self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.pending_guard_tokens = None @@ -1846,7 +1847,11 @@ self.mov(fail_descr_loc, RawEbpLoc(ofs)) arglist = op.getarglist() if arglist and arglist[0].type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(1) # rax + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -1991,15 +1996,6 @@ # return jmp_location - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - vtoken_ofs = 
fielddescr.offset - self.mc.MOV(edx, vloc) # we know vloc is on the current frame - self.mc.MOV_mi((edx.value, vtoken_ofs), 0) - # in the line above, TOKEN_NONE = 0 - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from the dead frame's value index 0 @@ -2326,6 +2322,15 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_location - 1, chr(offset)) + def store_force_descr(self, op, fail_locs, frame_depth): + guard_token = self.implement_guard_recovery(op.opnum, + op.getdescr(), + op.getfailargs(), + fail_locs, frame_depth) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(0, guard_token) + def force_token(self, reg): # XXX kill me assert isinstance(reg, RegLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -22,7 +22,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint @@ -1332,6 +1332,13 @@ #if jump_op is not None and jump_op.getdescr() is descr: # self._compute_hint_frame_locations_from_descr(descr) + def consider_guard_not_forced_2(self, op): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = [self.loc(v) for v in op.getfailargs()] + self.assembler.store_force_descr(op, fail_locs, + self.fm.get_frame_depth()) + self.possibly_free_vars(op.getfailargs()) + def consider_keepalive(self, op): pass diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -83,6 +83,7 @@ OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 + OS_JIT_FORCE_VIRTUALIZABLE = 121 # for debugging: _OS_CANRAISE = set([ @@ -165,6 +166,9 @@ EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) +EffectInfo.LEAST_GENERAL = EffectInfo([], [], [], [], + EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + can_invalidate=False) def effectinfo_from_writeanalyze(effects, cpu, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -521,6 +521,8 @@ op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], op.result) return [SpaceOperation('-live-', [], None), op1, None] + if hints.get('force_virtualizable'): + return SpaceOperation('hint_force_virtualizable', [op.args[0]], None) else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) diff --git a/rpython/jit/codewriter/test/test_policy.py b/rpython/jit/codewriter/test/test_policy.py --- a/rpython/jit/codewriter/test/test_policy.py +++ b/rpython/jit/codewriter/test/test_policy.py @@ -131,7 +131,7 @@ def test_access_directly_but_not_seen(): class X: - _virtualizable2_ = ["a"] + _virtualizable_ = ["a"] def h(x, y): w = 0 for i in range(y): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1320,6 +1320,10 @@ from 
rpython.jit.metainterp import quasiimmut quasiimmut.do_force_quasi_immutable(cpu, struct, mutatefielddescr) + @arguments("r") + def bhimpl_hint_force_virtualizable(r): + pass + @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -708,6 +708,8 @@ rstack._stack_criticalcode_start() try: deadframe = cpu.force(token) + # this should set descr to ResumeGuardForceDescr, if it + # was not that already faildescr = cpu.get_latest_descr(deadframe) assert isinstance(faildescr, ResumeGuardForcedDescr) faildescr.handle_async_forcing(deadframe) @@ -715,12 +717,18 @@ rstack._stack_criticalcode_stop() def handle_async_forcing(self, deadframe): - from rpython.jit.metainterp.resume import force_from_resumedata + from rpython.jit.metainterp.resume import (force_from_resumedata, + AlreadyForced) metainterp_sd = self.metainterp_sd vinfo = self.jitdriver_sd.virtualizable_info ginfo = self.jitdriver_sd.greenfield_info - all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, - vinfo, ginfo) + # there is some chance that this is already forced. In this case + # the virtualizable would have a token = NULL + try: + all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, + vinfo, ginfo) + except AlreadyForced: + return # The virtualizable data was stored on the real virtualizable above. # Handle all_virtuals: keep them for later blackholing from the # future failure of the GUARD_NOT_FORCED diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -151,6 +151,8 @@ descr_ptr = cpu.ts.cast_to_baseclass(descr_gcref) return cast_base_ptr_to_instance(AbstractDescr, descr_ptr) + def get_vinfo(self): + raise NotImplementedError class AbstractFailDescr(AbstractDescr): index = -1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5101,6 +5101,15 @@ } self.optimize_loop(ops, expected, call_pure_results) + def test_guard_not_forced_2_virtual(self): + ops = """ + [i0] + p0 = new_array(3, descr=arraydescr) + guard_not_forced_2() [p0] + finish(p0) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7086,6 +7086,19 @@ """ self.optimize_loop(ops, expected) + def test_force_virtualizable_virtual(self): + ops = """ + [i0] + p1 = new_with_vtable(ConstClass(node_vtable)) + cond_call(1, 123, p1, descr=clear_vable) + jump(i0) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + def test_setgetfield_counter(self): ops = """ [p1] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -254,12 +254,19 @@ asmdescr = LoopToken() # it can be whatever, it's not a 
descr though from rpython.jit.metainterp.virtualref import VirtualRefInfo + class FakeWarmRunnerDesc: pass FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced + FUNC = lltype.FuncType([], lltype.Void) + ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + can_invalidate=False, + oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) + clear_vable = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei) + jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -484,6 +484,8 @@ class OptVirtualize(optimizer.Optimization): "Virtualize objects until they escape." + _last_guard_not_forced_2 = None + def new(self): return OptVirtualize() @@ -527,6 +529,20 @@ return self.emit_operation(op) + def optimize_GUARD_NOT_FORCED_2(self, op): + self._last_guard_not_forced_2 = op + + def optimize_FINISH(self, op): + if self._last_guard_not_forced_2 is not None: + guard_op = self._last_guard_not_forced_2 + self.emit_operation(op) + guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) + i = len(self.optimizer._newoperations) - 1 + assert i >= 0 + self.optimizer._newoperations.insert(i, guard_op) + else: + self.emit_operation(op) + def optimize_CALL_MAY_FORCE(self, op): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex @@ -535,6 +551,15 @@ return self.emit_operation(op) + def optimize_COND_CALL(self, op): + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + value = self.getvalue(op.getarg(2)) + if value.is_virtual(): + return + self.emit_operation(op) + def optimize_VIRTUAL_REF(self, op): # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -657,6 +682,11 @@ self.do_RAW_MALLOC_VARSIZE_CHAR(op) elif effectinfo.oopspecindex == EffectInfo.OS_RAW_FREE: self.do_RAW_FREE(op) + elif effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + # we might end up having CALL here instead of COND_CALL + value = self.getvalue(op.getarg(1)) + if value.is_virtual(): + return else: self.emit_operation(op) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -18,7 +18,7 @@ from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.lltypesystem import lltype, rclass +from rpython.rtyper.lltypesystem import lltype, rclass, rffi @@ -313,7 +313,7 @@ opnum = rop.GUARD_TRUE else: opnum = rop.GUARD_FALSE - self.generate_guard(opnum, box) + self.metainterp.generate_guard(opnum, box) if not switchcase: self.pc = target @@ -341,10 +341,12 @@ value = box.nonnull() if value: if not self.metainterp.heapcache.is_class_known(box): - self.generate_guard(rop.GUARD_NONNULL, box, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_NONNULL, box, + resumepc=orgpc) else: if not isinstance(box, Const): - self.generate_guard(rop.GUARD_ISNULL, box, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_ISNULL, box, + resumepc=orgpc) 
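The reworked _nonstandard_virtualizable below now also receives the field descriptor, so it can compare fielddescr.get_vinfo() against the JIT driver's virtualizable_info; a box that is neither the standard virtualizable nor known by the heapcache to be unescaped gets an explicit forcing sequence recorded in the trace by the new emit_force_virtualizable helper (also in this pyjitpl.py diff). Schematically (not exact resop syntax):

    token = getfield_gc(box, descr=vable_token_descr)
    cond  = ptr_ne(token, NULL)
    cond_call(cond, clear_vable_ptr, box, descr=clear_vable_descr)

i.e. the clear_vable helper only runs when the vable token is still set; the automatic vable-token reset that the backends used to do in call_assembler is removed in the llgraph/x86/arm hunks earlier in this diff.
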
promoted_box = box.constbox() self.metainterp.replace_box(box, promoted_box) return value @@ -604,7 +606,7 @@ def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info if (ginfo is not None and fielddescr in ginfo.green_field_descrs - and not self._nonstandard_virtualizable(pc, box)): + and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't # record any operation resbox = executor.execute(self.metainterp.cpu, self.metainterp, @@ -672,6 +674,10 @@ opimpl_raw_load_i = _opimpl_raw_load opimpl_raw_load_f = _opimpl_raw_load + @arguments("box") + def opimpl_hint_force_virtualizable(self, box): + self.metainterp.gen_store_back_in_vable(box) + @arguments("box", "descr", "descr", "orgpc") def opimpl_record_quasiimmut_field(self, box, fielddescr, mutatefielddescr, orgpc): @@ -680,7 +686,8 @@ descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], None, descr=descr) - self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_NOT_INVALIDATED, + resumepc=orgpc) @arguments("box", "descr", "orgpc") def opimpl_jit_force_quasi_immutable(self, box, mutatefielddescr, orgpc): @@ -699,28 +706,46 @@ do_force_quasi_immutable(self.metainterp.cpu, box.getref_base(), mutatefielddescr) raise SwitchToBlackhole(Counters.ABORT_FORCE_QUASIIMMUT) - self.generate_guard(rop.GUARD_ISNULL, mutatebox, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_ISNULL, mutatebox, + resumepc=orgpc) - def _nonstandard_virtualizable(self, pc, box): + def _nonstandard_virtualizable(self, pc, box, fielddescr): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] - if (self.metainterp.jitdriver_sd.virtualizable_info is None and - self.metainterp.jitdriver_sd.greenfield_info is None): - return True # can occur in case of multiple JITs - standard_box = self.metainterp.virtualizable_boxes[-1] - if standard_box is box: - return False if self.metainterp.heapcache.is_nonstandard_virtualizable(box): return True - eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, - box, standard_box) - eqbox = self.implement_guard_value(eqbox, pc) - isstandard = eqbox.getint() - if isstandard: - self.metainterp.replace_box(box, standard_box) - else: - self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) - return not isstandard + if box is self.metainterp.forced_virtualizable: + self.metainterp.forced_virtualizable = None + if (self.metainterp.jitdriver_sd.virtualizable_info is not None or + self.metainterp.jitdriver_sd.greenfield_info is not None): + standard_box = self.metainterp.virtualizable_boxes[-1] + if standard_box is box: + return False + vinfo = self.metainterp.jitdriver_sd.virtualizable_info + if vinfo is fielddescr.get_vinfo(): + eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, + box, standard_box) + eqbox = self.implement_guard_value(eqbox, pc) + isstandard = eqbox.getint() + if isstandard: + self.metainterp.replace_box(box, standard_box) + return False + if not self.metainterp.heapcache.is_unescaped(box): + self.emit_force_virtualizable(fielddescr, box) + self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) + return True + + def emit_force_virtualizable(self, fielddescr, box): + vinfo = fielddescr.get_vinfo() + token_descr = vinfo.vable_token_descr + mi = self.metainterp + tokenbox 
= mi.execute_and_record(rop.GETFIELD_GC, token_descr, box) + condbox = mi.execute_and_record(rop.PTR_NE, None, tokenbox, + history.CONST_NULL) + funcbox = ConstInt(rffi.cast(lltype.Signed, vinfo.clear_vable_ptr)) + calldescr = vinfo.clear_vable_descr + self.execute_varargs(rop.COND_CALL, [condbox, funcbox, box], + calldescr, False, False) def _get_virtualizable_field_index(self, fielddescr): # Get the index of a fielddescr. Must only be called for @@ -730,7 +755,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_vable(self, box, fielddescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fielddescr): return self._opimpl_getfield_gc_any(box, fielddescr) self.metainterp.check_synchronized_virtualizable() index = self._get_virtualizable_field_index(fielddescr) @@ -742,7 +767,7 @@ @arguments("box", "box", "descr", "orgpc") def _opimpl_setfield_vable(self, box, valuebox, fielddescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fielddescr): return self._opimpl_setfield_gc_any(box, valuebox, fielddescr) index = self._get_virtualizable_field_index(fielddescr) self.metainterp.virtualizable_boxes[index] = valuebox @@ -772,7 +797,7 @@ @arguments("box", "box", "descr", "descr", "orgpc") def _opimpl_getarrayitem_vable(self, box, indexbox, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) return self._opimpl_getarrayitem_gc_any(arraybox, indexbox, adescr) self.metainterp.check_synchronized_virtualizable() @@ -786,7 +811,7 @@ @arguments("box", "box", "box", "descr", "descr", "orgpc") def _opimpl_setarrayitem_vable(self, box, indexbox, valuebox, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) self._opimpl_setarrayitem_gc_any(arraybox, indexbox, valuebox, adescr) @@ -802,7 +827,7 @@ @arguments("box", "descr", "descr", "orgpc") def opimpl_arraylen_vable(self, box, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) return self.opimpl_arraylen_gc(arraybox, adescr) vinfo = self.metainterp.jitdriver_sd.virtualizable_info @@ -958,8 +983,9 @@ promoted_box = resbox.constbox() # This is GUARD_VALUE because GUARD_TRUE assumes the existance # of a label when computing resumepc - self.generate_guard(rop.GUARD_VALUE, resbox, [promoted_box], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_VALUE, resbox, + [promoted_box], + resumepc=orgpc) self.metainterp.replace_box(box, constbox) return constbox @@ -971,7 +997,8 @@ def opimpl_guard_class(self, box, orgpc): clsbox = self.cls_of_box(box) if not self.metainterp.heapcache.is_class_known(box): - self.generate_guard(rop.GUARD_CLASS, box, [clsbox], resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_CLASS, box, [clsbox], + resumepc=orgpc) self.metainterp.heapcache.class_now_known(box) return clsbox @@ -989,7 +1016,7 @@ def opimpl_jit_merge_point(self, jdindex, greenboxes, jcposition, redboxes, orgpc): resumedescr = compile.ResumeAtPositionDescr() - self.capture_resumedata(resumedescr, orgpc) + self.metainterp.capture_resumedata(resumedescr, orgpc) any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] @@ 
-1071,8 +1098,8 @@ # xxx hack if not self.metainterp.heapcache.is_class_known(exc_value_box): clsbox = self.cls_of_box(exc_value_box) - self.generate_guard(rop.GUARD_CLASS, exc_value_box, [clsbox], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_CLASS, exc_value_box, + [clsbox], resumepc=orgpc) self.metainterp.class_of_last_exc_is_const = True self.metainterp.last_exc_value_box = exc_value_box self.metainterp.popframe() @@ -1271,43 +1298,6 @@ except ChangeFrame: pass - def generate_guard(self, opnum, box=None, extraargs=[], resumepc=-1): - if isinstance(box, Const): # no need for a guard - return - metainterp = self.metainterp - if box is not None: - moreargs = [box] + extraargs - else: - moreargs = list(extraargs) - metainterp_sd = metainterp.staticdata - if opnum == rop.GUARD_NOT_FORCED: - resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, - metainterp.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = compile.ResumeGuardNotInvalidated() - else: - resumedescr = compile.ResumeGuardDescr() - guard_op = metainterp.history.record(opnum, moreargs, None, - descr=resumedescr) - self.capture_resumedata(resumedescr, resumepc) - self.metainterp.staticdata.profiler.count_ops(opnum, Counters.GUARDS) - # count - metainterp.attach_debug_info(guard_op) - return guard_op - - def capture_resumedata(self, resumedescr, resumepc=-1): - metainterp = self.metainterp - virtualizable_boxes = None - if (metainterp.jitdriver_sd.virtualizable_info is not None or - metainterp.jitdriver_sd.greenfield_info is not None): - virtualizable_boxes = metainterp.virtualizable_boxes - saved_pc = self.pc - if resumepc >= 0: - self.pc = resumepc - resume.capture_resumedata(metainterp.framestack, virtualizable_boxes, - metainterp.virtualref_boxes, resumedescr) - self.pc = saved_pc - def implement_guard_value(self, box, orgpc): """Promote the given Box into a Const. 
Note: be careful, it's a bit unclear what occurs if a single opcode needs to generate @@ -1316,8 +1306,8 @@ return box # no promotion needed, already a Const else: promoted_box = box.constbox() - self.generate_guard(rop.GUARD_VALUE, box, [promoted_box], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_VALUE, box, [promoted_box], + resumepc=orgpc) self.metainterp.replace_box(box, promoted_box) return promoted_box @@ -1411,7 +1401,7 @@ if resbox is not None: self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call() - self.generate_guard(rop.GUARD_NOT_FORCED, None) + self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) if vablebox is not None: self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) self.metainterp.handle_possible_exception() @@ -1660,6 +1650,7 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None + self.forced_virtualizable = None self.partial_trace = None self.retracing_from = -1 self.call_pure_results = args_dict_box() @@ -1783,6 +1774,45 @@ print jitcode.name raise AssertionError + def generate_guard(self, opnum, box=None, extraargs=[], resumepc=-1): + if isinstance(box, Const): # no need for a guard + return + if box is not None: + moreargs = [box] + extraargs + else: + moreargs = list(extraargs) + metainterp_sd = self.staticdata + if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: + resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, + self.jitdriver_sd) + elif opnum == rop.GUARD_NOT_INVALIDATED: + resumedescr = compile.ResumeGuardNotInvalidated() + else: + resumedescr = compile.ResumeGuardDescr() + guard_op = self.history.record(opnum, moreargs, None, + descr=resumedescr) + self.capture_resumedata(resumedescr, resumepc) + self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) + # count + self.attach_debug_info(guard_op) + return guard_op + + def capture_resumedata(self, resumedescr, resumepc=-1): + virtualizable_boxes = None + if (self.jitdriver_sd.virtualizable_info is not None or + self.jitdriver_sd.greenfield_info is not None): + virtualizable_boxes = self.virtualizable_boxes + saved_pc = 0 + if self.framestack: + frame = self.framestack[-1] + saved_pc = frame.pc + if resumepc >= 0: + frame.pc = resumepc + resume.capture_resumedata(self.framestack, virtualizable_boxes, + self.virtualref_boxes, resumedescr) + if self.framestack: + self.framestack[-1].pc = saved_pc + def create_empty_history(self): self.history = history.History() self.staticdata.stats.set_history(self.history) @@ -2253,8 +2283,8 @@ self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_done_with_this_frame(self, exitbox): - self.gen_store_back_in_virtualizable() # temporarily put a JUMP to a pseudo-loop + self.store_token_in_vable() sd = self.staticdata result_type = self.jitdriver_sd.result_type if result_type == history.VOID: @@ -2280,8 +2310,24 @@ if target_token is not token: compile.giveup() + def store_token_in_vable(self): + vinfo = self.jitdriver_sd.virtualizable_info + if vinfo is None: + return + vbox = self.virtualizable_boxes[-1] + if vbox is self.forced_virtualizable: + return # we already forced it by hand + force_token_box = history.BoxPtr() + # in case the force_token has not been recorded, record it here + # to make sure we know the virtualizable can be broken. 
However, the + # contents of the virtualizable should be generally correct + self.history.record(rop.FORCE_TOKEN, [], force_token_box) + self.history.record(rop.SETFIELD_GC, [vbox, force_token_box], + None, descr=vinfo.vable_token_descr) + self.generate_guard(rop.GUARD_NOT_FORCED_2, None) + def compile_exit_frame_with_exception(self, valuebox): - self.gen_store_back_in_virtualizable() + self.store_token_in_vable() sd = self.staticdata token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr self.history.record(rop.FINISH, [valuebox], None, descr=token) @@ -2420,27 +2466,25 @@ self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL def handle_possible_exception(self): - frame = self.framestack[-1] if self.last_exc_value_box is not None: exception_box = self.cpu.ts.cls_of_box(self.last_exc_value_box) - op = frame.generate_guard(rop.GUARD_EXCEPTION, - None, [exception_box]) + op = self.generate_guard(rop.GUARD_EXCEPTION, + None, [exception_box]) assert op is not None op.result = self.last_exc_value_box self.class_of_last_exc_is_const = True self.finishframe_exception() else: - frame.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) + self.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) def handle_possible_overflow_error(self): - frame = self.framestack[-1] if self.last_exc_value_box is not None: - frame.generate_guard(rop.GUARD_OVERFLOW, None) + self.generate_guard(rop.GUARD_OVERFLOW, None) assert isinstance(self.last_exc_value_box, Const) assert self.class_of_last_exc_is_const self.finishframe_exception() else: - frame.generate_guard(rop.GUARD_NO_OVERFLOW, None) + self.generate_guard(rop.GUARD_NO_OVERFLOW, None) def assert_no_exception(self): assert self.last_exc_value_box is None @@ -2467,12 +2511,13 @@ if vinfo is not None: self.virtualizable_boxes = virtualizable_boxes # just jumped away from assembler (case 4 in the comment in - # virtualizable.py) into tracing (case 2); check that vable_token - # is and stays NULL. Note the call to reset_vable_token() in - # warmstate.py. 
+ # virtualizable.py) into tracing (case 2); if we get the + # virtualizable from somewhere strange it might not be forced, + # do it virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.is_token_nonnull_gcref(virtualizable) + if vinfo.is_token_nonnull_gcref(virtualizable): + vinfo.reset_token_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # @@ -2508,11 +2553,20 @@ virtualizable) self.virtualizable_boxes.append(virtualizable_box) - def gen_store_back_in_virtualizable(self): + def gen_store_back_in_vable(self, box): vinfo = self.jitdriver_sd.virtualizable_info if vinfo is not None: # xxx only write back the fields really modified vbox = self.virtualizable_boxes[-1] + if vbox is not box: + # ignore the hint on non-standard virtualizable + # specifically, ignore it on a virtual + return + if self.forced_virtualizable is not None: + # this can happen only in strange cases, but we don't care + # it was already forced + return + self.forced_virtualizable = vbox for i in range(vinfo.num_static_extra_boxes): fieldbox = self.virtualizable_boxes[i] descr = vinfo.static_field_descrs[i] @@ -2529,6 +2583,9 @@ self.execute_and_record(rop.SETARRAYITEM_GC, descr, abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) + # we're during tracing, so we should not execute it + self.history.record(rop.SETFIELD_GC, [vbox, self.cpu.ts.CONST_NULL], + None, descr=vinfo.vable_token_descr) def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -397,6 +397,7 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set + 'GUARD_NOT_FORCED_2/0d', # same as GUARD_NOT_FORCED, but for finish() 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- @@ -488,6 +489,8 @@ 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', 'MARK_OPAQUE_PTR/1b', + # this one has no *visible* side effect, since the virtualizable + # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'SETARRAYITEM_GC/3d', diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -17,6 +17,9 @@ # because it needs to support optimize.py which encodes virtuals with # arbitrary cycles and also to compress the information +class AlreadyForced(Exception): + pass + class Snapshot(object): __slots__ = ('prev', 'boxes') @@ -51,20 +54,24 @@ def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, storage): - n = len(framestack)-1 - top = framestack[n] - _ensure_parent_resumedata(framestack, n) - frame_info_list = FrameInfo(top.parent_resumedata_frame_info_list, - top.jitcode, top.pc) - storage.rd_frame_info_list = frame_info_list - snapshot = Snapshot(top.parent_resumedata_snapshot, - top.get_list_of_active_boxes(False)) + n = len(framestack) - 1 if virtualizable_boxes is not None: boxes = virtualref_boxes + virtualizable_boxes else: boxes = virtualref_boxes[:] - snapshot = Snapshot(snapshot, boxes) - storage.rd_snapshot = snapshot + if n >= 0: + top = framestack[n] + _ensure_parent_resumedata(framestack, n) + frame_info_list = 
FrameInfo(top.parent_resumedata_frame_info_list, + top.jitcode, top.pc) + storage.rd_frame_info_list = frame_info_list + snapshot = Snapshot(top.parent_resumedata_snapshot, + top.get_list_of_active_boxes(False)) + snapshot = Snapshot(snapshot, boxes) + storage.rd_snapshot = snapshot + else: + storage.rd_frame_info_list = None + storage.rd_snapshot = Snapshot(None, boxes) # # The following is equivalent to the RPython-level declaration: @@ -1214,16 +1221,8 @@ return len(numb.nums) index = len(numb.nums) - 1 virtualizable = self.decode_ref(numb.nums[index]) - if self.resume_after_guard_not_forced == 1: - # in the middle of handle_async_forcing() - assert vinfo.is_token_nonnull_gcref(virtualizable) - vinfo.reset_token_gcref(virtualizable) - else: - # just jumped away from assembler (case 4 in the comment in - # virtualizable.py) into tracing (case 2); check that vable_token - # is and stays NULL. Note the call to reset_vable_token() in - # warmstate.py. - assert not vinfo.is_token_nonnull_gcref(virtualizable) + # just reset the token, we'll force it later + vinfo.reset_token_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -412,7 +412,6 @@ self.i8 = i8 self.i9 = i9 - def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -445,7 +444,6 @@ self.i8 = i8 self.i9 = i9 - def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -643,7 +641,7 @@ # exactly the same logic as the previous test, but with 'frame.j' # instead of just 'j' class Frame(object): - _virtualizable2_ = ['j'] + _virtualizable_ = ['j'] def __init__(self, j): self.j = j @@ -767,9 +765,9 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] - driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], + driver = JitDriver(greens = ['codeno'], reds = ['i', 's', 'frame'], virtualizables = ['frame'], get_printable_location = lambda codeno : str(codeno)) @@ -781,22 +779,26 @@ def portal(codeno, frame): i = 0 + s = 0 while i < 10: - driver.can_enter_jit(frame=frame, codeno=codeno, i=i) - driver.jit_merge_point(frame=frame, codeno=codeno, i=i) + driver.can_enter_jit(frame=frame, codeno=codeno, i=i, s=s) + driver.jit_merge_point(frame=frame, codeno=codeno, i=i, s=s) nextval = frame.thing.val if codeno == 0: subframe = Frame() subframe.thing = Thing(nextval) nextval = portal(1, subframe) + s += subframe.thing.val frame.thing = Thing(nextval + 1) i += 1 return frame.thing.val res = self.meta_interp(main, [0], inline=True) + self.check_resops(call=0, cond_call=0) # got removed by optimization assert res == main(0) def test_directly_call_assembler_virtualizable_reset_token(self): + py.test.skip("not applicable any more, I think") from rpython.rtyper.lltypesystem import lltype from rpython.rlib.debug import llinterpcall @@ -805,7 +807,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -856,7 +858,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -907,7 +909,7 @@ virtualizables = ['frame']) class Frame(object): - 
_virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, l, s): self = hint(self, access_directly=True, @@ -950,7 +952,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -344,7 +344,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, a, s): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -4,7 +4,8 @@ from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import get_translator +from rpython.jit.metainterp.warmspot import get_translator, get_stats +from rpython.jit.metainterp.resoperation import rop from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote, virtual_ref from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import hlstr @@ -26,7 +27,6 @@ return lltype_to_annotation(lltype.Void) def specialize_call(self, hop): - op = self.instance # the LLOp object that was called args_v = [hop.inputarg(hop.args_r[0], 0), hop.inputconst(lltype.Void, hop.args_v[1].value), hop.inputconst(lltype.Void, {})] @@ -46,8 +46,8 @@ ('vable_token', llmemory.GCREF), ('inst_x', lltype.Signed), ('inst_node', lltype.Ptr(LLtypeMixin.NODE)), - hints = {'virtualizable2_accessor': FieldListAccessor()}) - XY._hints['virtualizable2_accessor'].initialize( + hints = {'virtualizable_accessor': FieldListAccessor()}) + XY._hints['virtualizable_accessor'].initialize( XY, {'inst_x': IR_IMMUTABLE, 'inst_node': IR_IMMUTABLE}) xy_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) @@ -141,11 +141,13 @@ n -= 1 def f(n): xy = self.setup() + promote_virtualizable(xy, 'inst_x') xy.inst_x = 10000 m = 10 while m > 0: g(xy, n) m -= 1 + promote_virtualizable(xy, 'inst_x') return xy.inst_x res = self.meta_interp(f, [18]) assert res == 10180 @@ -200,8 +202,8 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_simple_loop(setfield_gc=1, getfield_gc=0) - self.check_resops(setfield_gc=2, getfield_gc=3) + self.check_simple_loop(setfield_gc=1, getfield_gc=0, cond_call=1) + self.check_resops(setfield_gc=2, getfield_gc=4) # ------------------------------ @@ -212,8 +214,8 @@ ('inst_x', lltype.Signed), ('inst_l1', lltype.Ptr(lltype.GcArray(lltype.Signed))), ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))), - hints = {'virtualizable2_accessor': FieldListAccessor()}) - XY2._hints['virtualizable2_accessor'].initialize( + hints = {'virtualizable_accessor': FieldListAccessor()}) + XY2._hints['virtualizable_accessor'].initialize( XY2, {'inst_x': IR_IMMUTABLE, 'inst_l1': IR_IMMUTABLE_ARRAY, 'inst_l2': IR_IMMUTABLE_ARRAY}) @@ -278,6 +280,7 @@ while m > 0: g(xy2, n) m -= 1 + promote_virtualizable(xy2, 'inst_l2') return xy2.inst_l2[0] assert f(18) == 10360 res = self.meta_interp(f, 
[18]) @@ -381,7 +384,7 @@ res = self.meta_interp(f, [20], enable_opts='') assert res == expected self.check_simple_loop(setarrayitem_gc=1, setfield_gc=0, - getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=2) # ------------------------------ @@ -424,7 +427,9 @@ while m > 0: g(xy2, n) m -= 1 - return xy2.parent.inst_l2[0] + parent = xy2.parent + promote_virtualizable(parent, 'inst_l2') + return parent.inst_l2[0] assert f(18) == 10360 res = self.meta_interp(f, [18]) assert res == 10360 @@ -440,7 +445,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -469,7 +474,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['l[*]', 's'] + _virtualizable_ = ['l[*]', 's'] def __init__(self, l, s): self.l = l @@ -504,7 +509,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -532,7 +537,7 @@ virtualizables = ['frame']) class BaseFrame(object): - _virtualizable2_ = ['x[*]'] + _virtualizable_ = ['x[*]'] def __init__(self, x): self.x = x @@ -563,7 +568,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -596,7 +601,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -636,7 +641,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -669,7 +674,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -706,7 +711,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class Y: pass @@ -751,7 +756,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class Y: pass @@ -801,7 +806,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class FooBarError(Exception): pass @@ -845,7 +850,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -882,7 +887,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -934,7 +939,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -971,7 +976,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] class SomewhereElse: pass @@ -1005,7 +1010,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['stackpos', 'stack[*]'] + _virtualizable_ = ['stackpos', 'stack[*]'] def f(n): frame = Frame() @@ -1034,7 +1039,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1088,7 +1093,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1120,7 +1125,7 @@ 
virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y', 'z'] + _virtualizable_ = ['x', 'y', 'z'] def __init__(self, x, y, z=1): self = hint(self, access_directly=True) @@ -1155,7 +1160,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x[*]'] + _virtualizable_ = ['x[*]'] def __init__(self, x, y): self = hint(self, access_directly=True, @@ -1187,7 +1192,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1226,7 +1231,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self.x = x @@ -1266,7 +1271,7 @@ virtualizables = ['frame']) class Frame(object): - _virtualizable2_ = ['x', 'y'] + _virtualizable_ = ['x', 'y'] def __init__(self, x, y): self = hint(self, access_directly=True) @@ -1310,7 +1315,7 @@ def test_inlining(self): class Frame(object): - _virtualizable2_ = ['x', 'next'] + _virtualizable_ = ['x', 'next'] def __init__(self, x): self = hint(self, access_directly=True) @@ -1345,7 +1350,7 @@ def test_guard_failure_in_inlined_function(self): class Frame(object): - _virtualizable2_ = ['n', 'next'] + _virtualizable_ = ['n', 'next'] def __init__(self, n): self = hint(self, access_directly=True) @@ -1399,7 +1404,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], @@ -1450,7 +1455,7 @@ ) class Frame(object): - _virtualizable2_ = ['x'] + _virtualizable_ = ['x'] def main(n): f = Frame() @@ -1469,6 +1474,113 @@ "int_add": 2, "jump": 1 }) + def test_frame_nonstandard_no_virtualizable(self): + + driver1 = JitDriver(greens=[], reds=['i', 's', 'frame']) + driver2 = JitDriver(greens=[], reds=['frame'], + virtualizables=['frame']) + + class Frame(object): + _virtualizable_ = ['x'] + + def g(frame): + driver2.jit_merge_point(frame=frame) + frame.x += 1 + return frame + + def f(): + i = 0 + s = 0 + frame = Frame() + frame.x = 0 + g(frame) + while i < 10: + driver1.jit_merge_point(frame=frame, s=s, i=i) + frame = g(frame) + s += frame.x + i += 1 + return s + + def main(): + res = 0 + for i in range(10): + res += f() + return res + + res = self.meta_interp(main, []) + assert res == main() + + def test_two_virtualizables_mixed(self): + driver1 = JitDriver(greens=[], reds=['i', 's', 'frame', + 'subframe']) + driver2 = JitDriver(greens=[], reds=['subframe'], + virtualizables=['subframe']) + + class Frame(object): + _virtualizable_ = ['x'] + + class SubFrame(object): + _virtualizable_ = ['x'] + + def g(subframe): + driver2.jit_merge_point(subframe=subframe) + subframe.x += 1 + + def f(): + i = 0 + frame = Frame() + frame.x = 0 + subframe = SubFrame() + subframe.x = 0 + s = 0 + while i < 10: + driver1.jit_merge_point(frame=frame, subframe=subframe, i=i, + s=s) + g(subframe) + s += subframe.x + i += 1 + return s + + res = self.meta_interp(f, []) + assert res == f() + + def test_force_virtualizable_by_hint(self): + class Frame(object): + _virtualizable_ = ['x'] + + driver = JitDriver(greens = [], reds = ['i', 'frame'], + virtualizables = ['frame']) + + def f(frame, i): + while i > 0: + driver.jit_merge_point(i=i, frame=frame) + i -= 1 + frame.x += 1 + hint(frame, force_virtualizable=True) + + def main(): + frame = Frame() + frame.x = 0 + s = 0 + for i in range(20): From noreply at buildbot.pypy.org Tue 
Aug 6 20:16:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 20:16:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Really fix translation for --no-thread Message-ID: <20130806181656.B581F1C01B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65983:17c47786c0eb Date: 2013-08-06 20:16 +0200 http://bitbucket.org/pypy/pypy/changeset/17c47786c0eb/ Log: Really fix translation for --no-thread diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -16,6 +16,9 @@ ('dict', PyObject), ])) +class NoThreads(Exception): + pass + @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread @@ -44,13 +47,15 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): - if space.config.translation.thread: - from pypy.module.thread import os_thread - os_thread.setup_threads(space) - #else: nothing to do + if not space.config.translation.thread: + raise NoThreads + from pypy.module.thread import os_thread + os_thread.setup_threads(space) @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): + if not space.config.translation.thread: + return 0 return 1 # XXX: might be generally useful @@ -234,6 +239,8 @@ """Create a new thread state object belonging to the given interpreter object. The global interpreter lock need not be held, but may be held if it is necessary to serialize calls to this function.""" + if not space.config.translation.thread: + raise NoThreads rthread.gc_thread_prepare() # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. @@ -248,11 +255,11 @@ def PyThreadState_Clear(space, tstate): """Reset all information in a thread state object. 
The global interpreter lock must be held.""" + if not space.config.translation.thread: + raise NoThreads Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) - if space.config.translation.thread: - space.threadlocals.leave_thread(space) - #else: nothing to do + space.threadlocals.leave_thread(space) space.getexecutioncontext().cleanup_cpyext_state() rthread.gc_thread_die() From noreply at buildbot.pypy.org Tue Aug 6 20:18:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Aug 2013 20:18:05 +0200 (CEST) Subject: [pypy-commit] pypy r15-for-shadowstack: hg merge default Message-ID: <20130806181805.6C8591C01B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: r15-for-shadowstack Changeset: r65984:06288239285a Date: 2013-08-06 20:17 +0200 http://bitbucket.org/pypy/pypy/changeset/06288239285a/ Log: hg merge default diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -16,6 +16,9 @@ ('dict', PyObject), ])) +class NoThreads(Exception): + pass + @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread @@ -44,13 +47,15 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): - if space.config.translation.thread: - from pypy.module.thread import os_thread - os_thread.setup_threads(space) - #else: nothing to do + if not space.config.translation.thread: + raise NoThreads + from pypy.module.thread import os_thread + os_thread.setup_threads(space) @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): + if not space.config.translation.thread: + return 0 return 1 # XXX: might be generally useful @@ -234,6 +239,8 @@ """Create a new thread state object belonging to the given interpreter object. The global interpreter lock need not be held, but may be held if it is necessary to serialize calls to this function.""" + if not space.config.translation.thread: + raise NoThreads rthread.gc_thread_prepare() # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. @@ -248,11 +255,11 @@ def PyThreadState_Clear(space, tstate): """Reset all information in a thread state object. The global interpreter lock must be held.""" + if not space.config.translation.thread: + raise NoThreads Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) - if space.config.translation.thread: - space.threadlocals.leave_thread(space) - #else: nothing to do + space.threadlocals.leave_thread(space) space.getexecutioncontext().cleanup_cpyext_state() rthread.gc_thread_die() From noreply at buildbot.pypy.org Tue Aug 6 22:37:52 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 22:37:52 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: Add what we know about benchmark in one place Message-ID: <20130806203752.3A6D41C01CC@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r227:1d171ace4fae Date: 2013-08-06 22:37 +0200 http://bitbucket.org/pypy/benchmarks/changeset/1d171ace4fae/ Log: Add what we know about benchmark in one place diff --git a/bench-data.json b/bench-data.json new file mode 100644 --- /dev/null +++ b/bench-data.json @@ -0,0 +1,149 @@ +{ + "ai": { + "warmup": 2, + "total_runs": 52, + "description": "Brute force n-queens solver." 
+ }, + "bm_chameleon": { + }, + "bm_mako": { + }, + "chaos": { + "description": "Creates chaosgame-like fractals" + }, + "cpython_doc": { + "description": "Run sphinx over cpython documentation" + }, + "crypto_pyaes": { + "description": "A pure python implementation of AES" + }, + "django": { + "warmup": 2, + "total_runs": 52, + "description": "Uses the Django template system to build a 150x150-cell HTML table." + }, + "eparse": { + }, + "fannkuch": { + "description": "Indexed-access to tiny integer-sequence. The fannkuch benchmark is defined by programs in Performing Lisp Analysis of the FANNKUCH Benchmark, Kenneth R. Anderson and Duane Rettig." + }, + "float": { + "description": "Creates an array of points using circular projection and then normalizes and maximizes them. Floating-point heavy." + }, + "genshi_text": { + "description": "Genshi template rendering using text, generator heavy" + }, + "genshi_xml": { + "description": "Genshi template rendering using XML, generator heavy" + }, + "go": { + "description": "A go game computer player AI." + }, + "hexiom2": { + }, + "html5lib": { + "warmup": 0, + "total_runs": 50, + "description": "Parses the HTML 5 spec using html5lib." + }, + "json_bench": { + "description": "Tests the speed of json encoding" + }, + "meteor-contest": { + "description": "Searchs for solutions to shape packing puzzle." + }, + "nbody_modified": { + "warmup": 2, + "total_runs": 52, + "description": "Double-precision N-body simulation. It models the orbits of Jovian planets, using a simple symplectic-integrator." + }, + "pidigits": { + "description": "Computes the digits of PI. Long heavy" + }, + "pyflate-fast": { + "description": "Stand-alone pure-Python DEFLATE (gzip) and bzip2 decoder/decompressor." + }, + "raytrace-simple": { + "description": "A raytracer renderer" + }, + "richards": { + "warmup": 2, + "total_runs": 52, + "description": "Medium-sized language benchmark that simulates the task dispatcher in the kernel of an operating system." + }, + "rietveld": { + "warmup": 2, + "total_runs": 1500, + "description": "A Django application benchmark.", + "legacy_multiplier": 30 + }, + "scimark_fft": { + }, + "scimark_lu": { + }, + "scimark_montecarlo": { + }, + "scimark_sor": { + }, + "scimark_sparsematmult": { + }, + "slowspitfire": { + "description": "Uses the Spitfire template system to build a 1000x1000-cell HTML table; it differs from spitfire in that it uses .join(list) instead of cStringIO." + }, + "spambayes": { + "description": "Spambayes spam classification filter" + }, + "spectral-norm": { + }, + "spitfire": { + "description": "Uses the Spitfire template system to build a 100x100-cell HTML table; it differs from spitfire in that it uses .join(list) instead of cStringIO." + }, + "spitfire_cstringio": { + "description": "ses the Spitfire template system to build a 1000x1000-cell HTML table, using the cStringIO module." + }, + "sympy_expand": { + "description": "Use sympy (pure python symbolic math lib) do to expansion" + }, + "sympy_integrate": { + "description": "Use sympy (pure python symbolic math lib) do to integration" + }, + "sympy_str": { + "description": "Use sympy (pure python symbolic math lib) do to str() operation" + }, + "sympy_sum": { + "description": "Use sympy (pure python symbolic math lib) do to summation" + }, + "telco": { + "description": "A small program which is intended to capture the essence of a telephone company billing application, with a realistic balance between Input/Output activity and application calculations." 
+ }, + "trans2_annotate": { + "description": "PyPy translation -O2 - annotation" + }, + "trans2_backendopt": { + "description": "PyPy translation -O2 - backendopt" + }, + "trans2_database": { + "description": "PyPy translation -O2 - C database" + }, + "trans2_rtype": { + "description": "PyPy translation -O2 - rtype" + }, + "trans2_source": { + "description": "PyPy translation -O2 - C source" + }, + "twisted_iteration" : { + "description": "Iterates a Twisted reactor as quickly as possible without doing any work." + }, + "twisted_names": { + "description": "Runs a DNS server with Twisted Names and then issues requests to it over loopback UDP." + }, + "twisted_pb": { + "description": "Runs a Perspective Broker server with a no-op method and invokes that method over loopback TCP with some strings, dictionaries, and tuples as arguments." + }, + "twisted_tcp": { + "description": "Connects one Twised client to one Twisted server over TCP (on the loopback interface) and then writes bytes as fast as it can." + }, + "twisted_web": { + "description": "Runs twisted web server and connects through twisted HTTP client" + } +} From noreply at buildbot.pypy.org Tue Aug 6 22:39:24 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Aug 2013 22:39:24 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: We no longer need that (thank god) Message-ID: <20130806203924.DB4E31C013B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r228:4a86ac0a48ef Date: 2013-08-06 22:39 +0200 http://bitbucket.org/pypy/benchmarks/changeset/4a86ac0a48ef/ Log: We no longer need that (thank god) diff --git a/nullpython.py b/nullpython.py deleted file mode 100755 --- a/nullpython.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python -'''This is a dummy that does nothing except that it returns 1 -second for every round of the benchmark. - -You can use this as the baseline interpreter if you are only -interested in the time of the changed interpreter, but not -in the difference to a baseline interpreter. 
-''' -from own import util -import optparse - -if __name__ == '__main__': - parser = optparse.OptionParser( - usage="%prog [options]", - description="Test the performance of the Go benchmark") - util.add_standard_options_to(parser) - options, args = parser.parse_args() - - main = lambda n: [0.0001 for x in range(options.num_runs)] - util.run_benchmark(options, options.num_runs, main) From noreply at buildbot.pypy.org Wed Aug 7 02:02:10 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Wed, 7 Aug 2013 02:02:10 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: Initial copies of minimark -> incminimark Message-ID: <20130807000210.0CF3A1C01B7@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r65985:09f7f432cb89 Date: 2013-08-07 03:55 +1200 http://bitbucket.org/pypy/pypy/changeset/09f7f432cb89/ Log: Initial copies of minimark -> incminimark diff too long, truncating to 2000 out of 2335 lines diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -50,7 +50,7 @@ # gc ChoiceOption("gc", "Garbage Collection Strategy", ["boehm", "ref", "semispace", "statistics", - "generation", "hybrid", "minimark", "none"], + "generation", "hybrid", "minimark",'incminimark', "none"], "ref", requires={ "ref": [("translation.rweakref", False), # XXX ("translation.gctransformer", "ref")], @@ -63,6 +63,7 @@ "boehm": [("translation.continuation", False), # breaks ("translation.gctransformer", "boehm")], "minimark": [("translation.gctransformer", "framework")], + "incminimark": [("translation.gctransformer", "framework")], }, cmdline="--gc"), ChoiceOption("gctransformer", "GC transformer that is used - internal", diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -430,6 +430,7 @@ "generation": "generation.GenerationGC", "hybrid": "hybrid.HybridGC", "minimark" : "minimark.MiniMarkGC", + "incminimark" : "incminimark.IncrementalMiniMarkGC", } try: modulename, classname = classes[config.translation.gc].split('.') diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py new file mode 100644 --- /dev/null +++ b/rpython/memory/gc/incminimark.py @@ -0,0 +1,2110 @@ +"""Incremental version of the MiniMark GC. + +Environment variables can be used to fine-tune the following parameters: + + PYPY_GC_NURSERY The nursery size. Defaults to 1/2 of your cache or + '4M'. Small values + (like 1 or 1KB) are useful for debugging. + + PYPY_GC_NURSERY_CLEANUP The interval at which nursery is cleaned up. Must + be smaller than the nursery size and bigger than the + biggest object we can allotate in the nursery. + + PYPY_GC_MAJOR_COLLECT Major collection memory factor. Default is '1.82', + which means trigger a major collection when the + memory consumed equals 1.82 times the memory + really used at the end of the previous major + collection. + + PYPY_GC_GROWTH Major collection threshold's max growth rate. + Default is '1.4'. Useful to collect more often + than normally on sudden memory growth, e.g. when + there is a temporary peak in memory usage. + + PYPY_GC_MAX The max heap size. If coming near this limit, it + will first collect more often, then raise an + RPython MemoryError, and if that is not enough, + crash the program with a fatal error. Try values + like '1.6GB'. 
+ + PYPY_GC_MAX_DELTA The major collection threshold will never be set + to more than PYPY_GC_MAX_DELTA the amount really + used after a collection. Defaults to 1/8th of the + total RAM size (which is constrained to be at most + 2/3/4GB on 32-bit systems). Try values like '200MB'. + + PYPY_GC_MIN Don't collect while the memory size is below this + limit. Useful to avoid spending all the time in + the GC in very small programs. Defaults to 8 + times the nursery. + + PYPY_GC_DEBUG Enable extra checks around collections that are + too slow for normal use. Values are 0 (off), + 1 (on major collections) or 2 (also on minor + collections). +""" +# XXX Should find a way to bound the major collection threshold by the +# XXX total addressable size. Maybe by keeping some minimarkpage arenas +# XXX pre-reserved, enough for a few nursery collections? What about +# XXX raw-malloced memory? +import sys +from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, llgroup +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem.llmemory import raw_malloc_usage +from rpython.memory.gc.base import GCBase, MovingGCBase +from rpython.memory.gc import env +from rpython.memory.support import mangle_hash +from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint +from rpython.rlib.rarithmetic import LONG_BIT_SHIFT +from rpython.rlib.debug import ll_assert, debug_print, debug_start, debug_stop +from rpython.rlib.objectmodel import specialize + + +# +# Handles the objects in 2 generations: +# +# * young objects: allocated in the nursery if they are not too large, or +# raw-malloced otherwise. The nursery is a fixed-size memory buffer of +# 4MB by default. When full, we do a minor collection; +# the surviving objects from the nursery are moved outside, and the +# non-surviving raw-malloced objects are freed. All surviving objects +# become old. +# +# * old objects: never move again. These objects are either allocated by +# minimarkpage.py (if they are small), or raw-malloced (if they are not +# small). Collected by regular mark-n-sweep during major collections. +# + +WORD = LONG_BIT // 8 +NULL = llmemory.NULL + +first_gcflag = 1 << (LONG_BIT//2) + +# The following flag is set on objects if we need to do something to +# track the young pointers that it might contain. The flag is not set +# on young objects (unless they are large arrays, see below), and we +# simply assume that any young object can point to any other young object. +# For old and prebuilt objects, the flag is usually set, and is cleared +# when we write a young pointer to it. For large arrays with +# GCFLAG_HAS_CARDS, we rely on card marking to track where the +# young pointers are; the flag GCFLAG_TRACK_YOUNG_PTRS is set in this +# case too, to speed up the write barrier. +GCFLAG_TRACK_YOUNG_PTRS = first_gcflag << 0 + +# The following flag is set on some prebuilt objects. The flag is set +# unless the object is already listed in 'prebuilt_root_objects'. +# When a pointer is written inside an object with GCFLAG_NO_HEAP_PTRS +# set, the write_barrier clears the flag and adds the object to +# 'prebuilt_root_objects'. +GCFLAG_NO_HEAP_PTRS = first_gcflag << 1 + +# The following flag is set on surviving objects during a major collection, +# and on surviving raw-malloced young objects during a minor collection. +GCFLAG_VISITED = first_gcflag << 2 + +# The following flag is set on nursery objects of which we asked the id +# or the identityhash. 
It means that a space of the size of the object +# has already been allocated in the nonmovable part. The same flag is +# abused to mark prebuilt objects whose hash has been taken during +# translation and is statically recorded. +GCFLAG_HAS_SHADOW = first_gcflag << 3 + +# The following flag is set temporarily on some objects during a major +# collection. See pypy/doc/discussion/finalizer-order.txt +GCFLAG_FINALIZATION_ORDERING = first_gcflag << 4 + +# This flag is reserved for RPython. +GCFLAG_EXTRA = first_gcflag << 5 + +# The following flag is set on externally raw_malloc'ed arrays of pointers. +# They are allocated with some extra space in front of them for a bitfield, +# one bit per 'card_page_indices' indices. +GCFLAG_HAS_CARDS = first_gcflag << 6 +GCFLAG_CARDS_SET = first_gcflag << 7 # <- at least one card bit is set +# note that GCFLAG_CARDS_SET is the most significant bit of a byte: +# this is required for the JIT (x86) + +TID_MASK = (first_gcflag << 8) - 1 + + +FORWARDSTUB = lltype.GcStruct('forwarding_stub', + ('forw', llmemory.Address)) +FORWARDSTUBPTR = lltype.Ptr(FORWARDSTUB) +NURSARRAY = lltype.Array(llmemory.Address) + +# ____________________________________________________________ + +class IncrementalMiniMarkGC(MovingGCBase): + _alloc_flavor_ = "raw" + inline_simple_malloc = True + inline_simple_malloc_varsize = True + needs_write_barrier = True + prebuilt_gc_objects_are_static_roots = False + malloc_zero_filled = True # xxx experiment with False + gcflag_extra = GCFLAG_EXTRA + + # All objects start with a HDR, i.e. with a field 'tid' which contains + # a word. This word is divided in two halves: the lower half contains + # the typeid, and the upper half contains various flags, as defined + # by GCFLAG_xxx above. + HDR = lltype.Struct('header', ('tid', lltype.Signed)) + typeid_is_in_field = 'tid' + withhash_flag_is_in_field = 'tid', GCFLAG_HAS_SHADOW + # ^^^ prebuilt objects may have the flag GCFLAG_HAS_SHADOW; + # then they are one word longer, the extra word storing the hash. + + + # During a minor collection, the objects in the nursery that are + # moved outside are changed in-place: their header is replaced with + # the value -42, and the following word is set to the address of + # where the object was moved. This means that all objects in the + # nursery need to be at least 2 words long, but objects outside the + # nursery don't need to. + minimal_size_in_nursery = ( + llmemory.sizeof(HDR) + llmemory.sizeof(llmemory.Address)) + + + TRANSLATION_PARAMS = { + # Automatically adjust the size of the nursery and the + # 'major_collection_threshold' from the environment. + # See docstring at the start of the file. + "read_from_env": True, + + # The size of the nursery. Note that this is only used as a + # fall-back number. + "nursery_size": 896*1024, + + # The system page size. Like obmalloc.c, we assume that it is 4K + # for 32-bit systems; unlike obmalloc.c, we assume that it is 8K + # for 64-bit systems, for consistent results. + "page_size": 1024*WORD, + + # The size of an arena. Arenas are groups of pages allocated + # together. + "arena_size": 65536*WORD, + + # The maximum size of an object allocated compactly. All objects + # that are larger are just allocated with raw_malloc(). Note that + # the size limit for being first allocated in the nursery is much + # larger; see below. 
+ "small_request_threshold": 35*WORD, + + # Full collection threshold: after a major collection, we record + # the total size consumed; and after every minor collection, if the + # total size is now more than 'major_collection_threshold' times, + # we trigger the next major collection. + "major_collection_threshold": 1.82, + + # Threshold to avoid that the total heap size grows by a factor of + # major_collection_threshold at every collection: it can only + # grow at most by the following factor from one collection to the + # next. Used e.g. when there is a sudden, temporary peak in memory + # usage; this avoids that the upper bound grows too fast. + "growth_rate_max": 1.4, + + # The number of array indices that are mapped to a single bit in + # write_barrier_from_array(). Must be a power of two. The default + # value of 128 means that card pages are 512 bytes (1024 on 64-bits) + # in regular arrays of pointers; more in arrays whose items are + # larger. A value of 0 disables card marking. + "card_page_indices": 128, + + # Objects whose total size is at least 'large_object' bytes are + # allocated out of the nursery immediately, as old objects. The + # minimal allocated size of the nursery is 2x the following + # number (by default, at least 132KB on 32-bit and 264KB on 64-bit). + "large_object": (16384+512)*WORD, + + # This is the chunk that we cleanup in the nursery. The point is + # to avoid having to trash all the caches just to zero the nursery, + # so we trade it by cleaning it bit-by-bit, as we progress through + # nursery. Has to fit at least one large object + "nursery_cleanup": 32768 * WORD, + } + + def __init__(self, config, + read_from_env=False, + nursery_size=32*WORD, + nursery_cleanup=9*WORD, + page_size=16*WORD, + arena_size=64*WORD, + small_request_threshold=5*WORD, + major_collection_threshold=2.5, + growth_rate_max=2.5, # for tests + card_page_indices=0, + large_object=8*WORD, + ArenaCollectionClass=None, + **kwds): + MovingGCBase.__init__(self, config, **kwds) + assert small_request_threshold % WORD == 0 + self.read_from_env = read_from_env + self.nursery_size = nursery_size + self.nursery_cleanup = nursery_cleanup + self.small_request_threshold = small_request_threshold + self.major_collection_threshold = major_collection_threshold + self.growth_rate_max = growth_rate_max + self.num_major_collects = 0 + self.min_heap_size = 0.0 + self.max_heap_size = 0.0 + self.max_heap_size_already_raised = False + self.max_delta = float(r_uint(-1)) + # + self.card_page_indices = card_page_indices + if self.card_page_indices > 0: + self.card_page_shift = 0 + while (1 << self.card_page_shift) < self.card_page_indices: + self.card_page_shift += 1 + # + # 'large_object' limit how big objects can be in the nursery, so + # it gives a lower bound on the allowed size of the nursery. + self.nonlarge_max = large_object - 1 + # + self.nursery = NULL + self.nursery_free = NULL + self.nursery_top = NULL + self.nursery_real_top = NULL + self.debug_tiny_nursery = -1 + self.debug_rotating_nurseries = lltype.nullptr(NURSARRAY) + self.extra_threshold = 0 + # + # The ArenaCollection() handles the nonmovable objects allocation. + if ArenaCollectionClass is None: + from rpython.memory.gc import minimarkpage + ArenaCollectionClass = minimarkpage.ArenaCollection + self.ac = ArenaCollectionClass(arena_size, page_size, + small_request_threshold) + # + # Used by minor collection: a list of (mostly non-young) objects that + # (may) contain a pointer to a young object. 
Populated by + # the write barrier: when we clear GCFLAG_TRACK_YOUNG_PTRS, we + # add it to this list. + # Note that young array objects may (by temporary "mistake") be added + # to this list, but will be removed again at the start of the next + # minor collection. + self.old_objects_pointing_to_young = self.AddressStack() + # + # Similar to 'old_objects_pointing_to_young', but lists objects + # that have the GCFLAG_CARDS_SET bit. For large arrays. Note + # that it is possible for an object to be listed both in here + # and in 'old_objects_pointing_to_young', in which case we + # should just clear the cards and trace it fully, as usual. + # Note also that young array objects are never listed here. + self.old_objects_with_cards_set = self.AddressStack() + # + # A list of all prebuilt GC objects that contain pointers to the heap + self.prebuilt_root_objects = self.AddressStack() + # + self._init_writebarrier_logic() + + + def setup(self): + """Called at run-time to initialize the GC.""" + # + # Hack: MovingGCBase.setup() sets up stuff related to id(), which + # we implement differently anyway. So directly call GCBase.setup(). + GCBase.setup(self) + # + # Two lists of all raw_malloced objects (the objects too large) + self.young_rawmalloced_objects = self.null_address_dict() + self.old_rawmalloced_objects = self.AddressStack() + self.rawmalloced_total_size = r_uint(0) + # + # A list of all objects with finalizers (these are never young). + self.objects_with_finalizers = self.AddressDeque() + self.young_objects_with_light_finalizers = self.AddressStack() + self.old_objects_with_light_finalizers = self.AddressStack() + # + # Two lists of the objects with weakrefs. No weakref can be an + # old object weakly pointing to a young object: indeed, weakrefs + # are immutable so they cannot point to an object that was + # created after it. + self.young_objects_with_weakrefs = self.AddressStack() + self.old_objects_with_weakrefs = self.AddressStack() + # + # Support for id and identityhash: map nursery objects with + # GCFLAG_HAS_SHADOW to their future location at the next + # minor collection. + self.nursery_objects_shadows = self.AddressDict() + # + # Allocate a nursery. In case of auto_nursery_size, start by + # allocating a very small nursery, enough to do things like look + # up the env var, which requires the GC; and then really + # allocate the nursery of the final size. + if not self.read_from_env: + self.allocate_nursery() + else: + # + defaultsize = self.nursery_size + minsize = 2 * (self.nonlarge_max + 1) + self.nursery_size = minsize + self.allocate_nursery() + # + # From there on, the GC is fully initialized and the code + # below can use it + newsize = env.read_from_env('PYPY_GC_NURSERY') + # PYPY_GC_NURSERY=smallvalue means that minor collects occur + # very frequently; the extreme case is PYPY_GC_NURSERY=1, which + # forces a minor collect for every malloc. Useful to debug + # external factors, like trackgcroot or the handling of the write + # barrier. Implemented by still using 'minsize' for the nursery + # size (needed to handle mallocs just below 'large_objects') but + # hacking at the current nursery position in collect_and_reserve(). 
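# A condensed, standalone model of the PYPY_GC_NURSERY handling described
# above, using plain os.environ instead of rpython's env helpers and leaving
# out the estimate_best_nursery_size() fallback; WORD = 8 is assumed.
import os

def pick_nursery_size(default_size, minsize, WORD=8):
    try:
        newsize = int(os.environ.get('PYPY_GC_NURSERY', '0'))
    except ValueError:
        newsize = 0
    if newsize <= 0:
        newsize = default_size            # nothing (usable) in the environment
    debug_tiny_nursery = -1
    if newsize < minsize:
        # keep the real nursery at the minimum legal size, but remember the
        # tiny requested size so collections still happen very frequently
        debug_tiny_nursery = newsize & ~(WORD - 1)
        newsize = minsize
    return newsize, debug_tiny_nursery

# e.g. with the defaults above: 896KB nursery, tiny-nursery debugging disabled
print(pick_nursery_size(896 * 1024, 2 * (16384 + 512) * 8))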
+ if newsize <= 0: + newsize = env.estimate_best_nursery_size() + if newsize <= 0: + newsize = defaultsize + if newsize < minsize: + self.debug_tiny_nursery = newsize & ~(WORD-1) + newsize = minsize + + nurs_cleanup = env.read_from_env('PYPY_GC_NURSERY_CLEANUP') + if nurs_cleanup > 0: + self.nursery_cleanup = nurs_cleanup + # + major_coll = env.read_float_from_env('PYPY_GC_MAJOR_COLLECT') + if major_coll > 1.0: + self.major_collection_threshold = major_coll + # + growth = env.read_float_from_env('PYPY_GC_GROWTH') + if growth > 1.0: + self.growth_rate_max = growth + # + min_heap_size = env.read_uint_from_env('PYPY_GC_MIN') + if min_heap_size > 0: + self.min_heap_size = float(min_heap_size) + else: + # defaults to 8 times the nursery + self.min_heap_size = newsize * 8 + # + max_heap_size = env.read_uint_from_env('PYPY_GC_MAX') + if max_heap_size > 0: + self.max_heap_size = float(max_heap_size) + # + max_delta = env.read_uint_from_env('PYPY_GC_MAX_DELTA') + if max_delta > 0: + self.max_delta = float(max_delta) + else: + self.max_delta = 0.125 * env.get_total_memory() + # + self.minor_collection() # to empty the nursery + llarena.arena_free(self.nursery) + self.nursery_size = newsize + self.allocate_nursery() + # + if self.nursery_cleanup < self.nonlarge_max + 1: + self.nursery_cleanup = self.nonlarge_max + 1 + # We need exactly initial_cleanup + N*nursery_cleanup = nursery_size. + # We choose the value of initial_cleanup to be between 1x and 2x the + # value of nursery_cleanup. + self.initial_cleanup = self.nursery_cleanup + ( + self.nursery_size % self.nursery_cleanup) + if (r_uint(self.initial_cleanup) > r_uint(self.nursery_size) or + self.debug_tiny_nursery >= 0): + self.initial_cleanup = self.nursery_size + + def _nursery_memory_size(self): + extra = self.nonlarge_max + 1 + return self.nursery_size + extra + + def _alloc_nursery(self): + # the start of the nursery: we actually allocate a bit more for + # the nursery than really needed, to simplify pointer arithmetic + # in malloc_fixedsize_clear(). The few extra pages are never used + # anyway so it doesn't even count. + nursery = llarena.arena_malloc(self._nursery_memory_size(), 2) + if not nursery: + raise MemoryError("cannot allocate nursery") + return nursery + + def allocate_nursery(self): + debug_start("gc-set-nursery-size") + debug_print("nursery size:", self.nursery_size) + self.nursery = self._alloc_nursery() + # the current position in the nursery: + self.nursery_free = self.nursery + # the end of the nursery: + self.nursery_top = self.nursery + self.nursery_size + self.nursery_real_top = self.nursery_top + # initialize the threshold + self.min_heap_size = max(self.min_heap_size, self.nursery_size * + self.major_collection_threshold) + # the following two values are usually equal, but during raw mallocs + # of arrays, next_major_collection_threshold is decremented to make + # the next major collection arrive earlier. + # See translator/c/test/test_newgc, test_nongc_attached_to_gc + self.next_major_collection_initial = self.min_heap_size + self.next_major_collection_threshold = self.min_heap_size + self.set_major_threshold_from(0.0) + ll_assert(self.extra_threshold == 0, "extra_threshold set too early") + self.initial_cleanup = self.nursery_size + debug_stop("gc-set-nursery-size") + + + def set_major_threshold_from(self, threshold, reserving_size=0): + # Set the next_major_collection_threshold. 
+ threshold_max = (self.next_major_collection_initial * + self.growth_rate_max) + if threshold > threshold_max: + threshold = threshold_max + # + threshold += reserving_size + if threshold < self.min_heap_size: + threshold = self.min_heap_size + # + if self.max_heap_size > 0.0 and threshold > self.max_heap_size: + threshold = self.max_heap_size + bounded = True + else: + bounded = False + # + self.next_major_collection_initial = threshold + self.next_major_collection_threshold = threshold + return bounded + + + def post_setup(self): + # set up extra stuff for PYPY_GC_DEBUG. + MovingGCBase.post_setup(self) + if self.DEBUG and llarena.has_protect: + # gc debug mode: allocate 23 nurseries instead of just 1, + # and use them alternatively, while mprotect()ing the unused + # ones to detect invalid access. + debug_start("gc-debug") + self.debug_rotating_nurseries = lltype.malloc( + NURSARRAY, 22, flavor='raw', track_allocation=False) + i = 0 + while i < 22: + nurs = self._alloc_nursery() + llarena.arena_protect(nurs, self._nursery_memory_size(), True) + self.debug_rotating_nurseries[i] = nurs + i += 1 + debug_print("allocated", len(self.debug_rotating_nurseries), + "extra nurseries") + debug_stop("gc-debug") + + def debug_rotate_nursery(self): + if self.debug_rotating_nurseries: + debug_start("gc-debug") + oldnurs = self.nursery + llarena.arena_protect(oldnurs, self._nursery_memory_size(), True) + # + newnurs = self.debug_rotating_nurseries[0] + i = 0 + while i < len(self.debug_rotating_nurseries) - 1: + self.debug_rotating_nurseries[i] = ( + self.debug_rotating_nurseries[i + 1]) + i += 1 + self.debug_rotating_nurseries[i] = oldnurs + # + llarena.arena_protect(newnurs, self._nursery_memory_size(), False) + self.nursery = newnurs + self.nursery_top = self.nursery + self.initial_cleanup + self.nursery_real_top = self.nursery + self.nursery_size + debug_print("switching from nursery", oldnurs, + "to nursery", self.nursery, + "size", self.nursery_size) + debug_stop("gc-debug") + + + def malloc_fixedsize_clear(self, typeid, size, + needs_finalizer=False, + is_finalizer_light=False, + contains_weakptr=False): + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + size + rawtotalsize = raw_malloc_usage(totalsize) + # + # If the object needs a finalizer, ask for a rawmalloc. + # The following check should be constant-folded. + if needs_finalizer and not is_finalizer_light: + ll_assert(not contains_weakptr, + "'needs_finalizer' and 'contains_weakptr' both specified") + obj = self.external_malloc(typeid, 0, can_make_young=False) + self.objects_with_finalizers.append(obj) + # + # If totalsize is greater than nonlarge_max (which should never be + # the case in practice), ask for a rawmalloc. The following check + # should be constant-folded. + elif rawtotalsize > self.nonlarge_max: + ll_assert(not contains_weakptr, + "'contains_weakptr' specified for a large object") + obj = self.external_malloc(typeid, 0) + # + else: + # If totalsize is smaller than minimal_size_in_nursery, round it + # up. The following check should also be constant-folded. + min_size = raw_malloc_usage(self.minimal_size_in_nursery) + if rawtotalsize < min_size: + totalsize = rawtotalsize = min_size + # + # Get the memory from the nursery. If there is not enough space + # there, do a collect first. + result = self.nursery_free + self.nursery_free = result + totalsize + if self.nursery_free > self.nursery_top: + result = self.collect_and_reserve(result, totalsize) + # + # Build the object. 
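# A minimal model of the nursery fast path above: allocation is a single
# pointer bump, and only the overflow case pays for a (here simulated)
# minor collection.  Addresses are plain integers in this sketch.
class BumpNursery(object):
    def __init__(self, size):
        self.start = 0
        self.free = 0
        self.top = size

    def malloc(self, totalsize):
        result = self.free
        self.free = result + totalsize
        if self.free > self.top:                  # slow path, rarely taken
            result = self.collect_and_reserve(totalsize)
        return result

    def collect_and_reserve(self, totalsize):
        # stand-in for minor_collection(): pretend the nursery is empty again
        self.free = self.start + totalsize
        return self.start

n = BumpNursery(64)
assert n.malloc(16) == 0 and n.malloc(16) == 16
assert n.malloc(48) == 0        # this request overflowed and "collected"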
+ llarena.arena_reserve(result, totalsize) + obj = result + size_gc_header + if is_finalizer_light: + self.young_objects_with_light_finalizers.append(obj) + self.init_gc_object(result, typeid, flags=0) + # + # If it is a weakref, record it (check constant-folded). + if contains_weakptr: + self.young_objects_with_weakrefs.append(obj) + # + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + + def malloc_varsize_clear(self, typeid, length, size, itemsize, + offset_to_length): + size_gc_header = self.gcheaderbuilder.size_gc_header + nonvarsize = size_gc_header + size + # + # Compute the maximal length that makes the object still + # below 'nonlarge_max'. All the following logic is usually + # constant-folded because self.nonlarge_max, size and itemsize + # are all constants (the arguments are constant due to + # inlining). + maxsize = self.nonlarge_max - raw_malloc_usage(nonvarsize) + if maxsize < 0: + toobig = r_uint(0) # the nonvarsize alone is too big + elif raw_malloc_usage(itemsize): + toobig = r_uint(maxsize // raw_malloc_usage(itemsize)) + 1 + else: + toobig = r_uint(sys.maxint) + 1 + + if r_uint(length) >= r_uint(toobig): + # + # If the total size of the object would be larger than + # 'nonlarge_max', then allocate it externally. We also + # go there if 'length' is actually negative. + obj = self.external_malloc(typeid, length) + # + else: + # With the above checks we know now that totalsize cannot be more + # than 'nonlarge_max'; in particular, the + and * cannot overflow. + totalsize = nonvarsize + itemsize * length + totalsize = llarena.round_up_for_allocation(totalsize) + # + # 'totalsize' should contain at least the GC header and + # the length word, so it should never be smaller than + # 'minimal_size_in_nursery' + ll_assert(raw_malloc_usage(totalsize) >= + raw_malloc_usage(self.minimal_size_in_nursery), + "malloc_varsize_clear(): totalsize < minimalsize") + # + # Get the memory from the nursery. If there is not enough space + # there, do a collect first. + result = self.nursery_free + self.nursery_free = result + totalsize + if self.nursery_free > self.nursery_top: + result = self.collect_and_reserve(result, totalsize) + # + # Build the object. + llarena.arena_reserve(result, totalsize) + self.init_gc_object(result, typeid, flags=0) + # + # Set the length and return the object. + obj = result + size_gc_header + (obj + offset_to_length).signed[0] = length + # + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + + def collect(self, gen=1): + """Do a minor (gen=0) or major (gen>0) collection.""" + self.minor_collection() + if gen > 0: + self.major_collection() + + def move_nursery_top(self, totalsize): + size = self.nursery_cleanup + ll_assert(self.nursery_real_top - self.nursery_top >= size, + "nursery_cleanup not a divisor of nursery_size - initial_cleanup") + ll_assert(llmemory.raw_malloc_usage(totalsize) <= size, + "totalsize > nursery_cleanup") + llarena.arena_reset(self.nursery_top, size, 2) + self.nursery_top += size + move_nursery_top._always_inline_ = True + + def collect_and_reserve(self, prev_result, totalsize): + """To call when nursery_free overflows nursery_top. + First check if the nursery_top is the real top, otherwise we + can just move the top of one cleanup and continue + + Do a minor collection, and possibly also a major collection, + and finally reserve 'totalsize' bytes at the start of the + now-empty nursery. 
+ """ + if self.nursery_top < self.nursery_real_top: + self.move_nursery_top(totalsize) + return prev_result + self.minor_collection() + # + if self.get_total_memory_used() > self.next_major_collection_threshold: + self.major_collection() + # + # The nursery might not be empty now, because of + # execute_finalizers(). If it is almost full again, + # we need to fix it with another call to minor_collection(). + if self.nursery_free + totalsize > self.nursery_top: + # + if self.nursery_free + totalsize > self.nursery_real_top: + self.minor_collection() + # then the nursery is empty + else: + # we just need to clean up a bit more of the nursery + self.move_nursery_top(totalsize) + # + result = self.nursery_free + self.nursery_free = result + totalsize + ll_assert(self.nursery_free <= self.nursery_top, "nursery overflow") + # + if self.debug_tiny_nursery >= 0: # for debugging + if self.nursery_top - self.nursery_free > self.debug_tiny_nursery: + self.nursery_free = self.nursery_top - self.debug_tiny_nursery + # + return result + collect_and_reserve._dont_inline_ = True + + + def external_malloc(self, typeid, length, can_make_young=True): + """Allocate a large object using the ArenaCollection or + raw_malloc(), possibly as an object with card marking enabled, + if it has gc pointers in its var-sized part. 'length' should be + specified as 0 if the object is not varsized. The returned + object is fully initialized and zero-filled.""" + # + # Here we really need a valid 'typeid', not 0 (as the JIT might + # try to send us if there is still a bug). + ll_assert(bool(self.combine(typeid, 0)), + "external_malloc: typeid == 0") + # + # Compute the total size, carefully checking for overflows. + size_gc_header = self.gcheaderbuilder.size_gc_header + nonvarsize = size_gc_header + self.fixed_size(typeid) + if length == 0: + # this includes the case of fixed-size objects, for which we + # should not even ask for the varsize_item_sizes(). + totalsize = nonvarsize + elif length > 0: + # var-sized allocation with at least one item + itemsize = self.varsize_item_sizes(typeid) + try: + varsize = ovfcheck(itemsize * length) + totalsize = ovfcheck(nonvarsize + varsize) + except OverflowError: + raise MemoryError + else: + # negative length! This likely comes from an overflow + # earlier. We will just raise MemoryError here. + raise MemoryError + # + # If somebody calls this function a lot, we must eventually + # force a full collection. + if (float(self.get_total_memory_used()) + raw_malloc_usage(totalsize) > + self.next_major_collection_threshold): + self.minor_collection() + self.major_collection(raw_malloc_usage(totalsize)) + # + # Check if the object would fit in the ArenaCollection. + if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # + # Yes. Round up 'totalsize' (it cannot overflow and it + # must remain <= self.small_request_threshold.) + totalsize = llarena.round_up_for_allocation(totalsize) + ll_assert(raw_malloc_usage(totalsize) <= + self.small_request_threshold, + "rounding up made totalsize > small_request_threshold") + # + # Allocate from the ArenaCollection and clear the memory returned. + result = self.ac.malloc(totalsize) + llmemory.raw_memclear(result, totalsize) + # + # An object allocated from ArenaCollection is always old, even + # if 'can_make_young'. The interesting case of 'can_make_young' + # is for large objects, bigger than the 'large_objects' threshold, + # which are raw-malloced but still young. 
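# Sketch of the overflow handling in external_malloc() above, with plain
# Python arithmetic standing in for rpython's ovfcheck(): a negative length,
# or a total that would not fit in a signed word, becomes MemoryError rather
# than a silently wrapped size.
import sys

def checked_totalsize(nonvarsize, itemsize, length, maxint=sys.maxsize):
    if length == 0:
        return nonvarsize                 # fixed-size (or empty) object
    if length < 0:
        raise MemoryError                 # likely an overflow further up
    totalsize = nonvarsize + itemsize * length
    if totalsize > maxint:                # what ovfcheck() would have raised
        raise MemoryError
    return totalsize

assert checked_totalsize(16, 8, 4) == 48
try:
    checked_totalsize(16, 8, sys.maxsize)
except MemoryError:
    pass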
+ extra_flags = GCFLAG_TRACK_YOUNG_PTRS + # + else: + # No, so proceed to allocate it externally with raw_malloc(). + # Check if we need to introduce the card marker bits area. + if (self.card_page_indices <= 0 # <- this check is constant-folded + or not self.has_gcptr_in_varsize(typeid) or + raw_malloc_usage(totalsize) <= self.nonlarge_max): + # + # In these cases, we don't want a card marker bits area. + # This case also includes all fixed-size objects. + cardheadersize = 0 + extra_flags = 0 + # + else: + # Reserve N extra words containing card bits before the object. + extra_words = self.card_marking_words_for_length(length) + cardheadersize = WORD * extra_words + extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS + # if 'can_make_young', then we also immediately set + # GCFLAG_CARDS_SET, but without adding the object to + # 'old_objects_with_cards_set'. In this way it should + # never be added to that list as long as it is young. + if can_make_young: + extra_flags |= GCFLAG_CARDS_SET + # + # Detect very rare cases of overflows + if raw_malloc_usage(totalsize) > (sys.maxint - (WORD-1) + - cardheadersize): + raise MemoryError("rare case of overflow") + # + # Now we know that the following computations cannot overflow. + # Note that round_up_for_allocation() is also needed to get the + # correct number added to 'rawmalloced_total_size'. + allocsize = (cardheadersize + raw_malloc_usage( + llarena.round_up_for_allocation(totalsize))) + # + # Allocate the object using arena_malloc(), which we assume here + # is just the same as raw_malloc(), but allows the extra + # flexibility of saying that we have extra words in the header. + # The memory returned is cleared by a raw_memclear(). + arena = llarena.arena_malloc(allocsize, 2) + if not arena: + raise MemoryError("cannot allocate large object") + # + # Reserve the card mark bits as a list of single bytes + # (the loop is empty in C). + i = 0 + while i < cardheadersize: + llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char)) + i += 1 + # + # Reserve the actual object. (This is also a no-op in C). + result = arena + cardheadersize + llarena.arena_reserve(result, totalsize) + # + # Record the newly allocated object and its full malloced size. + # The object is young or old depending on the argument. + self.rawmalloced_total_size += r_uint(allocsize) + if can_make_young: + if not self.young_rawmalloced_objects: + self.young_rawmalloced_objects = self.AddressDict() + self.young_rawmalloced_objects.add(result + size_gc_header) + else: + self.old_rawmalloced_objects.append(result + size_gc_header) + extra_flags |= GCFLAG_TRACK_YOUNG_PTRS + # + # Common code to fill the header and length of the object. 
+ self.init_gc_object(result, typeid, extra_flags) + if self.is_varsize(typeid): + offset_to_length = self.varsize_offset_to_length(typeid) + (result + size_gc_header + offset_to_length).signed[0] = length + return result + size_gc_header + + + # ---------- + # Other functions in the GC API + + def set_max_heap_size(self, size): + self.max_heap_size = float(size) + if self.max_heap_size > 0.0: + if self.max_heap_size < self.next_major_collection_initial: + self.next_major_collection_initial = self.max_heap_size + if self.max_heap_size < self.next_major_collection_threshold: + self.next_major_collection_threshold = self.max_heap_size + + def raw_malloc_memory_pressure(self, sizehint): + self.next_major_collection_threshold -= sizehint + if self.next_major_collection_threshold < 0: + # cannot trigger a full collection now, but we can ensure + # that one will occur very soon + self.nursery_top = self.nursery_real_top + self.nursery_free = self.nursery_real_top + + def can_malloc_nonmovable(self): + return True + + def can_optimize_clean_setarrayitems(self): + if self.card_page_indices > 0: + return False + return MovingGCBase.can_optimize_clean_setarrayitems(self) + + def can_move(self, obj): + """Overrides the parent can_move().""" + return self.is_in_nursery(obj) + + + def shrink_array(self, obj, smallerlength): + # + # Only objects in the nursery can be "resized". Resizing them + # means recording that they have a smaller size, so that when + # moved out of the nursery, they will consume less memory. + # In particular, an array with GCFLAG_HAS_CARDS is never resized. + # Also, a nursery object with GCFLAG_HAS_SHADOW is not resized + # either, as this would potentially loose part of the memory in + # the already-allocated shadow. + if not self.is_in_nursery(obj): + return False + if self.header(obj).tid & GCFLAG_HAS_SHADOW: + return False + # + size_gc_header = self.gcheaderbuilder.size_gc_header + typeid = self.get_type_id(obj) + totalsmallersize = ( + size_gc_header + self.fixed_size(typeid) + + self.varsize_item_sizes(typeid) * smallerlength) + llarena.arena_shrink_obj(obj - size_gc_header, totalsmallersize) + # + offset_to_length = self.varsize_offset_to_length(typeid) + (obj + offset_to_length).signed[0] = smallerlength + return True + + + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def malloc_varsize_nonmovable(self, typeid, length): + obj = self.external_malloc(typeid, length) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def malloc_nonmovable(self, typeid, length, zero): + # helper for testing, same as GCBase.malloc + return self.external_malloc(typeid, length or 0) # None -> 0 + + + # ---------- + # Simple helpers + + def get_type_id(self, obj): + tid = self.header(obj).tid + return llop.extract_ushort(llgroup.HALFWORD, tid) + + def combine(self, typeid16, flags): + return llop.combine_ushort(lltype.Signed, typeid16, flags) + + def init_gc_object(self, addr, typeid16, flags=0): + # The default 'flags' is zero. The flags GCFLAG_NO_xxx_PTRS + # have been chosen to allow 'flags' to be zero in the common + # case (hence the 'NO' in their name). + hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) + hdr.tid = self.combine(typeid16, flags) + + def init_gc_object_immortal(self, addr, typeid16, flags=0): + # For prebuilt GC objects, the flags must contain + # GCFLAG_NO_xxx_PTRS, at least initially. 
+ flags |= GCFLAG_NO_HEAP_PTRS | GCFLAG_TRACK_YOUNG_PTRS + self.init_gc_object(addr, typeid16, flags) + + def is_in_nursery(self, addr): + ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0, + "odd-valued (i.e. tagged) pointer unexpected here") + return self.nursery <= addr < self.nursery_real_top + + def appears_to_be_young(self, addr): + # "is a valid addr to a young object?" + # but it's ok to occasionally return True accidentally. + # Maybe the best implementation would be a bloom filter + # of some kind instead of the dictionary lookup that is + # sometimes done below. But the expected common answer + # is "Yes" because addr points to the nursery, so it may + # not be useful to optimize the other case too much. + # + # First, if 'addr' appears to be a pointer to some place within + # the nursery, return True + if not self.translated_to_c: + # When non-translated, filter out tagged pointers explicitly. + # When translated, it may occasionally give a wrong answer + # of True if 'addr' is a tagged pointer with just the wrong value. + if not self.is_valid_gc_object(addr): + return False + + if self.nursery <= addr < self.nursery_real_top: + return True # addr is in the nursery + # + # Else, it may be in the set 'young_rawmalloced_objects' + return (bool(self.young_rawmalloced_objects) and + self.young_rawmalloced_objects.contains(addr)) + appears_to_be_young._always_inline_ = True + + def debug_is_old_object(self, addr): + return (self.is_valid_gc_object(addr) + and not self.appears_to_be_young(addr)) + + def is_forwarded(self, obj): + """Returns True if the nursery obj is marked as forwarded. + Implemented a bit obscurely by checking an unrelated flag + that can never be set on a young object -- except if tid == -42. + """ + assert self.is_in_nursery(obj) + tid = self.header(obj).tid + result = (tid & GCFLAG_FINALIZATION_ORDERING != 0) + if result: + ll_assert(tid == -42, "bogus header for young obj") + else: + ll_assert(bool(tid), "bogus header (1)") + ll_assert(tid & ~TID_MASK == 0, "bogus header (2)") + return result + + def get_forwarding_address(self, obj): + return llmemory.cast_adr_to_ptr(obj, FORWARDSTUBPTR).forw + + def get_possibly_forwarded_type_id(self, obj): + if self.is_in_nursery(obj) and self.is_forwarded(obj): + obj = self.get_forwarding_address(obj) + return self.get_type_id(obj) + + def get_total_memory_used(self): + """Return the total memory used, not counting any object in the + nursery: only objects in the ArenaCollection or raw-malloced. 
+ """ + return self.ac.total_memory_used + self.rawmalloced_total_size + + def card_marking_words_for_length(self, length): + # --- Unoptimized version: + #num_bits = ((length-1) >> self.card_page_shift) + 1 + #return (num_bits + (LONG_BIT - 1)) >> LONG_BIT_SHIFT + # --- Optimized version: + return intmask( + ((r_uint(length) + r_uint((LONG_BIT << self.card_page_shift) - 1)) >> + (self.card_page_shift + LONG_BIT_SHIFT))) + + def card_marking_bytes_for_length(self, length): + # --- Unoptimized version: + #num_bits = ((length-1) >> self.card_page_shift) + 1 + #return (num_bits + 7) >> 3 + # --- Optimized version: + return intmask( + ((r_uint(length) + r_uint((8 << self.card_page_shift) - 1)) >> + (self.card_page_shift + 3))) + + def debug_check_consistency(self): + if self.DEBUG: + ll_assert(not self.young_rawmalloced_objects, + "young raw-malloced objects in a major collection") + ll_assert(not self.young_objects_with_weakrefs.non_empty(), + "young objects with weakrefs in a major collection") + MovingGCBase.debug_check_consistency(self) + + def debug_check_object(self, obj): + # after a minor or major collection, no object should be in the nursery + ll_assert(not self.is_in_nursery(obj), + "object in nursery after collection") + # similarily, all objects should have this flag, except if they + # don't have any GC pointer + typeid = self.get_type_id(obj) + if self.has_gcptr(typeid): + ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0, + "missing GCFLAG_TRACK_YOUNG_PTRS") + # the GCFLAG_VISITED should not be set between collections + ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0, + "unexpected GCFLAG_VISITED") + # the GCFLAG_FINALIZATION_ORDERING should not be set between coll. + ll_assert(self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING == 0, + "unexpected GCFLAG_FINALIZATION_ORDERING") + # the GCFLAG_CARDS_SET should not be set between collections + ll_assert(self.header(obj).tid & GCFLAG_CARDS_SET == 0, + "unexpected GCFLAG_CARDS_SET") + # if the GCFLAG_HAS_CARDS is set, check that all bits are zero now + if self.header(obj).tid & GCFLAG_HAS_CARDS: + if self.card_page_indices <= 0: + ll_assert(False, "GCFLAG_HAS_CARDS but not using card marking") + return + typeid = self.get_type_id(obj) + ll_assert(self.has_gcptr_in_varsize(typeid), + "GCFLAG_HAS_CARDS but not has_gcptr_in_varsize") + ll_assert(self.header(obj).tid & GCFLAG_NO_HEAP_PTRS == 0, + "GCFLAG_HAS_CARDS && GCFLAG_NO_HEAP_PTRS") + offset_to_length = self.varsize_offset_to_length(typeid) + length = (obj + offset_to_length).signed[0] + extra_words = self.card_marking_words_for_length(length) + # + size_gc_header = self.gcheaderbuilder.size_gc_header + p = llarena.getfakearenaaddress(obj - size_gc_header) + i = extra_words * WORD + while i > 0: + p -= 1 + ll_assert(p.char[0] == '\x00', + "the card marker bits are not cleared") + i -= 1 + + # ---------- + # Write barrier + + # for the JIT: a minimal description of the write_barrier() method + # (the JIT assumes it is of the shape + # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") + JIT_WB_IF_FLAG = GCFLAG_TRACK_YOUNG_PTRS + + # for the JIT to generate custom code corresponding to the array + # write barrier for the simplest case of cards. 
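# Quick cross-check of the optimized card-counting formula above against the
# straightforward version it replaces; card_page_shift = 7 (that is,
# card_page_indices = 128) is assumed for the sketch.
CARD_PAGE_SHIFT = 7

def bytes_naive(length):
    num_bits = ((length - 1) >> CARD_PAGE_SHIFT) + 1
    return (num_bits + 7) >> 3

def bytes_optimized(length):
    return (length + (8 << CARD_PAGE_SHIFT) - 1) >> (CARD_PAGE_SHIFT + 3)

assert all(bytes_naive(n) == bytes_optimized(n) for n in range(1, 5000))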
If JIT_CARDS_SET + # is already set on an object, it will execute code like this: + # MOV eax, index + # SHR eax, JIT_WB_CARD_PAGE_SHIFT + # XOR eax, -8 + # BTS [object], eax + if TRANSLATION_PARAMS['card_page_indices'] > 0: + JIT_WB_CARDS_SET = GCFLAG_CARDS_SET + JIT_WB_CARD_PAGE_SHIFT = 1 + while ((1 << JIT_WB_CARD_PAGE_SHIFT) != + TRANSLATION_PARAMS['card_page_indices']): + JIT_WB_CARD_PAGE_SHIFT += 1 + + @classmethod + def JIT_max_size_of_young_obj(cls): + return cls.TRANSLATION_PARAMS['large_object'] + + @classmethod + def JIT_minimal_size_in_nursery(cls): + return cls.minimal_size_in_nursery + + def write_barrier(self, newvalue, addr_struct): + if self.header(addr_struct).tid & GCFLAG_TRACK_YOUNG_PTRS: + self.remember_young_pointer(addr_struct, newvalue) + + def write_barrier_from_array(self, newvalue, addr_array, index): + if self.header(addr_array).tid & GCFLAG_TRACK_YOUNG_PTRS: + if self.card_page_indices > 0: # <- constant-folded + self.remember_young_pointer_from_array2(addr_array, index) + else: + self.remember_young_pointer(addr_array, newvalue) + + def _init_writebarrier_logic(self): + DEBUG = self.DEBUG + # The purpose of attaching remember_young_pointer to the instance + # instead of keeping it as a regular method is to + # make the code in write_barrier() marginally smaller + # (which is important because it is inlined *everywhere*). + def remember_young_pointer(addr_struct, newvalue): + # 'addr_struct' is the address of the object in which we write. + # 'newvalue' is the address that we are going to write in there. + # We know that 'addr_struct' has GCFLAG_TRACK_YOUNG_PTRS so far. + # + if DEBUG: # note: PYPY_GC_DEBUG=1 does not enable this + ll_assert(self.debug_is_old_object(addr_struct) or + self.header(addr_struct).tid & GCFLAG_HAS_CARDS != 0, + "young object with GCFLAG_TRACK_YOUNG_PTRS and no cards") + # + # If it seems that what we are writing is a pointer to a young obj + # (as checked with appears_to_be_young()), then we need + # to remove the flag GCFLAG_TRACK_YOUNG_PTRS and add the object + # to the list 'old_objects_pointing_to_young'. We know that + # 'addr_struct' cannot be in the nursery, because nursery objects + # never have the flag GCFLAG_TRACK_YOUNG_PTRS to start with. + objhdr = self.header(addr_struct) + if self.appears_to_be_young(newvalue): + self.old_objects_pointing_to_young.append(addr_struct) + objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS + # + # Second part: if 'addr_struct' is actually a prebuilt GC + # object and it's the first time we see a write to it, we + # add it to the list 'prebuilt_root_objects'. Note that we + # do it even in the (rare?) case of 'addr' being NULL or another + # prebuilt object, to simplify code. 
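# A stripped-down model of remember_young_pointer() above: objects carry a
# 'track_young_ptrs' flag, and the first store of a young pointer into a
# tracked object clears that flag and records the object exactly once in
# the remembered set (the role played by 'old_objects_pointing_to_young').
class Obj(object):
    def __init__(self, young=False):
        self.young = young
        self.track_young_ptrs = not young   # nursery objects never have it
        self.fields = {}

old_objects_pointing_to_young = []

def write_barrier(target, name, value):
    if target.track_young_ptrs and isinstance(value, Obj) and value.young:
        old_objects_pointing_to_young.append(target)
        target.track_young_ptrs = False     # don't record it a second time
    target.fields[name] = value

old, young = Obj(), Obj(young=True)
write_barrier(old, 'x', young)
write_barrier(old, 'y', young)
assert old_objects_pointing_to_young == [old]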
+ if objhdr.tid & GCFLAG_NO_HEAP_PTRS: + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.prebuilt_root_objects.append(addr_struct) + + remember_young_pointer._dont_inline_ = True + self.remember_young_pointer = remember_young_pointer + # + def jit_remember_young_pointer(addr_struct): + # minimal version of the above, with just one argument, + # called by the JIT when GCFLAG_TRACK_YOUNG_PTRS is set + self.old_objects_pointing_to_young.append(addr_struct) + objhdr = self.header(addr_struct) + objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS + if objhdr.tid & GCFLAG_NO_HEAP_PTRS: + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.prebuilt_root_objects.append(addr_struct) + self.jit_remember_young_pointer = jit_remember_young_pointer + # + if self.card_page_indices > 0: + self._init_writebarrier_with_card_marker() + + + def _init_writebarrier_with_card_marker(self): + DEBUG = self.DEBUG + def remember_young_pointer_from_array2(addr_array, index): + # 'addr_array' is the address of the object in which we write, + # which must have an array part; 'index' is the index of the + # item that is (or contains) the pointer that we write. + # We know that 'addr_array' has GCFLAG_TRACK_YOUNG_PTRS so far. + # + objhdr = self.header(addr_array) + if objhdr.tid & GCFLAG_HAS_CARDS == 0: + # + if DEBUG: # note: PYPY_GC_DEBUG=1 does not enable this + ll_assert(self.debug_is_old_object(addr_array), + "young array with no card but GCFLAG_TRACK_YOUNG_PTRS") + # + # no cards, use default logic. Mostly copied from above. + self.old_objects_pointing_to_young.append(addr_array) + objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS + if objhdr.tid & GCFLAG_NO_HEAP_PTRS: + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.prebuilt_root_objects.append(addr_array) + return + # + # 'addr_array' is a raw_malloc'ed array with card markers + # in front. Compute the index of the bit to set: + bitindex = index >> self.card_page_shift + byteindex = bitindex >> 3 + bitmask = 1 << (bitindex & 7) + # + # If the bit is already set, leave now. + addr_byte = self.get_card(addr_array, byteindex) + byte = ord(addr_byte.char[0]) + if byte & bitmask: + return + # + # We set the flag (even if the newly written address does not + # actually point to the nursery, which seems to be ok -- actually + # it seems more important that remember_young_pointer_from_array2() + # does not take 3 arguments). + addr_byte.char[0] = chr(byte | bitmask) + # + if objhdr.tid & GCFLAG_CARDS_SET == 0: + self.old_objects_with_cards_set.append(addr_array) + objhdr.tid |= GCFLAG_CARDS_SET + + remember_young_pointer_from_array2._dont_inline_ = True + assert self.card_page_indices > 0 + self.remember_young_pointer_from_array2 = ( + remember_young_pointer_from_array2) + + def jit_remember_young_pointer_from_array(addr_array): + # minimal version of the above, with just one argument, + # called by the JIT when GCFLAG_TRACK_YOUNG_PTRS is set + # but GCFLAG_CARDS_SET is cleared. This tries to set + # GCFLAG_CARDS_SET if possible; otherwise, it falls back + # to jit_remember_young_pointer(). 
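# Worked example of the card addressing in remember_young_pointer_from_array2()
# above, assuming card_page_shift = 7 (128 array items per card bit).
CARD_PAGE_SHIFT = 7

def card_position(index):
    bitindex = index >> CARD_PAGE_SHIFT
    return bitindex >> 3, 1 << (bitindex & 7)    # (byteindex, bitmask)

assert card_position(1000) == (0, 0x80)   # 1000 // 128 == 7 -> byte 0, bit 7
assert card_position(1024) == (1, 0x01)   # 1024 // 128 == 8 -> next byte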
+ objhdr = self.header(addr_array) + if objhdr.tid & GCFLAG_HAS_CARDS: + self.old_objects_with_cards_set.append(addr_array) + objhdr.tid |= GCFLAG_CARDS_SET + else: + self.jit_remember_young_pointer(addr_array) + + self.jit_remember_young_pointer_from_array = ( + jit_remember_young_pointer_from_array) + + def get_card(self, obj, byteindex): + size_gc_header = self.gcheaderbuilder.size_gc_header + addr_byte = obj - size_gc_header + return llarena.getfakearenaaddress(addr_byte) + (~byteindex) + + + def assume_young_pointers(self, addr_struct): + """Called occasionally by the JIT to mean ``assume that 'addr_struct' + may now contain young pointers.'' + """ + objhdr = self.header(addr_struct) + if objhdr.tid & GCFLAG_TRACK_YOUNG_PTRS: + self.old_objects_pointing_to_young.append(addr_struct) + objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS + # + if objhdr.tid & GCFLAG_NO_HEAP_PTRS: + objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.prebuilt_root_objects.append(addr_struct) + + def writebarrier_before_copy(self, source_addr, dest_addr, + source_start, dest_start, length): + """ This has the same effect as calling writebarrier over + each element in dest copied from source, except it might reset + one of the following flags a bit too eagerly, which means we'll have + a bit more objects to track, but being on the safe side. + """ + source_hdr = self.header(source_addr) + dest_hdr = self.header(dest_addr) + if dest_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0: + return True + # ^^^ a fast path of write-barrier + # + if source_hdr.tid & GCFLAG_HAS_CARDS != 0: + # + if source_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0: + # The source object may have random young pointers. + # Return False to mean "do it manually in ll_arraycopy". + return False + # + if source_hdr.tid & GCFLAG_CARDS_SET == 0: + # The source object has no young pointers at all. Done. + return True + # + if dest_hdr.tid & GCFLAG_HAS_CARDS == 0: + # The dest object doesn't have cards. Do it manually. + return False + # + if source_start != 0 or dest_start != 0: + # Misaligned. Do it manually. + return False + # + self.manually_copy_card_bits(source_addr, dest_addr, length) + return True + # + if source_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0: + # there might be in source a pointer to a young object + self.old_objects_pointing_to_young.append(dest_addr) + dest_hdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS + # + if dest_hdr.tid & GCFLAG_NO_HEAP_PTRS: + if source_hdr.tid & GCFLAG_NO_HEAP_PTRS == 0: + dest_hdr.tid &= ~GCFLAG_NO_HEAP_PTRS + self.prebuilt_root_objects.append(dest_addr) + return True + + def manually_copy_card_bits(self, source_addr, dest_addr, length): + # manually copy the individual card marks from source to dest + bytes = self.card_marking_bytes_for_length(length) + # + anybyte = 0 + i = 0 + while i < bytes: + addr_srcbyte = self.get_card(source_addr, i) + addr_dstbyte = self.get_card(dest_addr, i) + byte = ord(addr_srcbyte.char[0]) + anybyte |= byte + addr_dstbyte.char[0] = chr(ord(addr_dstbyte.char[0]) | byte) + i += 1 + # + if anybyte: + dest_hdr = self.header(dest_addr) + if dest_hdr.tid & GCFLAG_CARDS_SET == 0: + self.old_objects_with_cards_set.append(dest_addr) + dest_hdr.tid |= GCFLAG_CARDS_SET + + # ---------- + # Nursery collection + + def minor_collection(self): + """Perform a minor collection: find the objects from the nursery + that remain alive and move them out.""" + # + debug_start("gc-minor") + # + # Before everything else, remove from 'old_objects_pointing_to_young' + # the young arrays. 
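# The card bytes live in front of the GC header, so get_card() above adds the
# bitwise complement of the byte index: card byte i sits at offset -(i + 1),
# counting backwards from the start of the header (addresses here are plain
# integers, purely for illustration).
assert all(~i == -(i + 1) for i in range(16))
header_addr = 4096
assert [header_addr + ~i for i in range(3)] == [4095, 4094, 4093]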
+ if self.young_rawmalloced_objects: + self.remove_young_arrays_from_old_objects_pointing_to_young() + # + # First, find the roots that point to young objects. All nursery + # objects found are copied out of the nursery, and the occasional + # young raw-malloced object is flagged with GCFLAG_VISITED. + # Note that during this step, we ignore references to further + # young objects; only objects directly referenced by roots + # are copied out or flagged. They are also added to the list + # 'old_objects_pointing_to_young'. + self.collect_roots_in_nursery() + # + while True: + # If we are using card marking, do a partial trace of the arrays + # that are flagged with GCFLAG_CARDS_SET. + if self.card_page_indices > 0: + self.collect_cardrefs_to_nursery() + # + # Now trace objects from 'old_objects_pointing_to_young'. + # All nursery objects they reference are copied out of the + # nursery, and again added to 'old_objects_pointing_to_young'. + # All young raw-malloced object found are flagged GCFLAG_VISITED. + # We proceed until 'old_objects_pointing_to_young' is empty. + self.collect_oldrefs_to_nursery() + # + # We have to loop back if collect_oldrefs_to_nursery caused + # new objects to show up in old_objects_with_cards_set + if self.card_page_indices > 0: + if self.old_objects_with_cards_set.non_empty(): + continue + break + # + # Now all live nursery objects should be out. Update the young + # weakrefs' targets. + if self.young_objects_with_weakrefs.non_empty(): + self.invalidate_young_weakrefs() + if self.young_objects_with_light_finalizers.non_empty(): + self.deal_with_young_objects_with_finalizers() + # + # Clear this mapping. + if self.nursery_objects_shadows.length() > 0: + self.nursery_objects_shadows.clear() + # + # Walk the list of young raw-malloced objects, and either free + # them or make them old. + if self.young_rawmalloced_objects: + self.free_young_rawmalloced_objects() + # + # All live nursery objects are out, and the rest dies. Fill + # the nursery up to the cleanup point with zeros + llarena.arena_reset(self.nursery, self.nursery_size, 0) + llarena.arena_reset(self.nursery, self.initial_cleanup, 2) + self.debug_rotate_nursery() + self.nursery_free = self.nursery + self.nursery_top = self.nursery + self.initial_cleanup + self.nursery_real_top = self.nursery + self.nursery_size + # + debug_print("minor collect, total memory used:", + self.get_total_memory_used()) + if self.DEBUG >= 2: + self.debug_check_consistency() # expensive! + debug_stop("gc-minor") + + + def collect_roots_in_nursery(self): + # we don't need to trace prebuilt GcStructs during a minor collect: + # if a prebuilt GcStruct contains a pointer to a young object, + # then the write_barrier must have ensured that the prebuilt + # GcStruct is in the list self.old_objects_pointing_to_young. + debug_start("gc-minor-walkroots") + self.root_walker.walk_roots( + IncrementalMiniMarkGC._trace_drag_out1, # stack roots + IncrementalMiniMarkGC._trace_drag_out1, # static in prebuilt non-gc + None) # static in prebuilt gc + debug_stop("gc-minor-walkroots") + + def collect_cardrefs_to_nursery(self): + size_gc_header = self.gcheaderbuilder.size_gc_header + oldlist = self.old_objects_with_cards_set + while oldlist.non_empty(): + obj = oldlist.pop() + # + # Remove the GCFLAG_CARDS_SET flag. + ll_assert(self.header(obj).tid & GCFLAG_CARDS_SET != 0, + "!GCFLAG_CARDS_SET but object in 'old_objects_with_cards_set'") + self.header(obj).tid &= ~GCFLAG_CARDS_SET + # + # Get the number of card marker bytes in the header. 
+ typeid = self.get_type_id(obj) + offset_to_length = self.varsize_offset_to_length(typeid) + length = (obj + offset_to_length).signed[0] + bytes = self.card_marking_bytes_for_length(length) + p = llarena.getfakearenaaddress(obj - size_gc_header) + # + # If the object doesn't have GCFLAG_TRACK_YOUNG_PTRS, then it + # means that it is in 'old_objects_pointing_to_young' and + # will be fully traced by collect_oldrefs_to_nursery() just + # afterwards. + if self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS == 0: + # + # In that case, we just have to reset all card bits. + while bytes > 0: + p -= 1 + p.char[0] = '\x00' + bytes -= 1 + # + else: + # Walk the bytes encoding the card marker bits, and for + # each bit set, call trace_and_drag_out_of_nursery_partial(). + interval_start = 0 + while bytes > 0: + p -= 1 + cardbyte = ord(p.char[0]) + p.char[0] = '\x00' # reset the bits + bytes -= 1 + next_byte_start = interval_start + 8*self.card_page_indices + # + while cardbyte != 0: + interval_stop = interval_start + self.card_page_indices + # + if cardbyte & 1: + if interval_stop > length: + interval_stop = length + ll_assert(cardbyte <= 1 and bytes == 0, + "premature end of object") + self.trace_and_drag_out_of_nursery_partial( + obj, interval_start, interval_stop) + # + interval_start = interval_stop + cardbyte >>= 1 + interval_start = next_byte_start + + + def collect_oldrefs_to_nursery(self): + # Follow the old_objects_pointing_to_young list and move the + # young objects they point to out of the nursery. + oldlist = self.old_objects_pointing_to_young + while oldlist.non_empty(): + obj = oldlist.pop() + # + # Check that the flags are correct: we must not have + # GCFLAG_TRACK_YOUNG_PTRS so far. + ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS == 0, + "old_objects_pointing_to_young contains obj with " + "GCFLAG_TRACK_YOUNG_PTRS") + # + # Add the flag GCFLAG_TRACK_YOUNG_PTRS. All live objects should + # have this flag set after a nursery collection. + self.header(obj).tid |= GCFLAG_TRACK_YOUNG_PTRS + # + # Trace the 'obj' to replace pointers to nursery with pointers + # outside the nursery, possibly forcing nursery objects out + # and adding them to 'old_objects_pointing_to_young' as well. + self.trace_and_drag_out_of_nursery(obj) + + def trace_and_drag_out_of_nursery(self, obj): + """obj must not be in the nursery. This copies all the + young objects it references out of the nursery. + """ + self.trace(obj, self._trace_drag_out, None) + + def trace_and_drag_out_of_nursery_partial(self, obj, start, stop): + """Like trace_and_drag_out_of_nursery(), but limited to the array + indices in range(start, stop). + """ + ll_assert(start < stop, "empty or negative range " + "in trace_and_drag_out_of_nursery_partial()") + #print 'trace_partial:', start, stop, '\t', obj + self.trace_partial(obj, start, stop, self._trace_drag_out, None) + + + def _trace_drag_out1(self, root): + self._trace_drag_out(root, None) + + def _trace_drag_out(self, root, ignored): + obj = root.address[0] + #print '_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj) + # + # If 'obj' is not in the nursery, nothing to change -- expect + # that we must set GCFLAG_VISITED on young raw-malloced objects. + if not self.is_in_nursery(obj): + # cache usage trade-off: I think that it is a better idea to + # check if 'obj' is in young_rawmalloced_objects with an access + # to this (small) dictionary, rather than risk a lot of cache + # misses by reading a flag in the header of all the 'objs' that + # arrive here. 
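# Sketch of the card-byte walk in collect_cardrefs_to_nursery() above: each
# set bit selects one card_page_indices-sized slice of the array that must be
# re-traced, clamped to the real length (128 indices per card assumed).
def dirty_intervals(cardbyte, start, length, indices_per_card=128):
    intervals = []
    while cardbyte:
        stop = start + indices_per_card
        if cardbyte & 1:
            intervals.append((start, min(stop, length)))
        start = stop
        cardbyte >>= 1
    return intervals

# bits 0 and 2 set -> items [0, 128) and [256, 300) get re-traced
assert dirty_intervals(0b101, 0, 300) == [(0, 128), (256, 300)]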
+ if (bool(self.young_rawmalloced_objects) + and self.young_rawmalloced_objects.contains(obj)): + self._visit_young_rawmalloced_object(obj) + return + # + size_gc_header = self.gcheaderbuilder.size_gc_header + if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0: + # + # Common case: 'obj' was not already forwarded (otherwise + # tid == -42, containing all flags), and it doesn't have the + # HAS_SHADOW flag either. We must move it out of the nursery, + # into a new nonmovable location. + totalsize = size_gc_header + self.get_size(obj) + newhdr = self._malloc_out_of_nursery(totalsize) + # + elif self.is_forwarded(obj): + # + # 'obj' was already forwarded. Change the original reference + # to point to its forwarding address, and we're done. + root.address[0] = self.get_forwarding_address(obj) + return + # + else: + # First visit to an object that has already a shadow. + newobj = self.nursery_objects_shadows.get(obj) + ll_assert(newobj != NULL, "GCFLAG_HAS_SHADOW but no shadow found") + newhdr = newobj - size_gc_header + # + # Remove the flag GCFLAG_HAS_SHADOW, so that it doesn't get + # copied to the shadow itself. + self.header(obj).tid &= ~GCFLAG_HAS_SHADOW + # + totalsize = size_gc_header + self.get_size(obj) + # + # Copy it. Note that references to other objects in the + # nursery are kept unchanged in this step. + llmemory.raw_memcopy(obj - size_gc_header, newhdr, totalsize) + # + # Set the old object's tid to -42 (containing all flags) and + # replace the old object's content with the target address. + # A bit of no-ops to convince llarena that we are changing + # the layout, in non-translated versions. + typeid = self.get_type_id(obj) + obj = llarena.getfakearenaaddress(obj) + llarena.arena_reset(obj - size_gc_header, totalsize, 0) + llarena.arena_reserve(obj - size_gc_header, + size_gc_header + llmemory.sizeof(FORWARDSTUB)) + self.header(obj).tid = -42 + newobj = newhdr + size_gc_header + llmemory.cast_adr_to_ptr(obj, FORWARDSTUBPTR).forw = newobj + # + # Change the original pointer to this object. + root.address[0] = newobj + # + # Add the newobj to the list 'old_objects_pointing_to_young', + # because it can contain further pointers to other young objects. + # We will fix such references to point to the copy of the young + # objects when we walk 'old_objects_pointing_to_young'. + if self.has_gcptr(typeid): + # we only have to do it if we have any gcptrs + self.old_objects_pointing_to_young.append(newobj) + _trace_drag_out._always_inline_ = True + + def _visit_young_rawmalloced_object(self, obj): + # 'obj' points to a young, raw-malloced object. + # Any young rawmalloced object never seen by the code here + # will end up without GCFLAG_VISITED, and be freed at the + # end of the current minor collection. Note that there was + # a bug in which dying young arrays with card marks would + # still be scanned before being freed, keeping a lot of + # objects unnecessarily alive. 
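# A toy model of the forwarding protocol in _trace_drag_out() above: once a
# nursery object has been evacuated, its old slot keeps only a stub (the
# tid == -42 header plus the new address), so every later root that reaches
# the same object is re-pointed to the single copy.
nursery = {}                 # addr -> ('obj', payload) or ('stub', new_addr)
old_space = {}
next_free = [1000]           # bump pointer of the "old" space

def drag_out(addr):
    kind, value = nursery[addr]
    if kind == 'stub':
        return value                        # already forwarded: just re-point
    new_addr = next_free[0]
    next_free[0] += 8
    old_space[new_addr] = value             # copy the payload out
    nursery[addr] = ('stub', new_addr)      # leave the forwarding stub behind
    return new_addr

nursery[16] = ('obj', 'payload')
a = drag_out(16)
b = drag_out(16)             # a second root reaching the same object
assert a == b == 1000 and old_space[1000] == 'payload'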
+ hdr = self.header(obj) + if hdr.tid & GCFLAG_VISITED: + return + hdr.tid |= GCFLAG_VISITED + # + # we just made 'obj' old, so we need to add it to the correct lists + added_somewhere = False + # + if hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0: + self.old_objects_pointing_to_young.append(obj) + added_somewhere = True + # + if hdr.tid & GCFLAG_HAS_CARDS != 0: + ll_assert(hdr.tid & GCFLAG_CARDS_SET != 0, + "young array: GCFLAG_HAS_CARDS without GCFLAG_CARDS_SET") + self.old_objects_with_cards_set.append(obj) + added_somewhere = True + # + ll_assert(added_somewhere, "wrong flag combination on young array") + + + def _malloc_out_of_nursery(self, totalsize): + """Allocate non-movable memory for an object of the given + 'totalsize' that lives so far in the nursery.""" + if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # most common path + return self.ac.malloc(totalsize) + else: + # for nursery objects that are not small + return self._malloc_out_of_nursery_nonsmall(totalsize) + _malloc_out_of_nursery._always_inline_ = True + + def _malloc_out_of_nursery_nonsmall(self, totalsize): + # 'totalsize' should be aligned. + ll_assert(raw_malloc_usage(totalsize) & (WORD-1) == 0, + "misaligned totalsize in _malloc_out_of_nursery_nonsmall") + # + arena = llarena.arena_malloc(raw_malloc_usage(totalsize), False) + if not arena: + raise MemoryError("cannot allocate object") + llarena.arena_reserve(arena, totalsize) + # + size_gc_header = self.gcheaderbuilder.size_gc_header + self.rawmalloced_total_size += r_uint(raw_malloc_usage(totalsize)) + self.old_rawmalloced_objects.append(arena + size_gc_header) + return arena + + def free_young_rawmalloced_objects(self): + self.young_rawmalloced_objects.foreach( + self._free_young_rawmalloced_obj, None) + self.young_rawmalloced_objects.delete() + self.young_rawmalloced_objects = self.null_address_dict() + + def _free_young_rawmalloced_obj(self, obj, ignored1, ignored2): + # If 'obj' has GCFLAG_VISITED, it was seen by _trace_drag_out + # and survives. Otherwise, it dies. + self.free_rawmalloced_object_if_unvisited(obj) + + def remove_young_arrays_from_old_objects_pointing_to_young(self): + old = self.old_objects_pointing_to_young + new = self.AddressStack() + while old.non_empty(): + obj = old.pop() + if not self.young_rawmalloced_objects.contains(obj): + new.append(obj) + # an extra copy, to avoid assignments to + # 'self.old_objects_pointing_to_young' + while new.non_empty(): + old.append(new.pop()) + new.delete() + + # ---------- + # Full collection + + def major_collection(self, reserving_size=0): + """Do a major collection. Only for when the nursery is empty.""" + # + debug_start("gc-collect") + debug_print() + debug_print(".----------- Full collection ------------------") + debug_print("| used before collection:") + debug_print("| in ArenaCollection: ", + self.ac.total_memory_used, "bytes") + debug_print("| raw_malloced: ", + self.rawmalloced_total_size, "bytes") + # + # Debugging checks + ll_assert(self.nursery_free == self.nursery, + "nursery not empty in major_collection()") + self.debug_check_consistency() + # + # Note that a major collection is non-moving. The goal is only to + # find and free some of the objects allocated by the ArenaCollection. + # We first visit all objects and toggle the flag GCFLAG_VISITED on + # them, starting from the roots. 
+ self.objects_to_trace = self.AddressStack() + self.collect_roots() + self.visit_all_objects() + # + # Finalizer support: adds the flag GCFLAG_VISITED to all objects + # with a finalizer and all objects reachable from there (and also + # moves some objects from 'objects_with_finalizers' to + # 'run_finalizers'). + if self.objects_with_finalizers.non_empty(): + self.deal_with_objects_with_finalizers() + # + self.objects_to_trace.delete() + # + # Weakref support: clear the weak pointers to dying objects + if self.old_objects_with_weakrefs.non_empty(): + self.invalidate_old_weakrefs() + if self.old_objects_with_light_finalizers.non_empty(): + self.deal_with_old_objects_with_finalizers() + + # + # Walk all rawmalloced objects and free the ones that don't + # have the GCFLAG_VISITED flag. + self.free_unvisited_rawmalloc_objects() + # + # Ask the ArenaCollection to visit all objects. Free the ones + # that have not been visited above, and reset GCFLAG_VISITED on + # the others. + self.ac.mass_free(self._free_if_unvisited) + # + # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. + self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) + # + self.debug_check_consistency() + # + self.num_major_collects += 1 + debug_print("| used after collection:") + debug_print("| in ArenaCollection: ", + self.ac.total_memory_used, "bytes") + debug_print("| raw_malloced: ", + self.rawmalloced_total_size, "bytes") + debug_print("| number of major collects: ", + self.num_major_collects) + debug_print("`----------------------------------------------") + debug_stop("gc-collect") + # + # Set the threshold for the next major collection to be when we + # have allocated 'major_collection_threshold' times more than + # we currently have -- but no more than 'max_delta' more than + # we currently have. + total_memory_used = float(self.get_total_memory_used()) + bounded = self.set_major_threshold_from( + min(total_memory_used * self.major_collection_threshold, + total_memory_used + self.max_delta), + reserving_size) + # + # Max heap size: gives an upper bound on the threshold. If we + # already have at least this much allocated, raise MemoryError. + if bounded and (float(self.get_total_memory_used()) + reserving_size >= + self.next_major_collection_initial): + # + # First raise MemoryError, giving the program a chance to + # quit cleanly. It might still allocate in the nursery, + # which might eventually be emptied, triggering another + # major collect and (possibly) reaching here again with an + # even higher memory consumption. To prevent it, if it's + # the second time we are here, then abort the program. + if self.max_heap_size_already_raised: + llop.debug_fatalerror(lltype.Void, + "Using too much memory, aborting") + self.max_heap_size_already_raised = True + raise MemoryError + # + # At the end, we can execute the finalizers of the objects + # listed in 'run_finalizers'. Note that this will typically do + # more allocations. 
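# Worked example of the threshold computed above (numbers invented for the
# sketch): with 100 MB in use after the major collection, a
# major_collection_threshold of 1.82 and a max_delta of 50 MB, the next major
# collection is proposed at min(182 MB, 150 MB) = 150 MB, before
# set_major_threshold_from() applies its growth/min/max clamps.
MB = 1024.0 * 1024.0
total_memory_used = 100 * MB
major_collection_threshold = 1.82
max_delta = 50 * MB
proposed = min(total_memory_used * major_collection_threshold,
               total_memory_used + max_delta)
assert proposed == 150 * MB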
+ self.execute_finalizers() + + + def _free_if_unvisited(self, hdr): + size_gc_header = self.gcheaderbuilder.size_gc_header + obj = hdr + size_gc_header + if self.header(obj).tid & GCFLAG_VISITED: + self.header(obj).tid &= ~GCFLAG_VISITED + return False # survives + return True # dies + + def _reset_gcflag_visited(self, obj, ignored): + self.header(obj).tid &= ~GCFLAG_VISITED + + def free_rawmalloced_object_if_unvisited(self, obj): + if self.header(obj).tid & GCFLAG_VISITED: + self.header(obj).tid &= ~GCFLAG_VISITED # survives + self.old_rawmalloced_objects.append(obj) + else: + size_gc_header = self.gcheaderbuilder.size_gc_header + totalsize = size_gc_header + self.get_size(obj) + allocsize = raw_malloc_usage(totalsize) + arena = llarena.getfakearenaaddress(obj - size_gc_header) + # + # Must also include the card marker area, if any + if (self.card_page_indices > 0 # <- this is constant-folded + and self.header(obj).tid & GCFLAG_HAS_CARDS): + # + # Get the length and compute the number of extra bytes + typeid = self.get_type_id(obj) + ll_assert(self.has_gcptr_in_varsize(typeid), + "GCFLAG_HAS_CARDS but not has_gcptr_in_varsize") + offset_to_length = self.varsize_offset_to_length(typeid) + length = (obj + offset_to_length).signed[0] + extra_words = self.card_marking_words_for_length(length) + arena -= extra_words * WORD + allocsize += extra_words * WORD + # + llarena.arena_free(arena) + self.rawmalloced_total_size -= r_uint(allocsize) + + def free_unvisited_rawmalloc_objects(self): + list = self.old_rawmalloced_objects + self.old_rawmalloced_objects = self.AddressStack() + # + while list.non_empty(): + self.free_rawmalloced_object_if_unvisited(list.pop()) + # + list.delete() + + + def collect_roots(self): + # Collect all roots. Starts from all the objects + # from 'prebuilt_root_objects'. + self.prebuilt_root_objects.foreach(self._collect_obj, + self.objects_to_trace) + # + # Add the roots from the other sources. + self.root_walker.walk_roots( + IncrementalMiniMarkGC._collect_ref_stk, # stack roots + IncrementalMiniMarkGC._collect_ref_stk, # static in prebuilt non-gc structures + None) # we don't need the static in all prebuilt gc objects + # + # If we are in an inner collection caused by a call to a finalizer, + # the 'run_finalizers' objects also need to be kept alive. + self.run_finalizers.foreach(self._collect_obj, + self.objects_to_trace) + + def enumerate_all_roots(self, callback, arg): + self.prebuilt_root_objects.foreach(callback, arg) + MovingGCBase.enumerate_all_roots(self, callback, arg) + enumerate_all_roots._annspecialcase_ = 'specialize:arg(1)' + + @staticmethod + def _collect_obj(obj, objects_to_trace): + objects_to_trace.append(obj) + + def _collect_ref_stk(self, root): + obj = root.address[0] + llop.debug_nonnull_pointer(lltype.Void, obj) + self.objects_to_trace.append(obj) + + def _collect_ref_rec(self, root, ignored): + self.objects_to_trace.append(root.address[0]) + + def visit_all_objects(self): + pending = self.objects_to_trace + while pending.non_empty(): + obj = pending.pop() + self.visit(obj) + + def visit(self, obj): + # + # 'obj' is a live object. Check GCFLAG_VISITED to know if we + # have already seen it before. + # + # Moreover, we can ignore prebuilt objects with GCFLAG_NO_HEAP_PTRS. + # If they have this flag set, then they cannot point to heap + # objects, so ignoring them is fine. If they don't have this + # flag set, then the object should be in 'prebuilt_root_objects', + # and the GCFLAG_VISITED will be reset at the end of the + # collection. 
+ hdr = self.header(obj) + if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS): + return + # + # It's the first time. We set the flag. + hdr.tid |= GCFLAG_VISITED + if not self.has_gcptr(llop.extract_ushort(llgroup.HALFWORD, hdr.tid)): + return + # + # Trace the content of the object and put all objects it references + # into the 'objects_to_trace' list. + self.trace(obj, self._collect_ref_rec, None) + + + # ---------- + # id() and identityhash() support + + def _allocate_shadow(self, obj): + size_gc_header = self.gcheaderbuilder.size_gc_header + size = self.get_size(obj) + shadowhdr = self._malloc_out_of_nursery(size_gc_header + + size) + # Initialize the shadow enough to be considered a + # valid gc object. If the original object stays + # alive at the next minor collection, it will anyway + # be copied over the shadow and overwrite the + # following fields. But if the object dies, then + # the shadow will stay around and only be freed at + # the next major collection, at which point we want + # it to look valid (but ready to be freed). + shadow = shadowhdr + size_gc_header + self.header(shadow).tid = self.header(obj).tid + typeid = self.get_type_id(obj) + if self.is_varsize(typeid): + lenofs = self.varsize_offset_to_length(typeid) + (shadow + lenofs).signed[0] = (obj + lenofs).signed[0] + # + self.header(obj).tid |= GCFLAG_HAS_SHADOW + self.nursery_objects_shadows.setitem(obj, shadow) + return shadow + + def _find_shadow(self, obj): + # + # The object is not a tagged pointer, and it is still in the + # nursery. Find or allocate a "shadow" object, which is + # where the object will be moved by the next minor + # collection + if self.header(obj).tid & GCFLAG_HAS_SHADOW: + shadow = self.nursery_objects_shadows.get(obj) + ll_assert(shadow != NULL, + "GCFLAG_HAS_SHADOW but no shadow found") + else: + shadow = self._allocate_shadow(obj) + # + # The answer is the address of the shadow. + return shadow + _find_shadow._dont_inline_ = True + + @specialize.arg(2) + def id_or_identityhash(self, gcobj, is_hash): + """Implement the common logic of id() and identityhash() + of an object, given as a GCREF. + """ + obj = llmemory.cast_ptr_to_adr(gcobj) + # + if self.is_valid_gc_object(obj): + if self.is_in_nursery(obj): + obj = self._find_shadow(obj) + elif is_hash: + if self.header(obj).tid & GCFLAG_HAS_SHADOW: + # + # For identityhash(), we need a special case for some + # prebuilt objects: their hash must be the same before + # and after translation. It is stored as an extra word + # after the object. But we cannot use it for id() + # because the stored value might clash with a real one. + size = self.get_size(obj) + i = (obj + size).signed[0] + # Important: the returned value is not mangle_hash()ed! + return i + # + i = llmemory.cast_adr_to_int(obj) + if is_hash: + i = mangle_hash(i) + return i + id_or_identityhash._always_inline_ = True + + def id(self, gcobj): + return self.id_or_identityhash(gcobj, False) + + def identityhash(self, gcobj): + return self.id_or_identityhash(gcobj, True) + + # ---------- + # Finalizers + + def deal_with_young_objects_with_finalizers(self): + """ This is a much simpler version of dealing with finalizers + and an optimization - we can reasonably assume that those finalizers + don't do anything fancy and *just* call them. 
Among other things + they won't resurrect objects + """ + while self.young_objects_with_light_finalizers.non_empty(): + obj = self.young_objects_with_light_finalizers.pop() + if not self.is_forwarded(obj): + finalizer = self.getlightfinalizer(self.get_type_id(obj)) + ll_assert(bool(finalizer), "no light finalizer found") + finalizer(obj) + else: + obj = self.get_forwarding_address(obj) + self.old_objects_with_light_finalizers.append(obj) + + def deal_with_old_objects_with_finalizers(self): + """ This is a much simpler version of dealing with finalizers + and an optimization - we can reasonably assume that those finalizers + don't do anything fancy and *just* call them. Among other things + they won't resurrect objects + """ + new_objects = self.AddressStack() + while self.old_objects_with_light_finalizers.non_empty(): + obj = self.old_objects_with_light_finalizers.pop() + if self.header(obj).tid & GCFLAG_VISITED: + # surviving + new_objects.append(obj) + else: + # dying + finalizer = self.getlightfinalizer(self.get_type_id(obj)) + ll_assert(bool(finalizer), "no light finalizer found") + finalizer(obj) + self.old_objects_with_light_finalizers.delete() + self.old_objects_with_light_finalizers = new_objects + + def deal_with_objects_with_finalizers(self): + # Walk over list of objects with finalizers. + # If it is not surviving, add it to the list of to-be-called + # finalizers and make it survive, to make the finalizer runnable. + # We try to run the finalizers in a "reasonable" order, like + # CPython does. The details of this algorithm are in + # pypy/doc/discussion/finalizer-order.txt. + new_with_finalizer = self.AddressDeque() + marked = self.AddressDeque() + pending = self.AddressStack() + self.tmpstack = self.AddressStack() + while self.objects_with_finalizers.non_empty(): + x = self.objects_with_finalizers.popleft() + ll_assert(self._finalization_state(x) != 1, + "bad finalization state 1") From noreply at buildbot.pypy.org Wed Aug 7 09:59:55 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Wed, 7 Aug 2013 09:59:55 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: making incminimark slightly more incremental. still called as non incremental. Message-ID: <20130807075955.D1A821C3607@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r65988:199872039820 Date: 2013-08-07 19:57 +1200 http://bitbucket.org/pypy/pypy/changeset/199872039820/ Log: making incminimark slightly more incremental. still called as non incremental. diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1022,6 +1022,11 @@ # the GCFLAG_VISITED should not be set between collections ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0, "unexpected GCFLAG_VISITED") + + # the GCFLAG_VISITED should never be set at the start of a collection + ll_assert(self.header(obj).tid & GCFLAG_GRAY == 0, + "unexpected GCFLAG_GRAY") + # the GCFLAG_FINALIZATION_ORDERING should not be set between coll. 
ll_assert(self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING == 0, "unexpected GCFLAG_FINALIZATION_ORDERING") @@ -1651,23 +1656,37 @@ if self.gc_state == STATE_SCANNING: self.objects_to_trace = self.AddressStack() self.collect_roots() + #set all found roots to gray before entering marking state + self.objects_to_trace.foreach(self._set_gcflag_gray,None) self.gc_state = STATE_MARKING #END SCANNING elif self.gc_state == STATE_MARKING: - self.visit_all_objects() - if self.objects_with_finalizers.non_empty(): - self.deal_with_objects_with_finalizers() + # XXX need a heuristic to tell how many objects to mark. + # Maybe based on previous mark time average + self.visit_all_objects_step(1) - self.objects_to_trace.delete() - # - # Weakref support: clear the weak pointers to dying objects - if self.old_objects_with_weakrefs.non_empty(): - self.invalidate_old_weakrefs() - if self.old_objects_with_light_finalizers.non_empty(): - self.deal_with_old_objects_with_finalizers() - - self.gc_state = STATE_SWEEPING + # XXX A simplifying assumption that should be checked, + # finalizers/weak references are rare and short which means that + # they do not need a seperate state and do not need to be + # made incremental. + if not self.objects_to_trace.non_empty(): + + self.objects_to_trace.delete() + + if self.objects_with_finalizers.non_empty(): + self.deal_with_objects_with_finalizers() + # + # Weakref support: clear the weak pointers to dying objects + if self.old_objects_with_weakrefs.non_empty(): + self.invalidate_old_weakrefs() + if self.old_objects_with_light_finalizers.non_empty(): + self.deal_with_old_objects_with_finalizers() + #objects_to_trace processed fully, can move on to sweeping + self.gc_state = STATE_SWEEPING + + #SWEEPING not yet incrementalised + self.major_collection_step(reserving_size) #END MARKING elif self.gc_state == STATE_SWEEPING: # @@ -1710,8 +1729,14 @@ self.max_heap_size_already_raised = True raise MemoryError self.gc_state = STATE_FINALIZING - #END SWEEPING + # END SWEEPING + # FINALIZING not yet incrementalised + # but it seems safe to allow mutator to run after sweeping and + # before finalizers are called. This is because run_finalizers + # is a different list to objects_with_finalizers. elif self.gc_state == STATE_FINALIZING: + # XXX This is considered rare, + # so should we make the calling incremental? or leave as is self.execute_finalizers() self.num_major_collects += 1 self.gc_state = STATE_SCANNING @@ -1746,6 +1771,9 @@ def _reset_gcflag_visited(self, obj, ignored): self.header(obj).tid &= ~GCFLAG_VISITED + def _set_gcflag_gray(self, obj, ignored): + self.header(obj).tid |= GCFLAG_GRAY + def free_rawmalloced_object_if_unvisited(self, obj): if self.header(obj).tid & GCFLAG_VISITED: self.header(obj).tid &= ~GCFLAG_VISITED # survives @@ -1822,7 +1850,15 @@ while pending.non_empty(): obj = pending.pop() self.visit(obj) - + + def visit_all_objects_step(self,nobjects=1): + # Objects can be added to pending by visit_step + pending = self.objects_to_trace + while nobjects > 0 and pending.non_empty(): + obj = pending.pop() + self.visit(obj) + nobjects -= 1 + def visit(self, obj): # # 'obj' is a live object. Check GCFLAG_VISITED to know if we @@ -1840,6 +1876,8 @@ # # It's the first time. We set the flag. 
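# A minimal sketch, not taken from the patch: the tri-colour reading of the
# flags used here.  white = neither flag (unreached, freed at sweep time),
# gray = queued in 'objects_to_trace' (reached, not yet traced), black =
# GCFLAG_VISITED (reached and traced).  Note that in the patch only the roots
# get GCFLAG_GRAY set explicitly so far; the sketch tags every queued object,
# which is the textbook form of the invariant.  Bit values are placeholders.

GCFLAG_VISITED = 1 << 0
GCFLAG_GRAY = 1 << 1

def mark_step(tid, children, objects_to_trace, budget):
    """Blacken at most 'budget' gray objects; return True once marking is done."""
    while budget > 0 and objects_to_trace:
        obj = objects_to_trace.pop()
        if tid[obj] & GCFLAG_VISITED:          # already black, e.g. queued twice
            continue
        tid[obj] |= GCFLAG_VISITED             # gray -> black
        tid[obj] &= ~GCFLAG_GRAY
        for child in children.get(obj, []):
            if not tid[child] & (GCFLAG_VISITED | GCFLAG_GRAY):
                tid[child] |= GCFLAG_GRAY      # white -> gray
                objects_to_trace.append(child)
        budget -= 1
    return not objects_to_trace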
hdr.tid |= GCFLAG_VISITED + #visited objects are no longer grey + hdr.tid &= ~GCFLAG_GRAY if not self.has_gcptr(llop.extract_ushort(llgroup.HALFWORD, hdr.tid)): return # From noreply at buildbot.pypy.org Wed Aug 7 09:59:50 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Wed, 7 Aug 2013 09:59:50 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: Added gray flag, and make incminimark use a state machine but not incremental Message-ID: <20130807075950.5BB651C3601@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r65986:82c35088f668 Date: 2013-08-07 17:26 +1200 http://bitbucket.org/pypy/pypy/changeset/82c35088f668/ Log: Added gray flag, and make incminimark use a state machine but not incremental diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -124,7 +124,26 @@ # note that GCFLAG_CARDS_SET is the most significant bit of a byte: # this is required for the JIT (x86) -TID_MASK = (first_gcflag << 8) - 1 +# This flag is used by the tri color algorithm. An object which +# has the gray bit set has been marked reachable, but not yet walked +# by the incremental collection +GCFLAG_GRAY = first_gcflag << 8 + +# States for the incremental GC + +# The scanning phase, next step call will scan the current roots +# This state must complete in a single step +STATE_SCANNING = 0 + +#XXX describe +# marking of objects can be done over multiple +STATE_MARKING = 1 +STATE_SWEEPING = 2 +STATE_FINALIZING = 3 + + + +TID_MASK = (first_gcflag << 9) - 1 FORWARDSTUB = lltype.GcStruct('forwarding_stub', @@ -308,6 +327,9 @@ self.young_rawmalloced_objects = self.null_address_dict() self.old_rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) + + self.gc_state = r_uint(0) #XXX Only really needs to be a byte + # # A list of all objects with finalizers (these are never young). self.objects_with_finalizers = self.AddressDeque() @@ -1613,107 +1635,208 @@ while new.non_empty(): old.append(new.pop()) new.delete() - + + # Note - minor collections seem fast enough so that one + # is done before every major collection step + def major_collection_step(self,reserving_size): + debug_start("gc-collect-step") + debug_print("stating gc state: ",self.gc_state) + # Debugging checks + ll_assert(self.nursery_free == self.nursery, + "nursery not empty in major_collection_step()") + + + # XXX currently very course increments, get this working then split + # to smaller increments using stacks for resuming + if self.gc_state == STATE_SCANNING: + self.objects_to_trace = self.AddressStack() + self.collect_roots() + self.gc_state = STATE_MARKING + #END SCANNING + elif self.gc_state == STATE_MARKING: + self.visit_all_objects() + + if self.objects_with_finalizers.non_empty(): + self.deal_with_objects_with_finalizers() + + self.objects_to_trace.delete() + # + # Weakref support: clear the weak pointers to dying objects + if self.old_objects_with_weakrefs.non_empty(): + self.invalidate_old_weakrefs() + if self.old_objects_with_light_finalizers.non_empty(): + self.deal_with_old_objects_with_finalizers() + + self.gc_state = STATE_SWEEPING + #END MARKING + elif self.gc_state == STATE_SWEEPING: + # + # Walk all rawmalloced objects and free the ones that don't + # have the GCFLAG_VISITED flag. + self.free_unvisited_rawmalloc_objects() + # + # Ask the ArenaCollection to visit all objects. 
Free the ones + # that have not been visited above, and reset GCFLAG_VISITED on + # the others. + self.ac.mass_free(self._free_if_unvisited) + # + # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. + self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) + # + # Set the threshold for the next major collection to be when we + # have allocated 'major_collection_threshold' times more than + # we currently have -- but no more than 'max_delta' more than + # we currently have. + total_memory_used = float(self.get_total_memory_used()) + bounded = self.set_major_threshold_from( + min(total_memory_used * self.major_collection_threshold, + total_memory_used + self.max_delta), + reserving_size) + # + # Max heap size: gives an upper bound on the threshold. If we + # already have at least this much allocated, raise MemoryError. + if bounded and (float(self.get_total_memory_used()) + reserving_size >= + self.next_major_collection_initial): + # + # First raise MemoryError, giving the program a chance to + # quit cleanly. It might still allocate in the nursery, + # which might eventually be emptied, triggering another + # major collect and (possibly) reaching here again with an + # even higher memory consumption. To prevent it, if it's + # the second time we are here, then abort the program. + if self.max_heap_size_already_raised: + llop.debug_fatalerror(lltype.Void, + "Using too much memory, aborting") + self.max_heap_size_already_raised = True + raise MemoryError + self.gc_state = STATE_FINALIZING + #END SWEEPING + elif self.gc_state == STATE_FINALIZING: + self.execute_finalizers() + self.num_major_collects += 1 + self.gc_state = STATE_SCANNING + #END FINALIZING + else: + pass #XXX which exception to raise here. Should be unreachable. + + debug_stop("gc-collect-step") + + def major_collection(self,reserving_size=0): + # For now keep things compatible with the existing GC + # and do all steps in a loop + + # We start in scanning state + ll_assert(self.gc_state == STATE_SCANNING, + "Scan start state incorrect") + self.debug_check_consistency() + self.major_collection_step(reserving_size) + ll_assert(self.gc_state == STATE_MARKING, "Initial Scan did not complete") + + while self.gc_state != STATE_SCANNING: + self.major_collection_step(reserving_size) + + + # ---------- # Full collection - def major_collection(self, reserving_size=0): - """Do a major collection. Only for when the nursery is empty.""" - # - debug_start("gc-collect") - debug_print() - debug_print(".----------- Full collection ------------------") - debug_print("| used before collection:") - debug_print("| in ArenaCollection: ", - self.ac.total_memory_used, "bytes") - debug_print("| raw_malloced: ", - self.rawmalloced_total_size, "bytes") - # - # Debugging checks - ll_assert(self.nursery_free == self.nursery, - "nursery not empty in major_collection()") - self.debug_check_consistency() - # - # Note that a major collection is non-moving. The goal is only to - # find and free some of the objects allocated by the ArenaCollection. - # We first visit all objects and toggle the flag GCFLAG_VISITED on - # them, starting from the roots. - self.objects_to_trace = self.AddressStack() - self.collect_roots() - self.visit_all_objects() - # - # Finalizer support: adds the flag GCFLAG_VISITED to all objects - # with a finalizer and all objects reachable from there (and also - # moves some objects from 'objects_with_finalizers' to - # 'run_finalizers'). 
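# A minimal sketch, not part of either patch: how the new entry points fit
# together.  major_collection() is kept only as a compatibility wrapper that
# keeps calling major_collection_step() until the state machine has gone all
# the way around SCANNING -> MARKING -> SWEEPING -> FINALIZING -> SCANNING;
# in the real patch the MARKING state is re-entered many times (one bounded
# visit_all_objects_step() per call) and a minor collection runs before each
# step.  The class below is a stand-in for the GC, not the real interface.

STATE_SCANNING, STATE_MARKING, STATE_SWEEPING, STATE_FINALIZING = range(4)

class ToyIncrementalGC(object):
    def __init__(self):
        self.gc_state = STATE_SCANNING

    def major_collection_step(self):
        # each call does a bounded amount of work and may advance the state
        if self.gc_state == STATE_SCANNING:
            self.gc_state = STATE_MARKING       # roots gathered in one step
        elif self.gc_state == STATE_MARKING:
            self.gc_state = STATE_SWEEPING      # real code stays here a while
        elif self.gc_state == STATE_SWEEPING:
            self.gc_state = STATE_FINALIZING
        else:
            self.gc_state = STATE_SCANNING      # cycle complete

    def major_collection(self):
        self.major_collection_step()
        while self.gc_state != STATE_SCANNING:
            self.major_collection_step()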
- if self.objects_with_finalizers.non_empty(): - self.deal_with_objects_with_finalizers() - # - self.objects_to_trace.delete() - # - # Weakref support: clear the weak pointers to dying objects - if self.old_objects_with_weakrefs.non_empty(): - self.invalidate_old_weakrefs() - if self.old_objects_with_light_finalizers.non_empty(): - self.deal_with_old_objects_with_finalizers() +# def major_collection(self, reserving_size=0): +# """Do a major collection. Only for when the nursery is empty.""" +# # +# debug_start("gc-collect") +# debug_print() +# debug_print(".----------- Full collection ------------------") +# debug_print("| used before collection:") +# debug_print("| in ArenaCollection: ", +# self.ac.total_memory_used, "bytes") +# debug_print("| raw_malloced: ", +# self.rawmalloced_total_size, "bytes") +# # +# # Debugging checks +# ll_assert(self.nursery_free == self.nursery, +# "nursery not empty in major_collection()") +# self.debug_check_consistency() +# # +# # Note that a major collection is non-moving. The goal is only to +# # find and free some of the objects allocated by the ArenaCollection. +# # We first visit all objects and toggle the flag GCFLAG_VISITED on +# # them, starting from the roots. +# self.objects_to_trace = self.AddressStack() +# self.collect_roots() +# self.visit_all_objects() +# # +# # Finalizer support: adds the flag GCFLAG_VISITED to all objects +# # with a finalizer and all objects reachable from there (and also +# # moves some objects from 'objects_with_finalizers' to +# # 'run_finalizers'). +# if self.objects_with_finalizers.non_empty(): +# self.deal_with_objects_with_finalizers() +# # +# self.objects_to_trace.delete() +# # +# # Weakref support: clear the weak pointers to dying objects +# if self.old_objects_with_weakrefs.non_empty(): +# self.invalidate_old_weakrefs() +# if self.old_objects_with_light_finalizers.non_empty(): +# self.deal_with_old_objects_with_finalizers() - # - # Walk all rawmalloced objects and free the ones that don't - # have the GCFLAG_VISITED flag. - self.free_unvisited_rawmalloc_objects() - # - # Ask the ArenaCollection to visit all objects. Free the ones - # that have not been visited above, and reset GCFLAG_VISITED on - # the others. - self.ac.mass_free(self._free_if_unvisited) - # - # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. - self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) - # - self.debug_check_consistency() - # - self.num_major_collects += 1 - debug_print("| used after collection:") - debug_print("| in ArenaCollection: ", - self.ac.total_memory_used, "bytes") - debug_print("| raw_malloced: ", - self.rawmalloced_total_size, "bytes") - debug_print("| number of major collects: ", - self.num_major_collects) - debug_print("`----------------------------------------------") - debug_stop("gc-collect") - # - # Set the threshold for the next major collection to be when we - # have allocated 'major_collection_threshold' times more than - # we currently have -- but no more than 'max_delta' more than - # we currently have. - total_memory_used = float(self.get_total_memory_used()) - bounded = self.set_major_threshold_from( - min(total_memory_used * self.major_collection_threshold, - total_memory_used + self.max_delta), - reserving_size) - # - # Max heap size: gives an upper bound on the threshold. If we - # already have at least this much allocated, raise MemoryError. 
- if bounded and (float(self.get_total_memory_used()) + reserving_size >= - self.next_major_collection_initial): - # - # First raise MemoryError, giving the program a chance to - # quit cleanly. It might still allocate in the nursery, - # which might eventually be emptied, triggering another - # major collect and (possibly) reaching here again with an - # even higher memory consumption. To prevent it, if it's - # the second time we are here, then abort the program. - if self.max_heap_size_already_raised: - llop.debug_fatalerror(lltype.Void, - "Using too much memory, aborting") - self.max_heap_size_already_raised = True - raise MemoryError - # - # At the end, we can execute the finalizers of the objects - # listed in 'run_finalizers'. Note that this will typically do - # more allocations. - self.execute_finalizers() +# # +# # Walk all rawmalloced objects and free the ones that don't +# # have the GCFLAG_VISITED flag. +# self.free_unvisited_rawmalloc_objects() +# # +# # Ask the ArenaCollection to visit all objects. Free the ones +# # that have not been visited above, and reset GCFLAG_VISITED on +# # the others. +# self.ac.mass_free(self._free_if_unvisited) +# # +# # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. +# self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) +# # +# self.debug_check_consistency() +# # +# self.num_major_collects += 1 +# debug_print("| used after collection:") +# debug_print("| in ArenaCollection: ", +# self.ac.total_memory_used, "bytes") +# debug_print("| raw_malloced: ", +# self.rawmalloced_total_size, "bytes") +# debug_print("| number of major collects: ", +# self.num_major_collects) +# debug_print("`----------------------------------------------") +# debug_stop("gc-collect") +# # +# # Set the threshold for the next major collection to be when we +# # have allocated 'major_collection_threshold' times more than +# # we currently have -- but no more than 'max_delta' more than +# # we currently have. +# total_memory_used = float(self.get_total_memory_used()) +# bounded = self.set_major_threshold_from( +# min(total_memory_used * self.major_collection_threshold, +# total_memory_used + self.max_delta), +# reserving_size) +# # +# # Max heap size: gives an upper bound on the threshold. If we +# # already have at least this much allocated, raise MemoryError. +# if bounded and (float(self.get_total_memory_used()) + reserving_size >= +# self.next_major_collection_initial): +# # +# # First raise MemoryError, giving the program a chance to +# # quit cleanly. It might still allocate in the nursery, +# # which might eventually be emptied, triggering another +# # major collect and (possibly) reaching here again with an +# # even higher memory consumption. To prevent it, if it's +# # the second time we are here, then abort the program. +# if self.max_heap_size_already_raised: +# llop.debug_fatalerror(lltype.Void, +# "Using too much memory, aborting") +# self.max_heap_size_already_raised = True +# raise MemoryError +# # +# # At the end, we can execute the finalizers of the objects +# # listed in 'run_finalizers'. Note that this will typically do +# # more allocations. 
+# self.execute_finalizers() def _free_if_unvisited(self, hdr): From noreply at buildbot.pypy.org Wed Aug 7 09:59:51 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Wed, 7 Aug 2013 09:59:51 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: remove useless comment Message-ID: <20130807075951.9A2D31C3604@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r65987:d3689913003e Date: 2013-08-07 17:46 +1200 http://bitbucket.org/pypy/pypy/changeset/d3689913003e/ Log: remove useless comment diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1734,110 +1734,6 @@ while self.gc_state != STATE_SCANNING: self.major_collection_step(reserving_size) - - - - # ---------- - # Full collection - -# def major_collection(self, reserving_size=0): -# """Do a major collection. Only for when the nursery is empty.""" -# # -# debug_start("gc-collect") -# debug_print() -# debug_print(".----------- Full collection ------------------") -# debug_print("| used before collection:") -# debug_print("| in ArenaCollection: ", -# self.ac.total_memory_used, "bytes") -# debug_print("| raw_malloced: ", -# self.rawmalloced_total_size, "bytes") -# # -# # Debugging checks -# ll_assert(self.nursery_free == self.nursery, -# "nursery not empty in major_collection()") -# self.debug_check_consistency() -# # -# # Note that a major collection is non-moving. The goal is only to -# # find and free some of the objects allocated by the ArenaCollection. -# # We first visit all objects and toggle the flag GCFLAG_VISITED on -# # them, starting from the roots. -# self.objects_to_trace = self.AddressStack() -# self.collect_roots() -# self.visit_all_objects() -# # -# # Finalizer support: adds the flag GCFLAG_VISITED to all objects -# # with a finalizer and all objects reachable from there (and also -# # moves some objects from 'objects_with_finalizers' to -# # 'run_finalizers'). -# if self.objects_with_finalizers.non_empty(): -# self.deal_with_objects_with_finalizers() -# # -# self.objects_to_trace.delete() -# # -# # Weakref support: clear the weak pointers to dying objects -# if self.old_objects_with_weakrefs.non_empty(): -# self.invalidate_old_weakrefs() -# if self.old_objects_with_light_finalizers.non_empty(): -# self.deal_with_old_objects_with_finalizers() - -# # -# # Walk all rawmalloced objects and free the ones that don't -# # have the GCFLAG_VISITED flag. -# self.free_unvisited_rawmalloc_objects() -# # -# # Ask the ArenaCollection to visit all objects. Free the ones -# # that have not been visited above, and reset GCFLAG_VISITED on -# # the others. -# self.ac.mass_free(self._free_if_unvisited) -# # -# # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. 
-# self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) -# # -# self.debug_check_consistency() -# # -# self.num_major_collects += 1 -# debug_print("| used after collection:") -# debug_print("| in ArenaCollection: ", -# self.ac.total_memory_used, "bytes") -# debug_print("| raw_malloced: ", -# self.rawmalloced_total_size, "bytes") -# debug_print("| number of major collects: ", -# self.num_major_collects) -# debug_print("`----------------------------------------------") -# debug_stop("gc-collect") -# # -# # Set the threshold for the next major collection to be when we -# # have allocated 'major_collection_threshold' times more than -# # we currently have -- but no more than 'max_delta' more than -# # we currently have. -# total_memory_used = float(self.get_total_memory_used()) -# bounded = self.set_major_threshold_from( -# min(total_memory_used * self.major_collection_threshold, -# total_memory_used + self.max_delta), -# reserving_size) -# # -# # Max heap size: gives an upper bound on the threshold. If we -# # already have at least this much allocated, raise MemoryError. -# if bounded and (float(self.get_total_memory_used()) + reserving_size >= -# self.next_major_collection_initial): -# # -# # First raise MemoryError, giving the program a chance to -# # quit cleanly. It might still allocate in the nursery, -# # which might eventually be emptied, triggering another -# # major collect and (possibly) reaching here again with an -# # even higher memory consumption. To prevent it, if it's -# # the second time we are here, then abort the program. -# if self.max_heap_size_already_raised: -# llop.debug_fatalerror(lltype.Void, -# "Using too much memory, aborting") -# self.max_heap_size_already_raised = True -# raise MemoryError -# # -# # At the end, we can execute the finalizers of the objects -# # listed in 'run_finalizers'. Note that this will typically do -# # more allocations. 
-# self.execute_finalizers() - def _free_if_unvisited(self, hdr): size_gc_header = self.gcheaderbuilder.size_gc_header From noreply at buildbot.pypy.org Wed Aug 7 10:42:03 2013 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Wed, 7 Aug 2013 10:42:03 +0200 (CEST) Subject: [pypy-commit] pypy refine-testrunner: merge from default Message-ID: <20130807084203.6DCA81C073E@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r65989:05d671b708f2 Date: 2013-08-07 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/05d671b708f2/ Log: merge from default diff too long, truncating to 2000 out of 7321 lines diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 +165,8 @@ # All _delegate_methods must also be initialized here. send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. 
The @@ -179,12 +181,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). + self._sock = _sock def send(self, data, flags=0): @@ -216,9 +227,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +290,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +333,11 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: + self._sock = None + if s is not None: s._drop() - if self._close: - self._sock.close() - self._sock = None + if self._close: + s.close() def __del__(self): try: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). + sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS @@ -352,11 +358,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. 
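# A minimal sketch, not part of the patch: the _reuse()/_drop() protocol the
# socket.py comments above ask third-party code (such as eventlet) to provide
# on objects it hands to _socketobject() or _fileobject().  The counter starts
# at 0; every wrapper that stores the object calls _reuse(), every wrapper
# being closed calls _drop(), and a _drop() arriving with no references left
# outstanding really closes the resource -- the same pattern SSLSocket uses
# for _makefile_refs just below.  The class and its names are hypothetical.

class RefCountedSock(object):
    def __init__(self):
        self._refs = 0
        self.closed = False

    def _reuse(self):
        self._refs += 1

    def _drop(self):
        if self._refs < 1:
            self.close()           # last reference gone: really close
        else:
            self._refs -= 1

    def close(self):
        self.closed = True

# a wrapper takes and releases its reference, then the owner closes for real
s = RefCountedSock()
s._reuse()
s._drop()
s._drop()
assert s.closed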
return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1066,6 +1066,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1321,7 +1324,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1193,6 +1193,8 @@ # out of socket._fileobject() and into a base class. r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -966,7 +966,7 @@ r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") if lib.color_content(color, r, g, b) == lib.ERR: raise error("Argument 1 was out of range. Check value of COLORS.") - return (r, g, b) + return (r[0], g[0], b[0]) def color_pair(n): @@ -1121,6 +1121,7 @@ term = ffi.NULL err = ffi.new("int *") if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] if err == 0: raise error("setupterm: could not find terminal") elif err == -1: diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1305,7 +1305,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. 
_`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.0' +version = '2.1' # The full version, including alpha/beta/rc tags. -release = '2.0.2' +release = '2.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -104,8 +104,8 @@ executable. The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.7.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``RPython magically makes you rich and famous (says so on the tin)'' @@ -171,7 +171,7 @@ the ``bin/pypy`` executable. To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. in ``/opt/pypy2.0``) and put a symlink to the +whole hierarchy alone (e.g. in ``/opt/pypy2.1``) and put a symlink to the ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` If the executable fails to find suitable libraries, it will report diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,10 +53,10 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-2.0.tar.bz2 - $ ./pypy-2.0/bin/pypy - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + $ tar xf pypy-2.1.tar.bz2 + $ ./pypy-2.1/bin/pypy + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``PyPy is an exciting technology that lets you to write fast, portable, multi-platform interpreters with less @@ -75,14 +75,14 @@ $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.0/bin/pypy distribute_setup.py + $ ./pypy-2.1/bin/pypy distribute_setup.py - $ ./pypy-2.0/bin/pypy get-pip.py + $ ./pypy-2.1/bin/pypy get-pip.py - $ ./pypy-2.0/bin/pip install pygments # for example + $ ./pypy-2.1/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-2.0/site-packages``, and -the scripts in ``pypy-2.0/bin``. +3rd party libraries will be installed in ``pypy-2.1/site-packages``, and +the scripts in ``pypy-2.1/bin``. Installing using virtualenv --------------------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0.2`_: the latest official release +* `Release 2.1.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. 
_`Release 2.0.2`: http://pypy.org/download.html +.. _`Release 2.1.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. _Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. + +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. Frames +can escape and they can also be allocated already at the moment we enter the +JIT. In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). + +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. 
+ It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/doc/release-2.1.0.rst b/pypy/doc/release-2.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0.rst @@ -0,0 +1,89 @@ +============================ +PyPy 2.1 - Considered ARMful +============================ + +We're pleased to announce PyPy 2.1, which targets version 2.7.3 of the Python +language. This is the first release with official support for ARM processors in the JIT. +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.1 release here: + + http://pypy.org/download.html + +We would like to thank the `Raspberry Pi Foundation`_ for supporting the work +to finish PyPy's ARM support. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +The first beta of PyPy3 2.1, targeting version 3 of the Python language, was +just released, more details can be found `here`_. + +.. _`here`: http://morepypy.blogspot.com/2013/07/pypy3-21-beta-1.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.1 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. This release also supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like the Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.1 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* JIT support for ARM, architecture versions 6 and 7, hard- and soft-float ABI + +* Stacklet support for ARM + +* Support for os.statvfs and os.fstatvfs on unix systems + +* Improved logging performance + +* Faster sets for objects + +* Interpreter improvements + +* During packaging, compile the CFFI based TK extension + +* Pickling of numpy arrays and dtypes + +* Subarrays for numpy + +* Bugfixes to numpy + +* Bugfixes to cffi and ctypes + +* Bugfixes to the x86 stacklet support + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in PyPy sometimes failed with a "bad write retry" message. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 + +Cheers, + +David Schneider for the PyPy team. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,24 @@ .. branch: fast-slowpath Added an abstraction for functions with a fast and slow path in the JIT. This speeds up list.append() and list.pop(). + +.. branch: curses_fixes + +.. 
branch: foldable-getarrayitem-indexerror +Constant-fold reading out of constant tuples in PyPy. + +.. branch: mro-reorder-numpypy-str +No longer delegate numpy string_ methods to space.StringObject, in numpy +this works by kind of by accident. Support for merging the refactor-str-types +branch + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. branch: reflex-support diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -176,9 +176,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -260,7 +261,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -330,7 +331,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -340,7 +341,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -355,7 +356,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -389,7 +390,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -476,7 +477,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -490,7 +491,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -522,7 +523,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. 
f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -553,12 +554,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -609,10 +610,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -638,8 +639,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -649,7 +650,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -667,7 +668,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -56,7 +56,7 @@ def descr_typecode(space, self): return space.wrap(self.typecode) -arr_eq_driver = jit.JitDriver(greens = ['comp_func'], reds = 'auto') +arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto') EQ, NE, LT, LE, GT, GE = range(6) def compare_arrays(space, arr1, arr2, comp_op): diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -592,11 +592,15 @@ def get(self, w_cppinstance, w_pycppclass): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) self.converter.to_memory(self.space, w_cppinstance, w_value, offset) return self.space.w_None diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -426,6 +426,11 @@ # mostly for the benefit of the CINT backend, which treats std as special gbl.std = make_cppnamespace(None, "std", None, False) + # install a type for enums to refer to + # TODO: this is correct for C++98, not for C++11 and in general there will + # be the same issue for all typedef'd builtin types + setattr(gbl, 'unsigned int', int) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -105,6 +105,13 @@ raises(IndexError, c.m_float_array.__getitem__, self.N) 
raises(IndexError, c.m_double_array.__getitem__, self.N) + # can not access an instance member on the class + raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, getattr, cppyy_test_data, 'm_int') + + assert not hasattr(cppyy_test_data, 'm_bool') + assert not hasattr(cppyy_test_data, 'm_int') + c.destruct() def test03_instance_data_write_access(self): @@ -428,12 +435,17 @@ c = cppyy_test_data() assert isinstance(c, cppyy_test_data) - # TODO: test that the enum is accessible as a type + # test that the enum is accessible as a type + assert cppyy_test_data.what assert cppyy_test_data.kNothing == 6 assert cppyy_test_data.kSomething == 111 assert cppyy_test_data.kLots == 42 + assert cppyy_test_data.what(cppyy_test_data.kNothing) == cppyy_test_data.kNothing + assert cppyy_test_data.what(6) == cppyy_test_data.kNothing + # TODO: only allow instantiations with correct values (C++11) + assert c.get_enum() == cppyy_test_data.kNothing assert c.m_enum == cppyy_test_data.kNothing @@ -455,6 +467,7 @@ assert cppyy_test_data.s_enum == cppyy_test_data.kSomething # global enums + assert gbl.fruit # test type accessible assert gbl.kApple == 78 assert gbl.kBanana == 29 assert gbl.kCitrus == 34 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-alpha0" +#define PYPY_VERSION "2.2.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,7 +3,6 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread -from pypy.module.thread import os_thread PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -17,6 +16,9 @@ ('dict', PyObject), ])) +class NoThreads(Exception): + pass + @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread @@ -45,10 +47,15 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): + if not space.config.translation.thread: + raise NoThreads + from pypy.module.thread import os_thread os_thread.setup_threads(space) @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): + if not space.config.translation.thread: + return 0 return 1 # XXX: might be generally useful @@ -232,6 +239,8 @@ """Create a new thread state object belonging to the given interpreter object. The global interpreter lock need not be held, but may be held if it is necessary to serialize calls to this function.""" + if not space.config.translation.thread: + raise NoThreads rthread.gc_thread_prepare() # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. @@ -246,6 +255,8 @@ def PyThreadState_Clear(space, tstate): """Reset all information in a thread state object. 
The global interpreter lock must be held.""" + if not space.config.translation.thread: + raise NoThreads Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) space.threadlocals.leave_thread(space) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -205,6 +205,7 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") descr_invert = _unaryop_impl("invert") + descr_conjugate = _unaryop_impl("conjugate") def descr_divmod(self, space, w_other): w_quotient = self.descr_div(space, w_other) @@ -378,12 +379,14 @@ return self class W_CharacterBox(W_FlexibleBox): - pass + def convert_to(self, dtype): + # XXX assert dtype is str type + return self + class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype - arg = space.str_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -517,6 +520,7 @@ all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), round = interp2app(W_GenericBox.descr_round), + conjugate = interp2app(W_GenericBox.descr_conjugate), view = interp2app(W_GenericBox.descr_view), ) @@ -682,12 +686,12 @@ __module__ = "numpypy", ) -W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), +W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), __module__ = "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -234,6 +234,9 @@ def is_record_type(self): return self.fields is not None + def is_str_type(self): + return self.num == 18 + def is_str_or_unicode(self): return (self.num == 18 or self.num == 19) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -328,14 +328,19 @@ w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - if (w_lhs.get_dtype().is_flexible_type() or \ - w_rhs.get_dtype().is_flexible_type()): + w_ldtype = w_lhs.get_dtype() + w_rdtype = w_rhs.get_dtype() + if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + self.comparison_func: + pass + elif (w_ldtype.is_flexible_type() or \ + w_rdtype.is_flexible_type()): raise OperationError(space.w_TypeError, space.wrap( 'unsupported operand dtypes %s and %s for "%s"' % \ - (w_rhs.get_dtype().get_name(), w_lhs.get_dtype().get_name(), + (w_rdtype.get_name(), w_ldtype.get_name(), self.name))) calc_dtype = find_binop_result_dtype(space, - w_lhs.get_dtype(), w_rhs.get_dtype(), + w_ldtype, w_rdtype, int_only=self.int_only, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools, diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -155,7 +155,8 @@ obj_iter.next() 
return cur_value -reduce_cum_driver = jit.JitDriver(greens = ['shapelen', 'func', 'dtype'], +reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', + greens = ['shapelen', 'func', 'dtype'], reds = 'auto') def compute_reduce_cumultative(obj, out, calc_dtype, func, identity): @@ -529,8 +530,9 @@ val_arr.descr_getitem(space, w_idx)) iter.next() -byteswap_driver = jit.JitDriver(greens = ['dtype'], - reds = 'auto') +byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', + greens = ['dtype'], + reds = 'auto') def byteswap(from_, to): dtype = from_.dtype @@ -542,8 +544,9 @@ to_iter.next() from_iter.next() -choose_driver = jit.JitDriver(greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') +choose_driver = jit.JitDriver(name='numpy_choose_driver', + greens = ['shapelen', 'mode', 'dtype'], + reds = 'auto') def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -572,8 +575,9 @@ out_iter.next() arr_iter.next() -clip_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +clip_driver = jit.JitDriver(name='numpy_clip_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def clip(space, arr, shape, min, max, out): arr_iter = arr.create_iter(shape) @@ -597,8 +601,9 @@ out_iter.next() min_iter.next() -round_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +round_driver = jit.JitDriver(name='numpy_round_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def round(space, arr, dtype, shape, decimals, out): arr_iter = arr.create_iter(shape) @@ -612,7 +617,8 @@ arr_iter.next() out_iter.next() -diagonal_simple_driver = jit.JitDriver(greens = ['axis1', 'axis2'], +diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', + greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): diff --git a/pypy/module/micronumpy/stdobjspace.py b/pypy/module/micronumpy/stdobjspace.py deleted file mode 100644 --- a/pypy/module/micronumpy/stdobjspace.py +++ /dev/null @@ -1,11 +0,0 @@ - -from pypy.objspace.std import stringobject -from pypy.module.micronumpy import interp_boxes - -def delegate_stringbox2stringobj(space, w_box): - return space.wrap(w_box.dtype.itemtype.to_str(w_box)) - -def register_delegates(typeorder): - typeorder[interp_boxes.W_StringBox] = [ - (stringobject.W_StringObject, delegate_stringbox2stringobj), - ] diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -400,6 +400,7 @@ assert conj is conjugate assert conj(c0) == c0 + assert c0.conjugate() == c0 assert conj(c1) == complex(1, -2) assert conj(1) == 1 assert conj(-3) == -3 @@ -625,6 +626,8 @@ a = array([1 + 2j, 1 - 2j]) assert (a.conj() == [1 - 2j, 1 + 2j]).all() + a = array([1,2,3.4J],dtype=complex) + assert a[2].conjugate() == 0-3.4j def test_math(self): if self.isWindows: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -740,6 +740,7 @@ class AppTestStrUnicodeDtypes(BaseNumpyAppTest): def test_str_unicode(self): + skip('numpypy differs from numpy') from numpypy import str_, unicode_, character, flexible, generic assert str_.mro() == [str_, str, basestring, character, flexible, generic, object] diff --git a/pypy/module/micronumpy/test/test_numarray.py 
b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2755,6 +2755,19 @@ assert a[2] == 'ab' raises(TypeError, a, 'sum') raises(TypeError, 'a+a') + b = array(['abcdefg', 'ab', 'cd']) + assert a[2] == b[1] + assert bool(a[1]) + c = array(['ab','cdefg','hi','jk']) + # not implemented yet + #c[0] += c[3] + #assert c[0] == 'abjk' + + def test_to_str(self): + from numpypy import array + a = array(['abc','abc', 'def', 'ab'], 'S3') + b = array(['mnopqr','abcdef', 'ab', 'cd']) + assert b[1] != a[1] def test_string_scalar(self): from numpypy import array @@ -2766,8 +2779,7 @@ assert str(a.dtype) == '|S1' a = array('x', dtype='c') assert str(a.dtype) == '|S1' - # XXX can sort flexible types, why not comparison? - #assert a == 'x' + assert a == 'x' def test_flexible_repr(self): from numpypy import array diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -78,6 +78,11 @@ items.add(Item(name, kind, subitems)) return items +def get_version_str(python): + args = [python, '-c', 'import sys; print sys.version'] + lines = subprocess.check_output(args).splitlines() + return lines[0] + def split(lst): SPLIT = 5 lgt = len(lst) // SPLIT + 1 @@ -93,6 +98,7 @@ def main(argv): cpy_items = find_numpy_items("/usr/bin/python") pypy_items = find_numpy_items(argv[1], "numpypy") + ver = get_version_str(argv[1]) all_items = [] msg = "{:d}/{:d} names".format(len(pypy_items), len(cpy_items)) + " " @@ -113,7 +119,8 @@ env = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)) ) - html = env.get_template("page.html").render(all_items=split(sorted(all_items)), msg=msg) + html = env.get_template("page.html").render(all_items=split(sorted(all_items)), + msg=msg, ver=ver) if len(argv) > 2: with open(argv[2], 'w') as f: f.write(html.encode("utf-8")) diff --git a/pypy/module/micronumpy/tool/numready/page.html b/pypy/module/micronumpy/tool/numready/page.html --- a/pypy/module/micronumpy/tool/numready/page.html +++ b/pypy/module/micronumpy/tool/numready/page.html @@ -34,6 +34,7 @@

 NumPyPy Status
+Version: {{ ver }}
 Overall: {{ msg }}
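For illustration, the numready change above threads the target interpreter's version string (obtained by running it in a subprocess) through to the rendered status page. A minimal standalone sketch of that pattern follows; the inline template standing in for page.html is an assumption for the example, not copied from the repository:

    import subprocess
    import jinja2

    def get_version_str(python):
        # Ask the target interpreter for sys.version and keep only the first line.
        args = [python, '-c', 'import sys; print sys.version']
        lines = subprocess.check_output(args).splitlines()
        return lines[0]

    def render_status_page(python, msg):
        # Hypothetical inline template playing the role of numready's page.html.
        template = jinja2.Template(
            "NumPyPy Status\n"
            "Version: {{ ver }}\n"
            "Overall: {{ msg }}\n")
        return template.render(ver=get_version_str(python), msg=msg)

    # Usage sketch: render_status_page('/usr/bin/python', '42/100 names')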

diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1689,6 +1689,22 @@ def get_size(self): return self.size +def str_unary_op(func): + specialize.argtype(1)(func) + @functools.wraps(func) + def dispatcher(self, v1): + return func(self, self.to_str(v1)) + return dispatcher + +def str_binary_op(func): + specialize.argtype(1, 2)(func) + @functools.wraps(func) + def dispatcher(self, v1, v2): + return func(self, + self.to_str(v1), + self.to_str(v2) + ) + return dispatcher class StringType(BaseType, BaseStringType): T = lltype.Char @@ -1696,6 +1712,8 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.interp_dtype import new_string_dtype + if isinstance(w_item, interp_boxes.W_StringBox): + return w_item arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1705,6 +1723,7 @@ @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) + # XXX simplify to range(box.dtype.get_size()) ? for k in range(min(self.size, box.arr.size-offset)): arr.storage[k + i] = box.arr.storage[k + offset] @@ -1718,7 +1737,7 @@ builder = StringBuilder() assert isinstance(item, interp_boxes.W_StringBox) i = item.ofs - end = i+self.size + end = i + item.dtype.get_size() while i < end: assert isinstance(item.arr.storage[i], str) if item.arr.storage[i] == '\x00': @@ -1734,10 +1753,53 @@ builder.append("'") return builder.build() - # XXX move to base class when UnicodeType is supported + # XXX move the rest of this to base class when UnicodeType is supported def to_builtin_type(self, space, box): return space.wrap(self.to_str(box)) + @str_binary_op + def eq(self, v1, v2): + return v1 == v2 + + @str_binary_op + def ne(self, v1, v2): + return v1 != v2 + + @str_binary_op + def lt(self, v1, v2): + return v1 < v2 + + @str_binary_op + def le(self, v1, v2): + return v1 <= v2 + + @str_binary_op + def gt(self, v1, v2): + return v1 > v2 + + @str_binary_op + def ge(self, v1, v2): + return v1 >= v2 + + @str_binary_op + def logical_and(self, v1, v2): + return bool(v1) and bool(v2) + + @str_binary_op + def logical_or(self, v1, v2): + return bool(v1) or bool(v2) + + @str_unary_op + def logical_not(self, v): + return not bool(v) + + @str_binary_op + def logical_xor(self, v1, v2): + return bool(v1) ^ bool(v2) + + def bool(self, v): + return bool(self.to_str(v)) + def build_and_convert(self, space, mydtype, box): assert isinstance(box, interp_boxes.W_GenericBox) if box.get_dtype(space).is_str_or_unicode(): @@ -1753,6 +1815,13 @@ arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) +NonNativeStringType = StringType + +class UnicodeType(BaseType, BaseStringType): + T = lltype.UniChar + +NonNativeUnicodeType = UnicodeType + class VoidType(BaseType, BaseStringType): T = lltype.Char @@ -1798,12 +1867,6 @@ return W_NDimArray(implementation) NonNativeVoidType = VoidType -NonNativeStringType = StringType - -class UnicodeType(BaseType, BaseStringType): - T = lltype.UniChar - -NonNativeUnicodeType = UnicodeType class RecordType(BaseType): diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,18 +12,18 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe 
import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap -PyFrame._virtualizable2_ = ['last_instr', 'pycode', - 'valuestackdepth', 'locals_stack_w[*]', - 'cells[*]', - 'last_exception', - 'lastblock', - 'is_being_profiled', - 'w_globals', - 'w_f_trace', - ] +PyFrame._virtualizable_ = ['last_instr', 'pycode', + 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', + 'last_exception', + 'lastblock', + 'is_being_profiled', + 'w_globals', + 'w_f_trace', + ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] @@ -73,7 +73,13 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: + self.last_exception = None return self.popvalue() def jump_absolute(self, jumpto, ec): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) @@ -209,6 +209,22 @@ opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + def test_constfold_tuple(self): + code = """if 1: + tup = tuple(range(10000)) + l = [1, 2, 3, 4, 5, 6, "a"] + def main(n): + while n > 0: + sub = tup[1] # ID: getitem + l[1] = n # kill cache of tup[1] + n -= sub + """ + log = self.run(code, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) + assert log.opnames(ops) == [] + + def test_specialised_tuple(self): def main(n): import pypyjit diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 2, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -0,0 +1,57 @@ +import pytest + +# Check that lib_pypy.cffi finds the correct version of _cffi_backend. +# Otherwise, the test is skipped. It should never be skipped when run +# with "pypy py.test -A". 
+try: + from lib_pypy import cffi; cffi.FFI() +except (ImportError, AssertionError), e: + pytest.skip("no cffi module or wrong version (%s)" % (e,)) + +from lib_pypy import _curses + + +lib = _curses.lib + + +def test_color_content(monkeypatch): + def lib_color_content(color, r, g, b): + r[0], g[0], b[0] = 42, 43, 44 + return lib.OK + + monkeypatch.setattr(_curses, '_ensure_initialised_color', lambda: None) + monkeypatch.setattr(lib, 'color_content', lib_color_content) + + assert _curses.color_content(None) == (42, 43, 44) + + +def test_setupterm(monkeypatch): + def make_setupterm(err_no): + def lib_setupterm(term, fd, err): + err[0] = err_no + + return lib.ERR + + return lib_setupterm + + monkeypatch.setattr(_curses, '_initialised_setupterm', False) + monkeypatch.setattr(lib, 'setupterm', make_setupterm(0)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "could not find terminal" in exc_info.value.args[0] + + monkeypatch.setattr(lib, 'setupterm', make_setupterm(-1)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "could not find terminfo database" in exc_info.value.args[0] + + monkeypatch.setattr(lib, 'setupterm', make_setupterm(42)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "unknown error" in exc_info.value.args[0] diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Tests for _sqlite3.py""" import pytest, sys @@ -222,3 +223,8 @@ cur.execute("create table test(a)") cur.executemany("insert into test values (?)", [[1], [2], [3]]) assert cur.lastrowid is None + +def test_issue1573(con): + cur = con.cursor() + cur.execute(u'SELECT 1 as méil') + assert cur.description[0][0] == u"méil".encode('utf-8') diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -702,11 +702,13 @@ find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') class ListStrategy(object): - sizehint = -1 def __init__(self, space): self.space = space + def get_sizehint(self): + return -1 + def init_from_list_w(self, w_list, list_w): raise NotImplementedError @@ -894,7 +896,7 @@ else: strategy = self.space.fromcache(ObjectListStrategy) - storage = strategy.get_empty_storage(self.sizehint) + storage = strategy.get_empty_storage(self.get_sizehint()) w_list.strategy = strategy w_list.lstorage = storage @@ -974,6 +976,9 @@ self.sizehint = sizehint ListStrategy.__init__(self, space) + def get_sizehint(self): + return self.sizehint + def _resize_hint(self, w_list, hint): assert hint >= 0 self.sizehint = hint diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -133,10 +133,6 @@ # when trying to dispatch multimethods. 
# XXX build these lists a bit more automatically later - if config.objspace.usemodules.micronumpy: - from pypy.module.micronumpy.stdobjspace import register_delegates - register_delegates(self.typeorder) - self.typeorder[boolobject.W_BoolObject] += [ (intobject.W_IntObject, boolobject.delegate_Bool2IntObject), (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -14,9 +14,9 @@ return lltype.nullptr(T) interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLtypeCPU + from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now - apply_jit(interp, graph, LLtypeCPU) + apply_jit(interp, graph, LLGraphCPU) def apply_jit(interp, graph, CPUClass): diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -19,6 +19,7 @@ BUILDERS = [ 'own-linux-x86-32', 'own-linux-x86-64', + 'own-linux-armhf', # 'own-macosx-x86-32', # 'pypy-c-app-level-linux-x86-32', # 'pypy-c-app-level-linux-x86-64', diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. 
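Several files in this merge switch from the old _virtualizable2_ class attribute to the new _virtualizable_ spelling (interp_jit.py above, and the annotator and backend tests below). As a reminder of what such a declaration looks like under the new name, here is a minimal sketch modelled on the test code in this diff; the Frame class and the interpreter loop are illustrative only, not part of the changeset:

    from rpython.rlib.jit import JitDriver, hint

    class Frame(object):
        _virtualizable_ = ['i']   # previously spelled _virtualizable2_

        def __init__(self, i):
            # Needed when a fresh virtualizable is created inside jitted code.
            self = hint(self, access_directly=True, fresh_virtualizable=True)
            self.i = i

    driver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame'])

    def interp_loop(n):
        # Toy interpreter loop: the 'frame' red variable is the virtualizable.
        frame = Frame(n)
        while frame.i > 0:
            driver.jit_merge_point(frame=frame)
            frame.i -= 1
        return frame.i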
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3088,7 +3088,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class B(A): def meth(self): return self @@ -3128,7 +3128,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class I: pass diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -959,16 +959,6 @@ pmc.B_offs(self.mc.currpos(), c.EQ) return pos - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - tmploc = self._regalloc.get_scratch_reg(INT) - self.mov_loc_loc(vloc, r.ip) - self.mc.MOV_ri(tmploc.value, 0) - self.mc.STR_ri(tmploc.value, r.ip.value, ofs) - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from (tmploc, 0) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -101,6 +101,9 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) + def get_vinfo(self): + return self.vinfo + def __repr__(self): return 'FieldDescr(%r, %r)' % (self.S, self.fieldname) @@ -170,7 +173,7 @@ translate_support_code = False is_llgraph = True - def __init__(self, rtyper, stats=None, *ignored_args, **ignored_kwds): + def __init__(self, rtyper, stats=None, *ignored_args, **kwds): model.AbstractCPU.__init__(self) self.rtyper = rtyper self.llinterp = LLInterpreter(rtyper) @@ -178,6 +181,7 @@ class MiniStats: pass self.stats = stats or MiniStats() + self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): clt = model.CompiledLoopToken(self, looptoken.number) @@ -316,6 +320,8 @@ except KeyError: descr = FieldDescr(S, fieldname) self.descrs[key] = descr + if self.vinfo_for_tests is not None: + descr.vinfo = self.vinfo_for_tests return descr def arraydescrof(self, A): @@ -600,6 +606,7 @@ forced_deadframe = None overflow_flag = False last_exception = None + force_guard_op = None def __init__(self, cpu, argboxes, args): self.env = {} @@ -766,6 +773,8 @@ if self.forced_deadframe is not None: saved_data = self.forced_deadframe._saved_data self.fail_guard(descr, saved_data) + self.force_guard_op = self.current_op + execute_guard_not_forced_2 = execute_guard_not_forced def execute_guard_not_invalidated(self, descr): if self.lltrace.invalid: @@ -887,7 +896,6 @@ # res = CALL assembler_call_helper(pframe) # jmp @done # @fastpath: - # RESET_VABLE # res = GETFIELD(pframe, 'result') # @done: # @@ -907,25 +915,17 @@ vable = lltype.nullptr(llmemory.GCREF.TO) # # Emulate the fast path - def reset_vable(jd, vable): - if jd.index_of_virtualizable != -1: - fielddescr = jd.vable_token_descr - NULL = lltype.nullptr(llmemory.GCREF.TO) - self.cpu.bh_setfield_gc(vable, NULL, fielddescr) + # faildescr = self.cpu.get_latest_descr(pframe) if faildescr == self.cpu.done_with_this_frame_descr_int: - reset_vable(jd, vable) return self.cpu.get_int_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_ref: - reset_vable(jd, vable) 
return self.cpu.get_ref_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_float: - reset_vable(jd, vable) return self.cpu.get_float_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_void: - reset_vable(jd, vable) return None - # + assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: result = assembler_helper_ptr(pframe, vable) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt) + ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -24,6 +24,7 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): + assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr self.failargs = failargs @@ -232,11 +233,8 @@ jmp_location = self._call_assembler_patch_je(result_loc, je_location) - # Path B: fast path. Must load the return value, and reset the token + # Path B: fast path. Must load the return value - # Reset the vable token --- XXX really too much special logic here:-( - if jd.index_of_virtualizable >= 0: - self._call_assembler_reset_vtoken(jd, vloc) # self._call_assembler_load_result(op, result_loc) # diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -76,7 +76,13 @@ FLAG_STRUCT = 'X' FLAG_VOID = 'V' -class FieldDescr(AbstractDescr): +class ArrayOrFieldDescr(AbstractDescr): + vinfo = None + + def get_vinfo(self): + return self.vinfo + +class FieldDescr(ArrayOrFieldDescr): name = '' offset = 0 # help translation field_size = 0 @@ -150,12 +156,13 @@ # ____________________________________________________________ # ArrayDescrs -class ArrayDescr(AbstractDescr): +class ArrayDescr(ArrayOrFieldDescr): tid = 0 basesize = 0 # workaround for the annotator itemsize = 0 lendescr = None flag = '\x00' + vinfo = None def __init__(self, basesize, itemsize, lendescr, flag): self.basesize = basesize diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -88,7 +88,7 @@ else: assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): - assert nos == [9, 10, 47] + assert nos == [0, 1, 47] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -23,7 +23,7 @@ # - floats neg and abs class Frame(object): - _virtualizable2_ = ['i'] + _virtualizable_ = ['i'] def __init__(self, i): self.i = i @@ -98,7 +98,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] 
driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -6,7 +6,7 @@ total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 - total_freed_bridges = 0 + total_freed_bridges = 0 # for heaptracker # _all_size_descrs_with_vtable = None @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -294,7 +294,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -59,7 +59,7 @@ t.buildannotator().build_types(function, [int] * len(args), main_entry_point=True) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() warmrunnerdesc = WarmRunnerDesc(t, translate_support_code=True, CPUClass=self.CPUClass, **kwds) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -79,6 +79,7 @@ allblocks) self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.pending_guard_tokens = None @@ -1846,7 +1847,11 @@ self.mov(fail_descr_loc, RawEbpLoc(ofs)) arglist = op.getarglist() if arglist and arglist[0].type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(1) # rax + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -1991,15 +1996,6 @@ # return jmp_location - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - vtoken_ofs = fielddescr.offset - self.mc.MOV(edx, vloc) # we know vloc is on the current frame - self.mc.MOV_mi((edx.value, vtoken_ofs), 0) - # in the line above, TOKEN_NONE = 0 - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from the dead frame's value index 0 @@ -2326,6 +2322,15 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_location - 1, chr(offset)) + def store_force_descr(self, op, fail_locs, frame_depth): + guard_token = self.implement_guard_recovery(op.opnum, + op.getdescr(), + op.getfailargs(), + fail_locs, frame_depth) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(0, guard_token) + def force_token(self, reg): # XXX kill me assert isinstance(reg, RegLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -22,7 +22,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, 
FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint @@ -1332,6 +1332,13 @@ #if jump_op is not None and jump_op.getdescr() is descr: # self._compute_hint_frame_locations_from_descr(descr) + def consider_guard_not_forced_2(self, op): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = [self.loc(v) for v in op.getfailargs()] + self.assembler.store_force_descr(op, fail_locs, + self.fm.get_frame_depth()) + self.possibly_free_vars(op.getfailargs()) + def consider_keepalive(self, op): pass diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -21,9 +21,9 @@ self.callcontrol = CallControl(cpu, jitdrivers_sd) self._seen_files = set() - def transform_func_to_jitcode(self, func, values, type_system='lltype'): + def transform_func_to_jitcode(self, func, values): """For testing.""" - rtyper = support.annotate(func, values, type_system=type_system) + rtyper = support.annotate(func, values) graph = rtyper.annotator.translator.graphs[0] jitcode = JitCode("test") self.transform_graph_to_jitcode(graph, jitcode, True) diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -83,6 +83,7 @@ OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 + OS_JIT_FORCE_VIRTUALIZABLE = 121 # for debugging: _OS_CANRAISE = set([ @@ -165,6 +166,9 @@ EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) +EffectInfo.LEAST_GENERAL = EffectInfo([], [], [], [], + EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + can_invalidate=False) def effectinfo_from_writeanalyze(effects, cpu, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -521,6 +521,8 @@ op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], op.result) return [SpaceOperation('-live-', [], None), op1, None] + if hints.get('force_virtualizable'): + return SpaceOperation('hint_force_virtualizable', [op.args[0]], None) else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -35,7 +35,7 @@ return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, - type_system="lltype", translationoptions={}): From noreply at buildbot.pypy.org Wed Aug 7 10:47:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Aug 2013 10:47:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Add some extra prints in the test Message-ID: <20130807084736.06F101C073E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65990:7ceed94ec733 Date: 2013-08-07 10:47 +0200 http://bitbucket.org/pypy/pypy/changeset/7ceed94ec733/ Log: Add some extra prints in the test diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -37,6 +37,7 @@ except BaseException, e: interrupted.append(e) finally: 
+ print 'subthread stops, interrupted=%r' % (interrupted,) done.append(None) # This is normally called by app_main.py @@ -52,11 +53,13 @@ try: done = [] interrupted = [] + print '--- start ---' thread.start_new_thread(subthread, ()) for j in range(10): if len(done): break print '.' time.sleep(0.1) + print 'main thread loop done' assert len(done) == 1 assert len(interrupted) == 1 assert 'KeyboardInterrupt' in interrupted[0].__class__.__name__ From noreply at buildbot.pypy.org Wed Aug 7 12:03:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Aug 2013 12:03:41 +0200 (CEST) Subject: [pypy-commit] cffi default: In this case, we must call backend.load_library() with a path Message-ID: <20130807100341.DB3B21C300E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1344:b04025bcffc5 Date: 2013-08-07 12:03 +0200 http://bitbucket.org/cffi/cffi/changeset/b04025bcffc5/ Log: In this case, we must call backend.load_library() with a path that contains a '/'. diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -61,7 +61,9 @@ def load_library(self): # import it with the CFFI backend backend = self.ffi._backend - module = backend.load_library(self.verifier.modulefilename) + # needs to make a path that contains '/', on Posix + filename = os.path.join(os.curdir, self.verifier.modulefilename) + module = backend.load_library(filename) # # call loading_gen_struct() to get the struct layout inferred by # the C compiler From noreply at buildbot.pypy.org Wed Aug 7 15:19:19 2013 From: noreply at buildbot.pypy.org (stian) Date: Wed, 7 Aug 2013 15:19:19 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Fix one test, turn off the fast lshift/rshift to avoid potensial rpython errors intead of wrong results. Message-ID: <20130807131919.E3FAA1C1067@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65992:e94966391252 Date: 2013-08-06 01:10 +0200 http://bitbucket.org/pypy/pypy/changeset/e94966391252/ Log: Fix one test, turn off the fast lshift/rshift to avoid potensial rpython errors intead of wrong results. diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -918,13 +918,13 @@ if w == 0: raise ZeroDivisionError("long division or modulo by zero") - digit = abs(w) wsign = (-1 if w < 0 else 1) - if digit >= MASK or not (digit > 0) or v.sign != wsign: + if w == MIN_VALUE or v.sign != wsign: # Divrem1 doesn't deal with the sign difference. Instead of having yet another copy, # Just fallback. return v.divmod(rbigint.fromint(w)) + digit = abs(w) assert digit > 0 div, mod = _divrem1(v, digit) @@ -1450,13 +1450,15 @@ size_a = a.numdigits() + digit = abs(b) # Not cast to unsigned just yet. + sign = 1 if size_a == 1: adigit = a.digit(0) - if adigit == b: + if adigit == digit: return NULLRBIGINT - elif adigit > b: - return rbigint.fromint(adigit - b) + elif adigit < digit: + return rbigint.fromint(adigit - digit) z = rbigint([NULLDIGIT] * size_a, sign, size_a) borrow = UDIGIT_TYPE(0) @@ -1464,7 +1466,7 @@ # The following assumes unsigned arithmetic # works modulo 2**N for some N>SHIFT. - borrow = a.udigit(0) - UDIGIT_TYPE(abs(b)) + borrow = a.udigit(0) - UDIGIT_TYPE(digit) z.setdigit(0, borrow) borrow >>= SHIFT while i < size_a: @@ -1572,8 +1574,8 @@ Returns the absolute value of the product, or None if error. 
""" - if digit & (digit - 1) == 0: - return a.lqshift(ptwotable[digit]) + #if digit & (digit - 1) == 0: + # return a.lqshift(ptwotable[digit]) return _muladd1(a, digit) From noreply at buildbot.pypy.org Wed Aug 7 15:19:18 2013 From: noreply at buildbot.pypy.org (stian) Date: Wed, 7 Aug 2013 15:19:18 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Some fixes, some debug code. Message-ID: <20130807131918.9DDA01C00F4@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65991:9fbd5a1f8450 Date: 2013-08-05 05:50 +0200 http://bitbucket.org/pypy/pypy/changeset/9fbd5a1f8450/ Log: Some fixes, some debug code. diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -42,6 +42,7 @@ LONG_TYPE = rffi.LONGLONG MASK = int((1 << SHIFT) - 1) +MIN_VALUE = int((-1 << SHIFT)) FLOAT_MULTIPLIER = float(1 << SHIFT) # Debugging digit array access. @@ -615,10 +616,18 @@ def int_add(self, other): if other == 0: return self - if self.sign == 0: + elif self.sign == 0: return rbigint.fromint(other) + elif other == MIN_VALUE: + # Fallback to long. + return self.add(rbigint.fromint(other)) + + digit = UDIGIT_TYPE(abs(other)) + + assert digit > 0 # Required. + if (self.sign > 0 and other > 0) or (self.sign < 0 and other < 0): - result = _x_int_add(self, abs(other)) + result = _x_int_add(self, digit) else: # XXX: Improve. result = _x_sub(rbigint.fromint(other), self) @@ -643,12 +652,17 @@ def int_sub(self, other): if other == 0: return self - if self.sign == 0: + elif self.sign == 0: return rbigint.fromint(-1 * other) + + if other == MIN_VALUE: + # Fallback to long. + return self.sub(rbigint.fromint(other)) + if (self.sign > 0 and other > 0) or (self.sign < 0 and other < 0): - result = _x_int_sub(self, abs(other)) + result = _x_int_sub(self, other) else: - result = _x_int_add(self, abs(other)) + result = _x_int_add(self, UDIGIT_TYPE(abs(other))) result.sign *= self.sign return result @@ -700,6 +714,11 @@ @jit.elidable def int_mul(self, b): """ Mul with int. """ + if b == MIN_VALUE: + # Fallback to long. + return self.mul(rbigint.fromint(b)) + + digit = _widen_digit(b) asize = self.numdigits() if self.sign == 0 or b == 0: @@ -711,7 +730,7 @@ elif self._digits[0] == ONEDIGIT: return rbigint.fromint(self.sign * b) - res = self.widedigit(0) * abs(b) + res = self.widedigit(0) * digit carry = res >> SHIFT if carry: return rbigint([_store_digit(res & MASK), _store_digit(carry)], self.sign * (-1 if b < 0 else 1), 2) @@ -719,7 +738,7 @@ return rbigint([_store_digit(res & MASK)], self.sign * (-1 if b < 0 else 1), 1) else: - result = _x_int_mul(self, abs(b)) + result = _x_int_mul(self, digit) result.sign = self.sign * (-1 if b < 0 else 1) return result @@ -741,9 +760,9 @@ digit = other.digit(0) if digit == 1: return self - elif digit and digit & (digit - 1) == 0: + """elif digit and digit & (digit - 1) == 0: return self.rshift(ptwotable[digit]) - + """ div, mod = _divrem(self, other) if mod.sign * other.sign == -1: if div.sign == 0: @@ -754,17 +773,21 @@ @jit.elidable def int_floordiv(self, other): - if other == 0: raise ZeroDivisionError("long division or modulo by zero") - + elif other == MIN_VALUE: + # Fallback to long. 
+ return self.floordiv(rbigint.fromint(other)) + digit = abs(other) + assert digit > 0 + if self.sign == 1 and other > 0: if digit == 1: return self - elif digit and digit & (digit - 1) == 0: + """elif digit & (digit - 1) == 0: return self.rshift(ptwotable[digit]) - + """ div, mod = _divrem1(self, digit) if mod != 0 and self.sign * (-1 if other < 0 else 1) == -1: @@ -823,6 +846,9 @@ def int_mod(self, other): if self.sign == 0: return NULLRBIGINT + elif other == MIN_VALUE: + # Fallback to long. + self.mod(rbigint.fromint(other)) digit = abs(other) @@ -892,13 +918,16 @@ if w == 0: raise ZeroDivisionError("long division or modulo by zero") + digit = abs(w) wsign = (-1 if w < 0 else 1) - if v.sign != wsign: + if digit >= MASK or not (digit > 0) or v.sign != wsign: # Divrem1 doesn't deal with the sign difference. Instead of having yet another copy, # Just fallback. return v.divmod(rbigint.fromint(w)) - div, mod = _divrem1(v, abs(w)) + assert digit > 0 + + div, mod = _divrem1(v, digit) mod = rbigint.fromint(mod) mod.sign = wsign @@ -1353,14 +1382,11 @@ def _x_int_add(a, b): """ Add the absolute values of one bigint and one int. """ size_a = a.numdigits() - z = rbigint([NULLDIGIT] * (size_a + 1), 1) - i = UDIGIT_TYPE(0) - carry = a.udigit(0) + b z.setdigit(0, carry) carry >>= SHIFT - i += 1 + i = UDIGIT_TYPE(1) while i < size_a: carry += a.udigit(i) z.setdigit(i, carry) @@ -1412,6 +1438,9 @@ #borrow &= 1 i += 1 + if borrow != 0: + print a.str(), " minus ", b.str() + assert borrow == 0 z._normalize() return z @@ -1420,23 +1449,22 @@ """ Subtract the absolute values of one rbigint and one integer. """ size_a = a.numdigits() + sign = 1 - if size_a == 1: - # Find highest digit where a and b differ: - if a.digit(0) == b: + adigit = a.digit(0) + if adigit == b: return NULLRBIGINT - elif a.digit(0) < b: - sign = -1 - b *= -1 - size_a = size_b = 1 + elif adigit > b: + return rbigint.fromint(adigit - b) z = rbigint([NULLDIGIT] * size_a, sign, size_a) borrow = UDIGIT_TYPE(0) i = _load_unsigned_digit(1) # The following assumes unsigned arithmetic # works modulo 2**N for some N>SHIFT. - borrow = a.udigit(0) - b + + borrow = a.udigit(0) - UDIGIT_TYPE(abs(b)) z.setdigit(0, borrow) borrow >>= SHIFT while i < size_a: @@ -1446,6 +1474,9 @@ #borrow &= 1 i += 1 + if borrow > 0: + print "BORROW IS BAD" + assert borrow == 0 z._normalize() return z @@ -1505,13 +1536,13 @@ z._normalize() return z - elif digit: + """elif digit: if digit & (digit - 1) == 0: return b.lqshift(ptwotable[digit]) # Even if it's not power of two it can still be useful. return _muladd1(b, digit) - + """ z = rbigint([NULLDIGIT] * (size_a + size_b), 1) # gradeschool long mult i = UDIGIT_TYPE(0) @@ -1786,6 +1817,9 @@ and the remainder as a tuple. The sign of a is ignored; n should not be zero. """ + if not (n > 0 and n <= MASK): + print "Trying to divide %s by %d" % (a.str(), n) + assert n > 0 and n <= MASK size = a.numdigits() @@ -1997,6 +2031,10 @@ if b.sign == 0: raise ZeroDivisionError("long division or modulo by zero") + # XXX: Temp + if b.sign != 0 and b.numdigits() == 1 and b.digit(0) == 0: + print "VERY BAD!" + if (size_a < size_b or (size_a == size_b and a.digit(abs(size_a-1)) < b.digit(abs(size_b-1)))): @@ -2540,6 +2578,13 @@ def _int_bitwise(a, op, b): # '&', '|', '^' """ Bitwise and/or/xor operations with ints. """ + digit = abs(b) + if digit > MASK or not (digit >= 0): + # Fallback to long. 
+ return _bitwise(a, op, rbigint.fromint(b)) + + assert digit >= 0 + if a.sign < 0: a = a.invert() maska = MASK diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -41,6 +41,30 @@ r2 = r1.neg() assert r2.str() == str(-n) + def test_int_long_catch_div(self): + # Basically abs(int) > MASK + l = 2**128 + r1 = rbigint.fromlong(l) + assert r1.int_floordiv(-MASK-1).tolong() == (l // (-MASK-1)) + + def test_int_long_catch_mul(self): + # Basically abs(int) > MASK + l = 2**128 + r1 = rbigint.fromlong(l) + assert r1.int_mul(-MASK-1).tolong() == (l * (-MASK-1)) + + def test_int_long_catch_add(self): + # Basically abs(int) > MASK + l = 2**128 + r1 = rbigint.fromlong(-l) + assert r1.int_add(-MASK-1).tolong() == (-l + (-MASK-1)) + + def test_int_long_catch_sub(self): + # Basically abs(int) > MASK + l = 2**128 + r1 = rbigint.fromlong(-l) + assert r1.int_sub(-MASK-1).tolong() == (-l - (-MASK-1)) + def test_floordiv(self): for op1 in [-12, -2, -1, 1, 2, 50]: for op2 in [-4, -2, -1, 1, 2, 8]: @@ -246,7 +270,7 @@ assert rbigint._from_numberstring_parser(parser).tolong() == 1231231241 def test_add(self): - x = 123456789123456789000000L + x = 123456789123456789000000000000000000L y = 123858582373821923936744221L for i in [-1, 1]: for j in [-1, 1]: @@ -256,8 +280,8 @@ assert result.tolong() == x * i + y * j def test_int_add(self): - x = 123456789123456789000000L - y = 1238 + x = 123456789123456789000000000000000000L + y = MASK-2 for i in [-1, 1]: for j in [-1, 1]: f1 = rbigint.fromlong(x * i) @@ -278,7 +302,7 @@ def test_int_sub(self): x = 12378959520302182384345L - y = 8896 + y = MASK for i in [-1, 1]: for j in [-1, 1]: f1 = rbigint.fromlong(x * i) @@ -303,7 +327,7 @@ def test_int_mul(self): x = -1238585838347L - y = 585839 + y = MASK f1 = rbigint.fromlong(x) result = f1.int_mul(y) assert result.tolong() == x * y From noreply at buildbot.pypy.org Wed Aug 7 15:19:21 2013 From: noreply at buildbot.pypy.org (stian) Date: Wed, 7 Aug 2013 15:19:21 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Fix for LONG * -INT Message-ID: <20130807131921.D98631C00F4@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65993:0cbc8eed974c Date: 2013-08-06 20:30 +0200 http://bitbucket.org/pypy/pypy/changeset/0cbc8eed974c/ Log: Fix for LONG * -INT diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -81,6 +81,8 @@ return rffi.cast(LONG_TYPE, x) def _store_digit(x): + # TMP + assert x >= 0 return rffi.cast(STORE_TYPE, x) _store_digit._annspecialcase_ = 'specialize:argtype(0)' @@ -717,13 +719,15 @@ if b == MIN_VALUE: # Fallback to long. return self.mul(rbigint.fromint(b)) - - digit = _widen_digit(b) + elif self.sign == 0 or b == 0: + return NULLRBIGINT + + + digit = _widen_digit(abs(b)) + + assert digit > 0 asize = self.numdigits() - if self.sign == 0 or b == 0: - return NULLRBIGINT - if asize == 1: if self._digits[0] == NULLDIGIT: return NULLRBIGINT @@ -2036,6 +2040,7 @@ # XXX: Temp if b.sign != 0 and b.numdigits() == 1 and b.digit(0) == 0: print "VERY BAD!" 
+ raise ZeroDivisionError("long division or modulo by zero") if (size_a < size_b or (size_a == size_b and diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -332,6 +332,13 @@ result = f1.int_mul(y) assert result.tolong() == x * y + def test_int_mul_bug1(self): + x = 89731783189318938713891893879123L + y = -1 + f1 = rbigint.fromlong(x) + result = f1.int_mul(y) + assert result.tolong() == x * y + def test_tofloat(self): x = 12345678901234567890L ** 10 f1 = rbigint.fromlong(x) From noreply at buildbot.pypy.org Wed Aug 7 15:19:23 2013 From: noreply at buildbot.pypy.org (stian) Date: Wed, 7 Aug 2013 15:19:23 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Merge default. Message-ID: <20130807131923.98C181C00F4@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65994:38f9a2276c21 Date: 2013-08-06 20:31 +0200 http://bitbucket.org/pypy/pypy/changeset/38f9a2276c21/ Log: Merge default. diff too long, truncating to 2000 out of 6141 lines diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 +165,8 @@ # All _delegate_methods must also be 
initialized here. send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. The @@ -179,12 +181,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). + self._sock = _sock def send(self, data, flags=0): @@ -216,9 +227,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +290,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +333,11 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: + self._sock = None + if s is not None: s._drop() - if self._close: - self._sock.close() - self._sock = None + if self._close: + s.close() def __del__(self): try: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). + sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS @@ -352,11 +358,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. 
return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1066,6 +1066,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1321,7 +1324,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1193,6 +1193,8 @@ # out of socket._fileobject() and into a base class. r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. _Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. 
+ +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. Frames +can escape and they can also be allocated already at the moment we enter the +JIT. In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). + +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. + It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -62,3 +62,14 @@ No longer delegate numpy string_ methods to space.StringObject, in numpy this works by kind of by accident. Support for merging the refactor-str-types branch + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. 
branch: reflex-support diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -176,9 +176,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -260,7 +261,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -330,7 +331,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -340,7 +341,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -355,7 +356,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -389,7 +390,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -476,7 +477,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -490,7 +491,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -522,7 +523,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -553,12 +554,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -609,10 +610,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." 
return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -638,8 +639,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -649,7 +650,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -667,7 +668,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -56,7 +56,7 @@ def descr_typecode(space, self): return space.wrap(self.typecode) -arr_eq_driver = jit.JitDriver(greens = ['comp_func'], reds = 'auto') +arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto') EQ, NE, LT, LE, GT, GE = range(6) def compare_arrays(space, arr1, arr2, comp_op): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,7 +3,6 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread -from pypy.module.thread import os_thread PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -17,6 +16,9 @@ ('dict', PyObject), ])) +class NoThreads(Exception): + pass + @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread @@ -45,10 +47,15 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): + if not space.config.translation.thread: + raise NoThreads + from pypy.module.thread import os_thread os_thread.setup_threads(space) @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): + if not space.config.translation.thread: + return 0 return 1 # XXX: might be generally useful @@ -232,6 +239,8 @@ """Create a new thread state object belonging to the given interpreter object. The global interpreter lock need not be held, but may be held if it is necessary to serialize calls to this function.""" + if not space.config.translation.thread: + raise NoThreads rthread.gc_thread_prepare() # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. @@ -246,6 +255,8 @@ def PyThreadState_Clear(space, tstate): """Reset all information in a thread state object. 
The global interpreter lock must be held.""" + if not space.config.translation.thread: + raise NoThreads Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) space.threadlocals.leave_thread(space) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -155,7 +155,8 @@ obj_iter.next() return cur_value -reduce_cum_driver = jit.JitDriver(greens = ['shapelen', 'func', 'dtype'], +reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', + greens = ['shapelen', 'func', 'dtype'], reds = 'auto') def compute_reduce_cumultative(obj, out, calc_dtype, func, identity): @@ -529,8 +530,9 @@ val_arr.descr_getitem(space, w_idx)) iter.next() -byteswap_driver = jit.JitDriver(greens = ['dtype'], - reds = 'auto') +byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', + greens = ['dtype'], + reds = 'auto') def byteswap(from_, to): dtype = from_.dtype @@ -542,8 +544,9 @@ to_iter.next() from_iter.next() -choose_driver = jit.JitDriver(greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') +choose_driver = jit.JitDriver(name='numpy_choose_driver', + greens = ['shapelen', 'mode', 'dtype'], + reds = 'auto') def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -572,8 +575,9 @@ out_iter.next() arr_iter.next() -clip_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +clip_driver = jit.JitDriver(name='numpy_clip_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def clip(space, arr, shape, min, max, out): arr_iter = arr.create_iter(shape) @@ -597,8 +601,9 @@ out_iter.next() min_iter.next() -round_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +round_driver = jit.JitDriver(name='numpy_round_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def round(space, arr, dtype, shape, decimals, out): arr_iter = arr.create_iter(shape) @@ -612,7 +617,8 @@ arr_iter.next() out_iter.next() -diagonal_simple_driver = jit.JitDriver(greens = ['axis1', 'axis2'], +diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', + greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,18 +12,18 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap -PyFrame._virtualizable2_ = ['last_instr', 'pycode', - 'valuestackdepth', 'locals_stack_w[*]', - 'cells[*]', - 'last_exception', - 'lastblock', - 'is_being_profiled', - 'w_globals', - 'w_f_trace', - ] +PyFrame._virtualizable_ = ['last_instr', 'pycode', + 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', + 'last_exception', + 'lastblock', + 'is_being_profiled', + 'w_globals', + 'w_f_trace', + ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] @@ -73,7 +73,13 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: + self.last_exception = None 
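[Editor's note: pulling together the virtualizable.rst documentation and the interp_jit.py change above, a self-contained RPython sketch of the declarations might look as follows. The Frame class, the toy interpreter loop and the driver name are invented for illustration; only _virtualizable_, the JitDriver arguments and the jit.hint() calls reflect the documented API.]

    from rpython.rlib import jit

    class Frame(object):
        _virtualizable_ = ['stackdepth', 'locals[*]']   # a field and a virtualizable array

        def __init__(self, n_locals):
            # per the doc above: needed when a fresh virtualizable is
            # initialized inside JIT-traced code
            self = jit.hint(self, access_directly=True, fresh_virtualizable=True)
            self.stackdepth = 0
            self.locals = [0] * n_locals

    driver = jit.JitDriver(name='toy_interp', greens=['pc'], reds=['frame'],
                           virtualizables=['frame'])

    def interpret(n_locals, steps):
        frame = Frame(n_locals)
        pc = 0
        while pc < steps:
            driver.jit_merge_point(pc=pc, frame=frame)
            i = jit.promote(pc % n_locals)   # array access needs a constant, non-negative index
            frame.locals[i] = frame.locals[i] + 1
            frame.stackdepth = i
            pc += 1
        # if the frame survives the loop (generator-like code), force it
        # explicitly, as interp_jit.py now does on Yield:
        jit.hint(frame, force_virtualizable=True)
        return frame.locals[0]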
return self.popvalue() def jump_absolute(self, jumpto, ec): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py --- a/pypy/module/test_lib_pypy/test_curses.py +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -1,6 +1,15 @@ +import pytest + +# Check that lib_pypy.cffi finds the correct version of _cffi_backend. +# Otherwise, the test is skipped. It should never be skipped when run +# with "pypy py.test -A". +try: + from lib_pypy import cffi; cffi.FFI() +except (ImportError, AssertionError), e: + pytest.skip("no cffi module or wrong version (%s)" % (e,)) + from lib_pypy import _curses -import pytest lib = _curses.lib diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -14,9 +14,9 @@ return lltype.nullptr(T) interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLtypeCPU + from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now - apply_jit(interp, graph, LLtypeCPU) + apply_jit(interp, graph, LLGraphCPU) def apply_jit(interp, graph, CPUClass): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3088,7 +3088,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class B(A): def meth(self): return self @@ -3128,7 +3128,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class I: pass diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -959,16 +959,6 @@ pmc.B_offs(self.mc.currpos(), c.EQ) return pos - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - tmploc = self._regalloc.get_scratch_reg(INT) - self.mov_loc_loc(vloc, r.ip) - self.mc.MOV_ri(tmploc.value, 0) - self.mc.STR_ri(tmploc.value, r.ip.value, ofs) - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from (tmploc, 0) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- 
a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -101,6 +101,9 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) + def get_vinfo(self): + return self.vinfo + def __repr__(self): return 'FieldDescr(%r, %r)' % (self.S, self.fieldname) @@ -170,7 +173,7 @@ translate_support_code = False is_llgraph = True - def __init__(self, rtyper, stats=None, *ignored_args, **ignored_kwds): + def __init__(self, rtyper, stats=None, *ignored_args, **kwds): model.AbstractCPU.__init__(self) self.rtyper = rtyper self.llinterp = LLInterpreter(rtyper) @@ -178,6 +181,7 @@ class MiniStats: pass self.stats = stats or MiniStats() + self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): clt = model.CompiledLoopToken(self, looptoken.number) @@ -316,6 +320,8 @@ except KeyError: descr = FieldDescr(S, fieldname) self.descrs[key] = descr + if self.vinfo_for_tests is not None: + descr.vinfo = self.vinfo_for_tests return descr def arraydescrof(self, A): @@ -600,6 +606,7 @@ forced_deadframe = None overflow_flag = False last_exception = None + force_guard_op = None def __init__(self, cpu, argboxes, args): self.env = {} @@ -766,6 +773,8 @@ if self.forced_deadframe is not None: saved_data = self.forced_deadframe._saved_data self.fail_guard(descr, saved_data) + self.force_guard_op = self.current_op + execute_guard_not_forced_2 = execute_guard_not_forced def execute_guard_not_invalidated(self, descr): if self.lltrace.invalid: @@ -887,7 +896,6 @@ # res = CALL assembler_call_helper(pframe) # jmp @done # @fastpath: - # RESET_VABLE # res = GETFIELD(pframe, 'result') # @done: # @@ -907,25 +915,17 @@ vable = lltype.nullptr(llmemory.GCREF.TO) # # Emulate the fast path - def reset_vable(jd, vable): - if jd.index_of_virtualizable != -1: - fielddescr = jd.vable_token_descr - NULL = lltype.nullptr(llmemory.GCREF.TO) - self.cpu.bh_setfield_gc(vable, NULL, fielddescr) + # faildescr = self.cpu.get_latest_descr(pframe) if faildescr == self.cpu.done_with_this_frame_descr_int: - reset_vable(jd, vable) return self.cpu.get_int_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_ref: - reset_vable(jd, vable) return self.cpu.get_ref_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_float: - reset_vable(jd, vable) return self.cpu.get_float_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_void: - reset_vable(jd, vable) return None - # + assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: result = assembler_helper_ptr(pframe, vable) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt) + ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -24,6 +24,7 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): + assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr self.failargs = failargs @@ -232,11 
+233,8 @@ jmp_location = self._call_assembler_patch_je(result_loc, je_location) - # Path B: fast path. Must load the return value, and reset the token + # Path B: fast path. Must load the return value - # Reset the vable token --- XXX really too much special logic here:-( - if jd.index_of_virtualizable >= 0: - self._call_assembler_reset_vtoken(jd, vloc) # self._call_assembler_load_result(op, result_loc) # diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -76,7 +76,13 @@ FLAG_STRUCT = 'X' FLAG_VOID = 'V' -class FieldDescr(AbstractDescr): +class ArrayOrFieldDescr(AbstractDescr): + vinfo = None + + def get_vinfo(self): + return self.vinfo + +class FieldDescr(ArrayOrFieldDescr): name = '' offset = 0 # help translation field_size = 0 @@ -150,12 +156,13 @@ # ____________________________________________________________ # ArrayDescrs -class ArrayDescr(AbstractDescr): +class ArrayDescr(ArrayOrFieldDescr): tid = 0 basesize = 0 # workaround for the annotator itemsize = 0 lendescr = None flag = '\x00' + vinfo = None def __init__(self, basesize, itemsize, lendescr, flag): self.basesize = basesize diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -23,7 +23,7 @@ # - floats neg and abs class Frame(object): - _virtualizable2_ = ['i'] + _virtualizable_ = ['i'] def __init__(self, i): self.i = i @@ -98,7 +98,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -6,7 +6,7 @@ total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 - total_freed_bridges = 0 + total_freed_bridges = 0 # for heaptracker # _all_size_descrs_with_vtable = None @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -294,7 +294,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -59,7 +59,7 @@ t.buildannotator().build_types(function, [int] * len(args), main_entry_point=True) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() warmrunnerdesc = WarmRunnerDesc(t, translate_support_code=True, CPUClass=self.CPUClass, **kwds) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -79,6 +79,7 @@ allblocks) self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.pending_guard_tokens = None @@ -1846,7 +1847,11 @@ 
self.mov(fail_descr_loc, RawEbpLoc(ofs)) arglist = op.getarglist() if arglist and arglist[0].type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(1) # rax + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -1991,15 +1996,6 @@ # return jmp_location - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - vtoken_ofs = fielddescr.offset - self.mc.MOV(edx, vloc) # we know vloc is on the current frame - self.mc.MOV_mi((edx.value, vtoken_ofs), 0) - # in the line above, TOKEN_NONE = 0 - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from the dead frame's value index 0 @@ -2326,6 +2322,15 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_location - 1, chr(offset)) + def store_force_descr(self, op, fail_locs, frame_depth): + guard_token = self.implement_guard_recovery(op.opnum, + op.getdescr(), + op.getfailargs(), + fail_locs, frame_depth) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(0, guard_token) + def force_token(self, reg): # XXX kill me assert isinstance(reg, RegLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -22,7 +22,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint @@ -1332,6 +1332,13 @@ #if jump_op is not None and jump_op.getdescr() is descr: # self._compute_hint_frame_locations_from_descr(descr) + def consider_guard_not_forced_2(self, op): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = [self.loc(v) for v in op.getfailargs()] + self.assembler.store_force_descr(op, fail_locs, + self.fm.get_frame_depth()) + self.possibly_free_vars(op.getfailargs()) + def consider_keepalive(self, op): pass diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -21,9 +21,9 @@ self.callcontrol = CallControl(cpu, jitdrivers_sd) self._seen_files = set() - def transform_func_to_jitcode(self, func, values, type_system='lltype'): + def transform_func_to_jitcode(self, func, values): """For testing.""" - rtyper = support.annotate(func, values, type_system=type_system) + rtyper = support.annotate(func, values) graph = rtyper.annotator.translator.graphs[0] jitcode = JitCode("test") self.transform_graph_to_jitcode(graph, jitcode, True) diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -83,6 +83,7 @@ OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 + OS_JIT_FORCE_VIRTUALIZABLE = 121 # for debugging: _OS_CANRAISE = set([ @@ -165,6 +166,9 @@ EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, 
None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) +EffectInfo.LEAST_GENERAL = EffectInfo([], [], [], [], + EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + can_invalidate=False) def effectinfo_from_writeanalyze(effects, cpu, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -521,6 +521,8 @@ op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], op.result) return [SpaceOperation('-live-', [], None), op1, None] + if hints.get('force_virtualizable'): + return SpaceOperation('hint_force_virtualizable', [op.args[0]], None) else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -35,7 +35,7 @@ return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, - type_system="lltype", translationoptions={}): + translationoptions={}): # build the normal ll graphs for ll_function t = TranslationContext() for key, value in translationoptions.items(): @@ -44,7 +44,7 @@ a = t.buildannotator(policy=annpolicy) argtypes = getargtypes(a, values) a.build_types(func, argtypes, main_entry_point=True) - rtyper = t.buildrtyper(type_system = type_system) + rtyper = t.buildrtyper() rtyper.specialize() #if inline: # auto_inlining(t, threshold=inline) diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -125,8 +125,8 @@ class TestFlatten: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def encoding_test(self, func, args, expected, diff --git a/rpython/jit/codewriter/test/test_policy.py b/rpython/jit/codewriter/test/test_policy.py --- a/rpython/jit/codewriter/test/test_policy.py +++ b/rpython/jit/codewriter/test/test_policy.py @@ -131,7 +131,7 @@ def test_access_directly_but_not_seen(): class X: - _virtualizable2_ = ["a"] + _virtualizable_ = ["a"] def h(x, y): w = 0 for i in range(y): diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -13,8 +13,8 @@ class TestRegAlloc: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def check_assembler(self, graph, expected, transform=False, diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1320,6 +1320,10 @@ from rpython.jit.metainterp import quasiimmut quasiimmut.do_force_quasi_immutable(cpu, struct, mutatefielddescr) + @arguments("r") + def bhimpl_hint_force_virtualizable(r): + pass + @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ 
b/rpython/jit/metainterp/compile.py @@ -708,6 +708,8 @@ rstack._stack_criticalcode_start() try: deadframe = cpu.force(token) + # this should set descr to ResumeGuardForceDescr, if it + # was not that already faildescr = cpu.get_latest_descr(deadframe) assert isinstance(faildescr, ResumeGuardForcedDescr) faildescr.handle_async_forcing(deadframe) @@ -715,12 +717,18 @@ rstack._stack_criticalcode_stop() def handle_async_forcing(self, deadframe): - from rpython.jit.metainterp.resume import force_from_resumedata + from rpython.jit.metainterp.resume import (force_from_resumedata, + AlreadyForced) metainterp_sd = self.metainterp_sd vinfo = self.jitdriver_sd.virtualizable_info ginfo = self.jitdriver_sd.greenfield_info - all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, - vinfo, ginfo) + # there is some chance that this is already forced. In this case + # the virtualizable would have a token = NULL + try: + all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, + vinfo, ginfo) + except AlreadyForced: + return # The virtualizable data was stored on the real virtualizable above. # Handle all_virtuals: keep them for later blackholing from the # future failure of the GUARD_NOT_FORCED diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -151,6 +151,8 @@ descr_ptr = cpu.ts.cast_to_baseclass(descr_gcref) return cast_base_ptr_to_instance(AbstractDescr, descr_ptr) + def get_vinfo(self): + raise NotImplementedError class AbstractFailDescr(AbstractDescr): index = -1 diff --git a/rpython/jit/metainterp/jitexc.py b/rpython/jit/metainterp/jitexc.py --- a/rpython/jit/metainterp/jitexc.py +++ b/rpython/jit/metainterp/jitexc.py @@ -62,7 +62,7 @@ def _get_standard_error(rtyper, Class): - exdata = rtyper.getexceptiondata() + exdata = rtyper.exceptiondata clsdef = rtyper.annotator.bookkeeper.getuniqueclassdef(Class) evalue = exdata.get_standard_ll_exc_instance(rtyper, clsdef) return evalue diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5101,6 +5101,15 @@ } self.optimize_loop(ops, expected, call_pure_results) + def test_guard_not_forced_2_virtual(self): + ops = """ + [i0] + p0 = new_array(3, descr=arraydescr) + guard_not_forced_2() [p0] + finish(p0) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7086,6 +7086,19 @@ """ self.optimize_loop(ops, expected) + def test_force_virtualizable_virtual(self): + ops = """ + [i0] + p1 = new_with_vtable(ConstClass(node_vtable)) + cond_call(1, 123, p1, descr=clear_vable) + jump(i0) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + def test_setgetfield_counter(self): ops = """ [p1] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -254,12 +254,19 @@ asmdescr = LoopToken() # it 
can be whatever, it's not a descr though from rpython.jit.metainterp.virtualref import VirtualRefInfo + class FakeWarmRunnerDesc: pass FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced + FUNC = lltype.FuncType([], lltype.Void) + ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + can_invalidate=False, + oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) + clear_vable = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei) + jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -484,6 +484,8 @@ class OptVirtualize(optimizer.Optimization): "Virtualize objects until they escape." + _last_guard_not_forced_2 = None + def new(self): return OptVirtualize() @@ -527,6 +529,20 @@ return self.emit_operation(op) + def optimize_GUARD_NOT_FORCED_2(self, op): + self._last_guard_not_forced_2 = op + + def optimize_FINISH(self, op): + if self._last_guard_not_forced_2 is not None: + guard_op = self._last_guard_not_forced_2 + self.emit_operation(op) + guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) + i = len(self.optimizer._newoperations) - 1 + assert i >= 0 + self.optimizer._newoperations.insert(i, guard_op) + else: + self.emit_operation(op) + def optimize_CALL_MAY_FORCE(self, op): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex @@ -535,6 +551,15 @@ return self.emit_operation(op) + def optimize_COND_CALL(self, op): + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + value = self.getvalue(op.getarg(2)) + if value.is_virtual(): + return + self.emit_operation(op) + def optimize_VIRTUAL_REF(self, op): # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -657,6 +682,11 @@ self.do_RAW_MALLOC_VARSIZE_CHAR(op) elif effectinfo.oopspecindex == EffectInfo.OS_RAW_FREE: self.do_RAW_FREE(op) + elif effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + # we might end up having CALL here instead of COND_CALL + value = self.getvalue(op.getarg(1)) + if value.is_virtual(): + return else: self.emit_operation(op) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -18,7 +18,7 @@ from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.lltypesystem import lltype, rclass +from rpython.rtyper.lltypesystem import lltype, rclass, rffi @@ -313,7 +313,7 @@ opnum = rop.GUARD_TRUE else: opnum = rop.GUARD_FALSE - self.generate_guard(opnum, box) + self.metainterp.generate_guard(opnum, box) if not switchcase: self.pc = target @@ -341,10 +341,12 @@ value = box.nonnull() if value: if not self.metainterp.heapcache.is_class_known(box): - self.generate_guard(rop.GUARD_NONNULL, box, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_NONNULL, box, + resumepc=orgpc) else: if not isinstance(box, Const): - self.generate_guard(rop.GUARD_ISNULL, box, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_ISNULL, 
box, + resumepc=orgpc) promoted_box = box.constbox() self.metainterp.replace_box(box, promoted_box) return value @@ -604,7 +606,7 @@ def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info if (ginfo is not None and fielddescr in ginfo.green_field_descrs - and not self._nonstandard_virtualizable(pc, box)): + and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't # record any operation resbox = executor.execute(self.metainterp.cpu, self.metainterp, @@ -672,6 +674,10 @@ opimpl_raw_load_i = _opimpl_raw_load opimpl_raw_load_f = _opimpl_raw_load + @arguments("box") + def opimpl_hint_force_virtualizable(self, box): + self.metainterp.gen_store_back_in_vable(box) + @arguments("box", "descr", "descr", "orgpc") def opimpl_record_quasiimmut_field(self, box, fielddescr, mutatefielddescr, orgpc): @@ -680,7 +686,8 @@ descr = QuasiImmutDescr(cpu, box, fielddescr, mutatefielddescr) self.metainterp.history.record(rop.QUASIIMMUT_FIELD, [box], None, descr=descr) - self.generate_guard(rop.GUARD_NOT_INVALIDATED, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_NOT_INVALIDATED, + resumepc=orgpc) @arguments("box", "descr", "orgpc") def opimpl_jit_force_quasi_immutable(self, box, mutatefielddescr, orgpc): @@ -699,28 +706,46 @@ do_force_quasi_immutable(self.metainterp.cpu, box.getref_base(), mutatefielddescr) raise SwitchToBlackhole(Counters.ABORT_FORCE_QUASIIMMUT) - self.generate_guard(rop.GUARD_ISNULL, mutatebox, resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_ISNULL, mutatebox, + resumepc=orgpc) - def _nonstandard_virtualizable(self, pc, box): + def _nonstandard_virtualizable(self, pc, box, fielddescr): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] - if (self.metainterp.jitdriver_sd.virtualizable_info is None and - self.metainterp.jitdriver_sd.greenfield_info is None): - return True # can occur in case of multiple JITs - standard_box = self.metainterp.virtualizable_boxes[-1] - if standard_box is box: - return False if self.metainterp.heapcache.is_nonstandard_virtualizable(box): return True - eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, - box, standard_box) - eqbox = self.implement_guard_value(eqbox, pc) - isstandard = eqbox.getint() - if isstandard: - self.metainterp.replace_box(box, standard_box) - else: - self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) - return not isstandard + if box is self.metainterp.forced_virtualizable: + self.metainterp.forced_virtualizable = None + if (self.metainterp.jitdriver_sd.virtualizable_info is not None or + self.metainterp.jitdriver_sd.greenfield_info is not None): + standard_box = self.metainterp.virtualizable_boxes[-1] + if standard_box is box: + return False + vinfo = self.metainterp.jitdriver_sd.virtualizable_info + if vinfo is fielddescr.get_vinfo(): + eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, + box, standard_box) + eqbox = self.implement_guard_value(eqbox, pc) + isstandard = eqbox.getint() + if isstandard: + self.metainterp.replace_box(box, standard_box) + return False + if not self.metainterp.heapcache.is_unescaped(box): + self.emit_force_virtualizable(fielddescr, box) + self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) + return True + + def emit_force_virtualizable(self, fielddescr, box): + vinfo = fielddescr.get_vinfo() + token_descr = vinfo.vable_token_descr + mi = 
self.metainterp + tokenbox = mi.execute_and_record(rop.GETFIELD_GC, token_descr, box) + condbox = mi.execute_and_record(rop.PTR_NE, None, tokenbox, + history.CONST_NULL) + funcbox = ConstInt(rffi.cast(lltype.Signed, vinfo.clear_vable_ptr)) + calldescr = vinfo.clear_vable_descr + self.execute_varargs(rop.COND_CALL, [condbox, funcbox, box], + calldescr, False, False) def _get_virtualizable_field_index(self, fielddescr): # Get the index of a fielddescr. Must only be called for @@ -730,7 +755,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_vable(self, box, fielddescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fielddescr): return self._opimpl_getfield_gc_any(box, fielddescr) self.metainterp.check_synchronized_virtualizable() index = self._get_virtualizable_field_index(fielddescr) @@ -742,7 +767,7 @@ @arguments("box", "box", "descr", "orgpc") def _opimpl_setfield_vable(self, box, valuebox, fielddescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fielddescr): return self._opimpl_setfield_gc_any(box, valuebox, fielddescr) index = self._get_virtualizable_field_index(fielddescr) self.metainterp.virtualizable_boxes[index] = valuebox @@ -772,7 +797,7 @@ @arguments("box", "box", "descr", "descr", "orgpc") def _opimpl_getarrayitem_vable(self, box, indexbox, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) return self._opimpl_getarrayitem_gc_any(arraybox, indexbox, adescr) self.metainterp.check_synchronized_virtualizable() @@ -786,7 +811,7 @@ @arguments("box", "box", "box", "descr", "descr", "orgpc") def _opimpl_setarrayitem_vable(self, box, indexbox, valuebox, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) self._opimpl_setarrayitem_gc_any(arraybox, indexbox, valuebox, adescr) @@ -802,7 +827,7 @@ @arguments("box", "descr", "descr", "orgpc") def opimpl_arraylen_vable(self, box, fdescr, adescr, pc): - if self._nonstandard_virtualizable(pc, box): + if self._nonstandard_virtualizable(pc, box, fdescr): arraybox = self._opimpl_getfield_gc_any(box, fdescr) return self.opimpl_arraylen_gc(arraybox, adescr) vinfo = self.metainterp.jitdriver_sd.virtualizable_info @@ -958,8 +983,9 @@ promoted_box = resbox.constbox() # This is GUARD_VALUE because GUARD_TRUE assumes the existance # of a label when computing resumepc - self.generate_guard(rop.GUARD_VALUE, resbox, [promoted_box], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_VALUE, resbox, + [promoted_box], + resumepc=orgpc) self.metainterp.replace_box(box, constbox) return constbox @@ -971,7 +997,8 @@ def opimpl_guard_class(self, box, orgpc): clsbox = self.cls_of_box(box) if not self.metainterp.heapcache.is_class_known(box): - self.generate_guard(rop.GUARD_CLASS, box, [clsbox], resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_CLASS, box, [clsbox], + resumepc=orgpc) self.metainterp.heapcache.class_now_known(box) return clsbox @@ -989,7 +1016,7 @@ def opimpl_jit_merge_point(self, jdindex, greenboxes, jcposition, redboxes, orgpc): resumedescr = compile.ResumeAtPositionDescr() - self.capture_resumedata(resumedescr, orgpc) + self.metainterp.capture_resumedata(resumedescr, orgpc) any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = 
self.metainterp.staticdata.jitdrivers_sd[jdindex] @@ -1071,8 +1098,8 @@ # xxx hack if not self.metainterp.heapcache.is_class_known(exc_value_box): clsbox = self.cls_of_box(exc_value_box) - self.generate_guard(rop.GUARD_CLASS, exc_value_box, [clsbox], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_CLASS, exc_value_box, + [clsbox], resumepc=orgpc) self.metainterp.class_of_last_exc_is_const = True self.metainterp.last_exc_value_box = exc_value_box self.metainterp.popframe() @@ -1271,43 +1298,6 @@ except ChangeFrame: pass - def generate_guard(self, opnum, box=None, extraargs=[], resumepc=-1): - if isinstance(box, Const): # no need for a guard - return - metainterp = self.metainterp - if box is not None: - moreargs = [box] + extraargs - else: - moreargs = list(extraargs) - metainterp_sd = metainterp.staticdata - if opnum == rop.GUARD_NOT_FORCED: - resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, - metainterp.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = compile.ResumeGuardNotInvalidated() - else: - resumedescr = compile.ResumeGuardDescr() - guard_op = metainterp.history.record(opnum, moreargs, None, - descr=resumedescr) - self.capture_resumedata(resumedescr, resumepc) - self.metainterp.staticdata.profiler.count_ops(opnum, Counters.GUARDS) - # count - metainterp.attach_debug_info(guard_op) - return guard_op - - def capture_resumedata(self, resumedescr, resumepc=-1): - metainterp = self.metainterp - virtualizable_boxes = None - if (metainterp.jitdriver_sd.virtualizable_info is not None or - metainterp.jitdriver_sd.greenfield_info is not None): - virtualizable_boxes = metainterp.virtualizable_boxes - saved_pc = self.pc - if resumepc >= 0: - self.pc = resumepc - resume.capture_resumedata(metainterp.framestack, virtualizable_boxes, - metainterp.virtualref_boxes, resumedescr) - self.pc = saved_pc - def implement_guard_value(self, box, orgpc): """Promote the given Box into a Const. 
Note: be careful, it's a bit unclear what occurs if a single opcode needs to generate @@ -1316,8 +1306,8 @@ return box # no promotion needed, already a Const else: promoted_box = box.constbox() - self.generate_guard(rop.GUARD_VALUE, box, [promoted_box], - resumepc=orgpc) + self.metainterp.generate_guard(rop.GUARD_VALUE, box, [promoted_box], + resumepc=orgpc) self.metainterp.replace_box(box, promoted_box) return promoted_box @@ -1411,7 +1401,7 @@ if resbox is not None: self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call() - self.generate_guard(rop.GUARD_NOT_FORCED, None) + self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) if vablebox is not None: self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) self.metainterp.handle_possible_exception() @@ -1660,6 +1650,7 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None + self.forced_virtualizable = None self.partial_trace = None self.retracing_from = -1 self.call_pure_results = args_dict_box() @@ -1783,6 +1774,45 @@ print jitcode.name raise AssertionError + def generate_guard(self, opnum, box=None, extraargs=[], resumepc=-1): + if isinstance(box, Const): # no need for a guard + return + if box is not None: + moreargs = [box] + extraargs + else: + moreargs = list(extraargs) + metainterp_sd = self.staticdata + if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: + resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, + self.jitdriver_sd) + elif opnum == rop.GUARD_NOT_INVALIDATED: + resumedescr = compile.ResumeGuardNotInvalidated() + else: + resumedescr = compile.ResumeGuardDescr() + guard_op = self.history.record(opnum, moreargs, None, + descr=resumedescr) + self.capture_resumedata(resumedescr, resumepc) + self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) + # count + self.attach_debug_info(guard_op) + return guard_op + + def capture_resumedata(self, resumedescr, resumepc=-1): + virtualizable_boxes = None + if (self.jitdriver_sd.virtualizable_info is not None or + self.jitdriver_sd.greenfield_info is not None): + virtualizable_boxes = self.virtualizable_boxes + saved_pc = 0 + if self.framestack: + frame = self.framestack[-1] + saved_pc = frame.pc + if resumepc >= 0: + frame.pc = resumepc + resume.capture_resumedata(self.framestack, virtualizable_boxes, + self.virtualref_boxes, resumedescr) + if self.framestack: + self.framestack[-1].pc = saved_pc + def create_empty_history(self): self.history = history.History() self.staticdata.stats.set_history(self.history) @@ -2253,8 +2283,8 @@ self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_done_with_this_frame(self, exitbox): - self.gen_store_back_in_virtualizable() # temporarily put a JUMP to a pseudo-loop + self.store_token_in_vable() sd = self.staticdata result_type = self.jitdriver_sd.result_type if result_type == history.VOID: @@ -2280,8 +2310,24 @@ if target_token is not token: compile.giveup() + def store_token_in_vable(self): + vinfo = self.jitdriver_sd.virtualizable_info + if vinfo is None: + return + vbox = self.virtualizable_boxes[-1] + if vbox is self.forced_virtualizable: + return # we already forced it by hand + force_token_box = history.BoxPtr() + # in case the force_token has not been recorded, record it here + # to make sure we know the virtualizable can be broken. 
However, the + # contents of the virtualizable should be generally correct + self.history.record(rop.FORCE_TOKEN, [], force_token_box) + self.history.record(rop.SETFIELD_GC, [vbox, force_token_box], + None, descr=vinfo.vable_token_descr) + self.generate_guard(rop.GUARD_NOT_FORCED_2, None) + def compile_exit_frame_with_exception(self, valuebox): - self.gen_store_back_in_virtualizable() + self.store_token_in_vable() sd = self.staticdata token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr self.history.record(rop.FINISH, [valuebox], None, descr=token) @@ -2420,27 +2466,25 @@ self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL def handle_possible_exception(self): - frame = self.framestack[-1] if self.last_exc_value_box is not None: exception_box = self.cpu.ts.cls_of_box(self.last_exc_value_box) - op = frame.generate_guard(rop.GUARD_EXCEPTION, - None, [exception_box]) + op = self.generate_guard(rop.GUARD_EXCEPTION, + None, [exception_box]) assert op is not None op.result = self.last_exc_value_box self.class_of_last_exc_is_const = True self.finishframe_exception() else: - frame.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) + self.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) def handle_possible_overflow_error(self): - frame = self.framestack[-1] if self.last_exc_value_box is not None: - frame.generate_guard(rop.GUARD_OVERFLOW, None) + self.generate_guard(rop.GUARD_OVERFLOW, None) assert isinstance(self.last_exc_value_box, Const) assert self.class_of_last_exc_is_const self.finishframe_exception() else: - frame.generate_guard(rop.GUARD_NO_OVERFLOW, None) + self.generate_guard(rop.GUARD_NO_OVERFLOW, None) def assert_no_exception(self): assert self.last_exc_value_box is None @@ -2467,12 +2511,13 @@ if vinfo is not None: self.virtualizable_boxes = virtualizable_boxes # just jumped away from assembler (case 4 in the comment in - # virtualizable.py) into tracing (case 2); check that vable_token - # is and stays NULL. Note the call to reset_vable_token() in - # warmstate.py. 
+ # virtualizable.py) into tracing (case 2); if we get the + # virtualizable from somewhere strange it might not be forced, + # do it virtualizable_box = self.virtualizable_boxes[-1] virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box) - assert not vinfo.is_token_nonnull_gcref(virtualizable) + if vinfo.is_token_nonnull_gcref(virtualizable): + vinfo.reset_token_gcref(virtualizable) # fill the virtualizable with the local boxes self.synchronize_virtualizable() # @@ -2508,11 +2553,20 @@ virtualizable) self.virtualizable_boxes.append(virtualizable_box) - def gen_store_back_in_virtualizable(self): + def gen_store_back_in_vable(self, box): vinfo = self.jitdriver_sd.virtualizable_info if vinfo is not None: # xxx only write back the fields really modified vbox = self.virtualizable_boxes[-1] + if vbox is not box: + # ignore the hint on non-standard virtualizable + # specifically, ignore it on a virtual + return + if self.forced_virtualizable is not None: + # this can happen only in strange cases, but we don't care + # it was already forced + return + self.forced_virtualizable = vbox for i in range(vinfo.num_static_extra_boxes): fieldbox = self.virtualizable_boxes[i] descr = vinfo.static_field_descrs[i] @@ -2529,6 +2583,9 @@ self.execute_and_record(rop.SETARRAYITEM_GC, descr, abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) + # we're during tracing, so we should not execute it + self.history.record(rop.SETFIELD_GC, [vbox, self.cpu.ts.CONST_NULL], + None, descr=vinfo.vable_token_descr) def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -397,6 +397,7 @@ 'GUARD_NO_OVERFLOW/0d', 'GUARD_OVERFLOW/0d', 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set + 'GUARD_NOT_FORCED_2/0d', # same as GUARD_NOT_FORCED, but for finish() 'GUARD_NOT_INVALIDATED/0d', '_GUARD_LAST', # ----- end of guard operations ----- @@ -488,6 +489,8 @@ 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', 'MARK_OPAQUE_PTR/1b', + # this one has no *visible* side effect, since the virtualizable + # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'SETARRAYITEM_GC/3d', diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -17,6 +17,9 @@ # because it needs to support optimize.py which encodes virtuals with # arbitrary cycles and also to compress the information +class AlreadyForced(Exception): + pass + class Snapshot(object): __slots__ = ('prev', 'boxes') @@ -51,20 +54,24 @@ def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, storage): - n = len(framestack)-1 - top = framestack[n] - _ensure_parent_resumedata(framestack, n) - frame_info_list = FrameInfo(top.parent_resumedata_frame_info_list, - top.jitcode, top.pc) - storage.rd_frame_info_list = frame_info_list - snapshot = Snapshot(top.parent_resumedata_snapshot, - top.get_list_of_active_boxes(False)) + n = len(framestack) - 1 if virtualizable_boxes is not None: boxes = virtualref_boxes + virtualizable_boxes else: boxes = virtualref_boxes[:] - snapshot = Snapshot(snapshot, boxes) - storage.rd_snapshot = snapshot + if n >= 0: + top = framestack[n] + _ensure_parent_resumedata(framestack, n) + frame_info_list = 
FrameInfo(top.parent_resumedata_frame_info_list, + top.jitcode, top.pc) + storage.rd_frame_info_list = frame_info_list + snapshot = Snapshot(top.parent_resumedata_snapshot, + top.get_list_of_active_boxes(False)) + snapshot = Snapshot(snapshot, boxes) + storage.rd_snapshot = snapshot + else: + storage.rd_frame_info_list = None + storage.rd_snapshot = Snapshot(None, boxes) # # The following is equivalent to the RPython-level declaration: @@ -1214,16 +1221,8 @@ return len(numb.nums) index = len(numb.nums) - 1 virtualizable = self.decode_ref(numb.nums[index]) - if self.resume_after_guard_not_forced == 1: - # in the middle of handle_async_forcing() - assert vinfo.is_token_nonnull_gcref(virtualizable) - vinfo.reset_token_gcref(virtualizable) - else: - # just jumped away from assembler (case 4 in the comment in - # virtualizable.py) into tracing (case 2); check that vable_token - # is and stays NULL. Note the call to reset_vable_token() in - # warmstate.py. - assert not vinfo.is_token_nonnull_gcref(virtualizable) + # just reset the token, we'll force it later + vinfo.reset_token_gcref(virtualizable) return vinfo.write_from_resume_data_partial(virtualizable, self, numb) def load_value_of_type(self, TYPE, tagged): diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -12,7 +12,7 @@ from rpython.translator.backendopt.all import backend_optimizations -def _get_jitcodes(testself, CPUClass, func, values, type_system, +def _get_jitcodes(testself, CPUClass, func, values, supports_floats=True, supports_longlong=False, supports_singlefloats=False, @@ -50,7 +50,7 @@ FakeWarmRunnerState.enable_opts = {} func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system, + rtyper = support.annotate(func, values, translationoptions=translationoptions) graphs = rtyper.annotator.translator.graphs testself.all_graphs = graphs @@ -210,7 +210,7 @@ def interp_operations(self, f, args, **kwds): # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + _get_jitcodes(self, self.CPUClass, f, args, **kwds) # try to run it with blackhole.py result1 = _run_with_blackhole(self, args) # try to run it with pyjitpl.py diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -412,7 +412,6 @@ self.i8 = i8 self.i9 = i9 - def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -445,7 +444,6 @@ self.i8 = i8 self.i9 = i9 - def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -643,7 +641,7 @@ # exactly the same logic as the previous test, but with 'frame.j' # instead of just 'j' class Frame(object): - _virtualizable2_ = ['j'] + _virtualizable_ = ['j'] def __init__(self, j): self.j = j @@ -767,9 +765,9 @@ self.val = val From noreply at buildbot.pypy.org Wed Aug 7 15:19:24 2013 From: noreply at buildbot.pypy.org (stian) Date: Wed, 7 Aug 2013 15:19:24 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int: Close branch as abandoned. Breaks too much stuff (fractions and decimals). Message-ID: <20130807131924.B985E1C00F4@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int Changeset: r65995:7f1adb56a558 Date: 2013-08-07 15:18 +0200 http://bitbucket.org/pypy/pypy/changeset/7f1adb56a558/ Log: Close branch as abandoned. 
Breaks too much stuff (fractions and decimals). From noreply at buildbot.pypy.org Wed Aug 7 16:17:49 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 7 Aug 2013 16:17:49 +0200 (CEST) Subject: [pypy-commit] pypy default: don't include 'identity' in the greens of numpy_axis_reduce: it is useless because it is used only at the first iteration of the loop, and bad because we get a different instance of W_*Box every time we run it, which means that we compile the same loop again and again Message-ID: <20130807141749.CE5231C073E@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65996:891ccccca71d Date: 2013-08-07 16:17 +0200 http://bitbucket.org/pypy/pypy/changeset/891ccccca71d/ Log: don't include 'identity' in the greens of numpy_axis_reduce: it is useless because it is used only at the first iteration of the loop, and bad because we get a different instance of W_*Box every time we run it, which means that we compile the same loop again and again diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -215,8 +215,7 @@ axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', greens=['shapelen', - 'func', 'dtype', - 'identity'], + 'func', 'dtype'], reds='auto') def do_axis_reduce(shape, func, arr, dtype, axis, out, identity, cumultative, @@ -232,8 +231,7 @@ shapelen = len(shape) while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=dtype, identity=identity, - ) + dtype=dtype) w_val = arr_iter.getitem().convert_to(dtype) if out_iter.first_line: if identity is not None: diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -6,7 +6,7 @@ import py from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import reset_stats +from rpython.jit.metainterp.warmspot import reset_stats, get_stats from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray @@ -35,9 +35,10 @@ cls.code_mapping = d cls.codes = allcodes - def run(self, name): + def compile_graph(self): + if self.graph is not None: + return space = FakeSpace() - i = self.code_mapping[name] codes = self.codes def f(i): @@ -57,14 +58,18 @@ raise TypeError(w_res) if self.graph is None: - interp, graph = self.meta_interp(f, [i], + interp, graph = self.meta_interp(f, [0], listops=True, backendopt=True, graph_and_interp_only=True) self.__class__.interp = interp self.__class__.graph = graph + + def run(self, name): + self.compile_graph() reset_stats() pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) py.test.skip("don't run for now") return retval @@ -134,6 +139,29 @@ 'int_add': 3, }) + def test_reduce_compile_only_once(self): + self.compile_graph() + reset_stats() + pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping['sum'] + # run it twice + retval = self.interp.eval_graph(self.graph, [i]) + retval = self.interp.eval_graph(self.graph, [i]) + # check that we got only one loop + assert len(get_stats().loops) == 1 + + def test_reduce_axis_compile_only_once(self): + self.compile_graph() + reset_stats() + 
pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping['axissum'] + # run it twice + retval = self.interp.eval_graph(self.graph, [i]) + retval = self.interp.eval_graph(self.graph, [i]) + # check that we got only one loop + assert len(get_stats().loops) == 1 + + def define_prod(): return """ a = |30| From noreply at buildbot.pypy.org Wed Aug 7 16:58:52 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Aug 2013 16:58:52 +0200 (CEST) Subject: [pypy-commit] benchmarks single-run: I think I got it right, but who knows - try to kill the implicit warmup Message-ID: <20130807145852.8A51A1C073E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r229:4ab1e9967170 Date: 2013-08-07 16:58 +0200 http://bitbucket.org/pypy/benchmarks/changeset/4ab1e9967170/ Log: I think I got it right, but who knows - try to kill the implicit warmup diff --git a/bench-data.json b/bench-data.json --- a/bench-data.json +++ b/bench-data.json @@ -5,6 +5,7 @@ "description": "Brute force n-queens solver." }, "bm_chameleon": { + "total_runs": 500 }, "bm_mako": { }, @@ -12,7 +13,8 @@ "description": "Creates chaosgame-like fractals" }, "cpython_doc": { - "description": "Run sphinx over cpython documentation" + "description": "Run sphinx over cpython documentation", + "total_runs": 1 }, "crypto_pyaes": { "description": "A pure python implementation of AES" @@ -30,6 +32,10 @@ "float": { "description": "Creates an array of points using circular projection and then normalizes and maximizes them. Floating-point heavy." }, + "gcbench": { + "total_runs": 5, + "description": "Classic gcbench" + }, "genshi_text": { "description": "Genshi template rendering using text, generator heavy" }, @@ -43,7 +49,7 @@ }, "html5lib": { "warmup": 0, - "total_runs": 50, + "total_runs": 5, "description": "Parses the HTML 5 spec using html5lib." }, "json_bench": { @@ -58,7 +64,8 @@ "description": "Double-precision N-body simulation. It models the orbits of Jovian planets, using a simple symplectic-integrator." }, "pidigits": { - "description": "Computes the digits of PI. Long heavy" + "description": "Computes the digits of PI. Long heavy", + "total_runs": 5 }, "pyflate-fast": { "description": "Stand-alone pure-Python DEFLATE (gzip) and bzip2 decoder/decompressor." @@ -91,31 +98,42 @@ "description": "Uses the Spitfire template system to build a 1000x1000-cell HTML table; it differs from spitfire in that it uses .join(list) instead of cStringIO." }, "spambayes": { - "description": "Spambayes spam classification filter" + "description": "Spambayes spam classification filter", + "warmup": 1, + "total_runs": 51 }, "spectral-norm": { }, "spitfire": { - "description": "Uses the Spitfire template system to build a 100x100-cell HTML table; it differs from spitfire in that it uses .join(list) instead of cStringIO." + "description": "Uses the Spitfire template system to build a 100x100-cell HTML table; it differs from spitfire in that it uses .join(list) instead of cStringIO.", + "warmup": 2, + "total_runs": 52 }, "spitfire_cstringio": { "description": "ses the Spitfire template system to build a 1000x1000-cell HTML table, using the cStringIO module." 
}, "sympy_expand": { + "total_runs": 5, "description": "Use sympy (pure python symbolic math lib) do to expansion" }, "sympy_integrate": { + "total_runs": 5, "description": "Use sympy (pure python symbolic math lib) do to integration" }, "sympy_str": { + "total_runs": 5, "description": "Use sympy (pure python symbolic math lib) do to str() operation" }, "sympy_sum": { + "total_runs": 5, "description": "Use sympy (pure python symbolic math lib) do to summation" }, "telco": { "description": "A small program which is intended to capture the essence of a telephone company billing application, with a realistic balance between Input/Output activity and application calculations." }, + "translate": { + "description": "Translation benchmarks" + }, "trans2_annotate": { "description": "PyPy translation -O2 - annotation" }, @@ -132,18 +150,28 @@ "description": "PyPy translation -O2 - C source" }, "twisted_iteration" : { - "description": "Iterates a Twisted reactor as quickly as possible without doing any work." + "description": "Iterates a Twisted reactor as quickly as possible without doing any work.", + "total_runs": 65, + "warmup": 15 }, "twisted_names": { - "description": "Runs a DNS server with Twisted Names and then issues requests to it over loopback UDP." + "description": "Runs a DNS server with Twisted Names and then issues requests to it over loopback UDP.", + "total_runs": 65, + "warmup": 15 }, "twisted_pb": { - "description": "Runs a Perspective Broker server with a no-op method and invokes that method over loopback TCP with some strings, dictionaries, and tuples as arguments." + "description": "Runs a Perspective Broker server with a no-op method and invokes that method over loopback TCP with some strings, dictionaries, and tuples as arguments.", + "total_runs": 65, + "warmup": 15 }, "twisted_tcp": { - "description": "Connects one Twised client to one Twisted server over TCP (on the loopback interface) and then writes bytes as fast as it can." 
+ "description": "Connects one Twised client to one Twisted server over TCP (on the loopback interface) and then writes bytes as fast as it can.", + "total_runs": 65, + "warmup": 15 }, "twisted_web": { - "description": "Runs twisted web server and connects through twisted HTTP client" + "description": "Runs twisted web server and connects through twisted HTTP client", + "total_runs": 25, + "warmup": 15 } } diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -8,9 +8,9 @@ return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args) def _register_new_bm(name, bm_name, d, **opts): - def Measure(python, options): + def Measure(python, options, bench_data): bm_path = relative('own', name + '.py') - return MeasureGeneric(python, options, bm_path, **opts) + return MeasureGeneric(python, options, bench_data, bm_path, **opts) Measure.func_name = 'Measure' + name.capitalize() def BM(*args, **kwds): @@ -20,7 +20,7 @@ d[BM.func_name] = BM def _register_new_bm_twisted(name, bm_name, d, **opts): - def Measure(python, options): + def Measure(python, options, bench_data): def parser(line): number = float(line.split(" ")[0]) if name == 'tcp': @@ -30,7 +30,8 @@ else: return 100/number bm_path = relative('own', 'twisted', name + '.py') - return MeasureGeneric(python, options, bm_path, parser=parser, **opts) + return MeasureGeneric(python, options, bench_data, bm_path, + parser=parser, **opts) Measure.func_name = 'Measure' + name.capitalize() def BM(*args, **kwds): @@ -40,9 +41,9 @@ d[BM.func_name] = BM def _register_new_bm_base_only(name, bm_name, d, **opts): - def benchmark_function(python, options): + def benchmark_function(python, options, bench_data): bm_path = relative('own', name + '.py') - return MeasureGeneric(python, options, bm_path, **opts) + return MeasureGeneric(python, options, bench_data, bm_path, **opts) def BM(python, options, *args, **kwargs): try: @@ -58,19 +59,15 @@ TWISTED = [relative('lib/twisted-trunk'), relative('lib/zope.interface-3.5.3/src'), relative('own/twisted')] opts = { - 'gcbench' : {'iteration_scaling' : .10}, - 'pidigits': {'iteration_scaling' : .10}, 'eparse' : {'bm_env': {'PYTHONPATH': relative('lib/monte')}}, 'bm_mako' : {'bm_env': {'PYTHONPATH': relative('lib/mako')}}, - 'bm_chameleon': {'bm_env': {'PYTHONPATH': relative('lib/chameleon/src')}, - 'iteration_scaling': 3}, + 'bm_chameleon': {'bm_env': {'PYTHONPATH': relative('lib/chameleon/src')}}, } for name in ['expand', 'integrate', 'sum', 'str']: _register_new_bm('bm_sympy', 'sympy_' + name, globals(), bm_env={'PYTHONPATH': relative('lib/sympy')}, - extra_args=['--benchmark=' + name], - iteration_scaling=.1) + extra_args=['--benchmark=' + name]) for name in ['xml', 'text']: _register_new_bm('bm_genshi', 'genshi_' + name, @@ -84,13 +81,8 @@ _register_new_bm(name, name, globals(), **opts.get(name, {})) for name in ['names', 'iteration', 'tcp', 'pb', ]:#'web']:#, 'accepts']: - if name == 'web': - iteration_scaling = 0.2 - else: - iteration_scaling = 1.0 _register_new_bm_twisted(name, 'twisted_' + name, - globals(), bm_env={'PYTHONPATH': ':'.join(TWISTED)}, - iteration_scaling=iteration_scaling) + globals(), bm_env={'PYTHONPATH': ':'.join(TWISTED)}) _register_new_bm('spitfire', 'spitfire', globals(), extra_args=['--benchmark=spitfire_o4']) @@ -141,7 +133,7 @@ ('database', 0.4) ] -def BM_translate(python, options): +def BM_translate(python, options, bench_data): """ Run translate.py and returns a benchmark result for each of the phases. 
Note that we run it only with ``base_python`` (which corresponds to @@ -177,7 +169,7 @@ return result BM_translate.benchmark_name = 'trans2' -def BM_cpython_doc(python, options): +def BM_cpython_doc(python, options, bench_data): from unladen_swallow.perf import RawResult import subprocess, shutil diff --git a/own/bm_sympy.py b/own/bm_sympy.py --- a/own/bm_sympy.py +++ b/own/bm_sympy.py @@ -1,4 +1,5 @@ +import sys from sympy import expand, symbols, integrate, tan, summation from sympy.core.cache import clear_cache import time @@ -27,6 +28,7 @@ clear_cache() t0 = time.time() func() + print >>sys.stderr, time.time() - t0 l.append(time.time() - t0) return l diff --git a/own/twisted/benchlib.py b/own/twisted/benchlib.py --- a/own/twisted/benchlib.py +++ b/own/twisted/benchlib.py @@ -54,7 +54,7 @@ optParameters = [ ('iterations', 'n', 1, 'number of iterations', int), ('duration', 'd', 1, 'duration of each iteration', float), - ('warmup', 'w', 15, 'number of warmup iterations', int), + ('warmup', 'w', 0, 'number of warmup iterations', int), ] options = BenchmarkOptions() diff --git a/runner.py b/runner.py --- a/runner.py +++ b/runner.py @@ -22,8 +22,12 @@ def run_and_store(benchmark_set, result_filename, path, revision=0, options='', branch='default', args='', upload=False, fast=False, full_store=False): - funcs = perf.BENCH_FUNCS.copy() - funcs.update(perf._FindAllBenchmarks(benchmarks.__dict__)) + _funcs = perf.BENCH_FUNCS.copy() + _funcs.update(perf._FindAllBenchmarks(benchmarks.__dict__)) + bench_data = json.load(open('bench-data.json')) + funcs = {} + for key, value in _funcs.iteritems(): + funcs[key] = (value, bench_data[key]) opts = ['-b', ','.join(benchmark_set), '--inherit_env=PATH', '--no_charts'] @@ -146,7 +150,7 @@ benchmarks = list(BENCHMARK_SET) for benchmark in benchmarks: - if benchmark not in BENCHMARK_SET: + if benchmark not in BENCHMARK_SET and not benchmark.startswith('-'): raise WrongBenchmark(benchmark) path = options.python diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py --- a/unladen_swallow/perf.py +++ b/unladen_swallow/perf.py @@ -405,7 +405,7 @@ ### Utility functions def SimpleBenchmark(benchmark_function, python, options, - *args, **kwargs): + bench_data, *args, **kwargs): """Abstract out the body for most simple benchmarks. Example usage: @@ -426,12 +426,12 @@ Comes with string_representation method. """ try: - data = benchmark_function(python, options, + data = benchmark_function(python, options, bench_data, *args, **kwargs) except subprocess.CalledProcessError, e: return ResultError(e) - return CompareBenchmarkData(data, options) + return CompareBenchmarkData(data, options, bench_data) def ShortenUrl(url): @@ -577,7 +577,7 @@ return fixed_env -def CompareMultipleRuns(times, options): +def CompareMultipleRuns(times, options, bench_data): """Compare multiple control vs experiment runs of the same benchmark. Args: @@ -596,15 +596,16 @@ # below. return SimpleResult(times[0]) - times = sorted(times) - - min_time = times[0] - avg_time = avg(times) - std_time = SampleStdDev(times) + min_time = sorted(times)[0] + warmup = bench_data.get('warmup', 0) + if bench_data.get('legacy_multiplier'): + times = [time * bench_data['legacy_multiplier'] for time in times] + avg_time = avg(times[warmup:]) + std_time = SampleStdDev(times[warmup:]) return Result(times, min_time, avg_time, std_time) -def CompareBenchmarkData(data, options): +def CompareBenchmarkData(data, options, bench_data): """Compare performance and memory usage. 
Args: @@ -625,7 +626,7 @@ return CompareMemoryUsage(base_mem, changed_mem, options) return "Benchmark does not report memory usage yet" - return CompareMultipleRuns(times, options) + return CompareMultipleRuns(times, options, bench_data) def CallAndCaptureOutput(command, env=None, track_memory=False, inherit_env=[]): @@ -663,8 +664,8 @@ return result, mem_usage -def MeasureGeneric(python, options, bm_path, bm_env=None, - extra_args=[], iteration_scaling=1, parser=float): +def MeasureGeneric(python, options, bench_data, bm_path, bm_env=None, + extra_args=[], parser=float): """Abstract measurement function for Unladen's bm_* scripts. Based on the values of options.fast/rigorous, will pass -n {5,50,100} to @@ -690,12 +691,13 @@ if bm_env is None: bm_env = {} - trials = 50 + trials = bench_data.get('total_runs', 50) + warmup = bench_data.get('warmup', 0) if options.rigorous: - trials = 100 + trials = (trials - warmup) * 2 + warmup elif options.fast: - trials = 5 - trials = max(1, int(trials * iteration_scaling)) + trials = (trials - warmup) // 10 + warmup + trials = max(1, trials) RemovePycs() command = python + [bm_path, "-n", trials] + extra_args @@ -708,84 +710,7 @@ ### Benchmarks -_PY_BENCH_TOTALS_LINE = re.compile(""" - Totals:\s+(?P\d+)ms\s+ - (?P\d+)ms\s+ - \S+\s+ # Percent change, which we re-compute - (?P\d+)ms\s+ - (?P\d+)ms\s+ - \S+ # Second percent change, also re-computed - """, re.X) -def MungePyBenchTotals(line): - m = _PY_BENCH_TOTALS_LINE.search(line) - if m: - min_base, min_changed, avg_base, avg_changed = map(float, m.group( - "min_base", "min_changed", "avg_base", "avg_changed")) - delta_min = TimeDelta(min_base, min_changed) - delta_avg = TimeDelta(avg_base, avg_changed) - return (("Min: %(min_base)d -> %(min_changed)d: %(delta_min)s\n" + - "Avg: %(avg_base)d -> %(avg_changed)d: %(delta_avg)s") - % locals()) - return line - - -def BM_PyBench(base_python, changed_python, options): - if options.track_memory: - return "Benchmark does not report memory usage yet" - - warp = "10" - if options.rigorous: - warp = "1" - if options.fast: - warp = "100" - - PYBENCH_PATH = Relative("performance/pybench/pybench.py") - PYBENCH_ENV = BuildEnv({"PYTHONPATH": ""}, inherit_env=options.inherit_env) - - try: - with contextlib.nested(open(os.devnull, "wb"), - TemporaryFilename(prefix="baseline."), - TemporaryFilename(prefix="changed.") - ) as (dev_null, base_pybench, changed_pybench): - RemovePycs() - subprocess.check_call(LogCall(changed_python + [ - PYBENCH_PATH, - "-w", warp, - "-f", changed_pybench, - ]), stdout=dev_null, - env=PYBENCH_ENV) - RemovePycs() - subprocess.check_call(LogCall(base_python + [ - PYBENCH_PATH, - "-w", warp, - "-f", base_pybench, - ]), stdout=dev_null, - env=PYBENCH_ENV) - comparer = subprocess.Popen(base_python + [ - PYBENCH_PATH, - "--debug", - "-s", base_pybench, - "-c", changed_pybench, - ], stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=PYBENCH_ENV) - result, err = comparer.communicate() - if comparer.returncode != 0: - return "pybench died: " + err - except subprocess.CalledProcessError, e: - return str(e) - - if options.verbose: - return result - else: - for line in result.splitlines(): - if line.startswith("Totals:"): - return MungePyBenchTotals(line) - # The format's wrong... 
- return result - - -def Measure2to3(python, options): +def Measure2to3(python, options, bench_data): FAST_TARGET = Relative("lib/2to3/lib2to3/refactor.py") TWO_TO_THREE_PROG = Relative("lib/2to3/2to3") TWO_TO_THREE_DIR = Relative("lib/2to3") @@ -836,24 +761,25 @@ return times, mem_usage -def BM_2to3(*args, **kwargs): +# XXX we should enable this one +def _BM_2to3(*args, **kwargs): return SimpleBenchmark(Measure2to3, *args, **kwargs) DJANGO_DIR = Relative("lib/django") -def MeasureDjango(python, options): +def MeasureDjango(python, options, bench_data): bm_path = Relative("performance/bm_django.py") bm_env = {"PYTHONPATH": DJANGO_DIR} - return MeasureGeneric(python, options, bm_path, bm_env) + return MeasureGeneric(python, options, bench_data, bm_path, bm_env) def BM_Django(*args, **kwargs): return SimpleBenchmark(MeasureDjango, *args, **kwargs) -def MeasureRietveld(python, options): +def MeasureRietveld(python, options, bench_data): PYTHONPATH = ":".join([DJANGO_DIR, # These paths are lifted from # lib/google_appengine.appcfg.py. Note that we use @@ -866,7 +792,7 @@ bm_path = Relative("performance/bm_rietveld.py") bm_env = {"PYTHONPATH": PYTHONPATH, "DJANGO_SETTINGS_MODULE": "settings"} - return MeasureGeneric(python, options, bm_path, bm_env) + return MeasureGeneric(python, options, bench_data, bm_path, bm_env) def BM_Rietveld(*args, **kwargs): @@ -914,7 +840,7 @@ return psyco_build_dir -def MeasureSpitfire(python, options, env=None, extra_args=[]): +def MeasureSpitfire(python, options, bench_data, env=None, extra_args=[]): """Use Spitfire to test a Python binary's performance. Args: @@ -930,7 +856,7 @@ memory usage samples in kilobytes. """ bm_path = Relative("performance/bm_spitfire.py") - return MeasureGeneric(python, options, bm_path, env, extra_args) + return MeasureGeneric(python, options, bench_data, bm_path, env, extra_args) def MeasureSpitfireWithPsyco(python, options): @@ -967,83 +893,19 @@ return SimpleBenchmark(MeasureSpitfireWithPsyco, *args, **kwargs) -def BM_SlowSpitfire(python, options): +def BM_SlowSpitfire(python, options, bench_data): extra_args = ["--disable_psyco"] spitfire_env = {"PYTHONPATH": Relative("lib/spitfire")} try: - data = MeasureSpitfire(python, options, + data = MeasureSpitfire(python, options, bench_data, spitfire_env, extra_args) except subprocess.CalledProcessError, e: return str(e) - return CompareBenchmarkData(data, options) + return CompareBenchmarkData(data, options, bench_data) - -def MeasurePickle(python, options, extra_args): - """Test the performance of Python's pickle implementations. - - Args: - python: prefix of a command line for the Python binary. - options: optparse.Values instance. - extra_args: list of arguments to append to the command line. - - Returns: - (perf_data, mem_usage), where perf_data is a list of floats, each the - time it took to run the pickle test once; mem_usage is a list of - memory usage samples in kilobytes. - """ - bm_path = Relative("performance/bm_pickle.py") - return MeasureGeneric(python, options, bm_path, extra_args=extra_args) - - -def _PickleBenchmark(base_python, changed_python, options, extra_args): - """Test the performance of Python's pickle implementations. - - Args: - base_python: prefix of a command line for the reference - Python binary. - changed_python: prefix of a command line for the - experimental Python binary. - options: optparse.Values instance. - extra_args: list of arguments to append to the command line. 
- - Returns: - Summary of whether the experiemental Python is better/worse than the - baseline. - """ - return SimpleBenchmark(MeasurePickle, - base_python, changed_python, options, extra_args) - - -def BM_Pickle(base_python, changed_python, options): - args = ["--use_cpickle", "pickle"] - return _PickleBenchmark(base_python, changed_python, options, args) - -def BM_Unpickle(base_python, changed_python, options): - args = ["--use_cpickle", "unpickle"] - return _PickleBenchmark(base_python, changed_python, options, args) - -def BM_Pickle_List(base_python, changed_python, options): - args = ["--use_cpickle", "pickle_list"] - return _PickleBenchmark(base_python, changed_python, options, args) - -def BM_Unpickle_List(base_python, changed_python, options): - args = ["--use_cpickle", "unpickle_list"] - return _PickleBenchmark(base_python, changed_python, options, args) - -def BM_Pickle_Dict(base_python, changed_python, options): - args = ["--use_cpickle", "pickle_dict"] - return _PickleBenchmark(base_python, changed_python, options, args) - -def BM_SlowPickle(base_python, changed_python, options): - return _PickleBenchmark(base_python, changed_python, options, ["pickle"]) - -def BM_SlowUnpickle(base_python, changed_python, options): - return _PickleBenchmark(base_python, changed_python, options, ["unpickle"]) - - -def MeasureAi(python, options): +def MeasureAi(python, options, bench_data): """Test the performance of some small AI problem solvers. Args: @@ -1056,228 +918,12 @@ memory usage samples in kilobytes. """ bm_path = Relative("performance/bm_ai.py") - return MeasureGeneric(python, options, bm_path) - + return MeasureGeneric(python, options, bench_data, bm_path) def BM_Ai(*args, **kwargs): return SimpleBenchmark(MeasureAi, *args, **kwargs) - -def _StartupPython(command, mem_usage, track_memory, inherit_env): - startup_env = BuildEnv(inherit_env=inherit_env) - if not track_memory: - subprocess.check_call(command, env=startup_env) - else: - subproc = subprocess.Popen(command, env=startup_env) - future = MemoryUsageFuture(subproc.pid) - if subproc.wait() != 0: - raise RuntimeError("Startup benchmark died") - mem_usage.extend(future.GetMemoryUsage()) - - -def MeasureStartup(python, cmd_opts, num_loops, track_memory, inherit_env): - times = [] - work = "" - if track_memory: - # Without this, Python may start and exit before the memory sampler - # thread has time to work. We can't just do 'time.sleep(x)', because - # under -S, 'import time' fails. 
- work = "for _ in xrange(200000): pass" - command = python + cmd_opts + ["-c", work] - mem_usage = [] - info("Running `%s` %d times", command, num_loops * 20) - for _ in xrange(num_loops): - t0 = time.time() - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - _StartupPython(command, mem_usage, track_memory, inherit_env) - t1 = time.time() - times.append(t1 - t0) - if not track_memory: - mem_usage = None - return times, mem_usage - - -def BM_normal_startup(base_python, changed_python, options): - if options.rigorous: - num_loops = 100 - elif options.fast: - num_loops = 5 - else: - num_loops = 50 - - opts = [] - changed_data = MeasureStartup(changed_python, opts, num_loops, - options.track_memory, options.inherit_env) - base_data = MeasureStartup(base_python, opts, num_loops, - options.track_memory, options.inherit_env) - - return CompareBenchmarkData(base_data, changed_data, options) - - -def BM_startup_nosite(base_python, changed_python, options): - if options.rigorous: - num_loops = 200 - elif options.fast: - num_loops = 10 - else: - num_loops = 100 - - opts = ["-S"] - changed_data = MeasureStartup(changed_python, opts, num_loops, - options.track_memory, options.inherit_env) - base_data = MeasureStartup(base_python, opts, num_loops, - options.track_memory, options.inherit_env) - - return CompareBenchmarkData(base_data, changed_data, options) - - -def MeasureRegexPerformance(python, options, bm_path): - """Test the performance of Python's regex engine. - - Args: - python: prefix of a command line for the Python binary. - options: optparse.Values instance. - bm_path: relative path; which benchmark script to run. - - Returns: - (perf_data, mem_usage), where perf_data is a list of floats, each the - time it took to run all the regexes routines once; mem_usage is a list - of memory usage samples in kilobytes. 
- """ - return MeasureGeneric(python, options, Relative(bm_path)) - - -def RegexBenchmark(base_python, changed_python, options, bm_path): - return SimpleBenchmark(MeasureRegexPerformance, - base_python, changed_python, options, bm_path) - - -def BM_regex_v8(base_python, changed_python, options): - bm_path = "performance/bm_regex_v8.py" - return RegexBenchmark(base_python, changed_python, options, bm_path) - - -def BM_regex_effbot(base_python, changed_python, options): - bm_path = "performance/bm_regex_effbot.py" - return RegexBenchmark(base_python, changed_python, options, bm_path) - - -def BM_regex_compile(base_python, changed_python, options): - bm_path = "performance/bm_regex_compile.py" - return RegexBenchmark(base_python, changed_python, options, bm_path) - - -def MeasureThreading(python, options, bm_name): - """Test the performance of Python's threading support. - - Args: - python: prefix of a command line for the Python binary. - options: optparse.Values instance. - bm_name: name of the threading benchmark to run. - - Returns: - (perf_data, mem_usage), where perf_data is a list of floats, each the - time it took to run the threading benchmark once; mem_usage is a list - of memory usage samples in kilobytes. - """ - bm_path = Relative("performance/bm_threading.py") - return MeasureGeneric(python, options, bm_path, extra_args=[bm_name]) - - -def ThreadingBenchmark(base_python, changed_python, options, bm_name): - return SimpleBenchmark(MeasureThreading, - base_python, changed_python, options, bm_name) - - -def BM_threaded_count(base_python, changed_python, options): - bm_name = "threaded_count" - return ThreadingBenchmark(base_python, changed_python, options, bm_name) - - -def BM_iterative_count(base_python, changed_python, options): - bm_name = "iterative_count" - return ThreadingBenchmark(base_python, changed_python, options, bm_name) - - -def MeasureUnpackSequence(python, options): - """Test the performance of sequence unpacking. - - Args: - python: prefix of a command line for the Python binary. - options: optparse.Values instance. - - Returns: - (perf_data, mem_usage), where perf_data is a list of floats, each the - time it took to run the threading benchmark once; mem_usage is a list - of memory usage samples in kilobytes. - """ - bm_path = Relative("performance/bm_unpack_sequence.py") - return MeasureGeneric(python, options, bm_path, iteration_scaling=1000) - - -def BM_unpack_sequence(*args, **kwargs): - return SimpleBenchmark(MeasureUnpackSequence, *args, **kwargs) - - -def MeasureCallSimple(python, options): - """Test the performance of simple function calls. - - Args: - python: prefix of a command line for the Python binary. - options: optparse.Values instance. - - Returns: - (perf_data, mem_usage), where perf_data is a list of floats, each the - time it took to run the threading benchmark once; mem_usage is a list - of memory usage samples in kilobytes. - """ - bm_path = Relative("performance/bm_call_simple.py") - return MeasureGeneric(python, options, bm_path) - - -def BM_call_simple(*args, **kwargs): - return SimpleBenchmark(MeasureCallSimple, *args, **kwargs) - - -def MeasureNbody(python, options): - """Test the performance of math operations using an n-body benchmark. - - Args: - python: prefix of a command line for the Python binary. - options: optparse.Values instance. - - Returns: - (perf_data, mem_usage), where perf_data is a list of floats, each the - time it took to run the benchmark loop once; mem_usage is a list - of memory usage samples in kilobytes. 
- """ - bm_path = Relative("performance/bm_nbody.py") - return MeasureGeneric(python, options, bm_path) - - -def BM_nbody(*args, **kwargs): - return SimpleBenchmark(MeasureNbody, *args, **kwargs) - - -def MeasureSpamBayes(python, options): +def MeasureSpamBayes(python, options, bench_data): """Test the performance of the SpamBayes spam filter and its tokenizer. Args: @@ -1292,14 +938,14 @@ pypath = ":".join([Relative("lib/spambayes"), Relative("lib/lockfile")]) bm_path = Relative("performance/bm_spambayes.py") bm_env = {"PYTHONPATH": pypath} - return MeasureGeneric(python, options, bm_path, bm_env) + return MeasureGeneric(python, options, bench_data, bm_path, bm_env) def BM_spambayes(*args, **kwargs): return SimpleBenchmark(MeasureSpamBayes, *args, **kwargs) -def MeasureHtml5lib(python, options): +def MeasureHtml5lib(python, options, bench_data): """Test the performance of the html5lib HTML 5 parser. Args: @@ -1313,16 +959,14 @@ """ bm_path = Relative("performance/bm_html5lib.py") bm_env = {"PYTHONPATH": Relative("lib/html5lib")} - return MeasureGeneric(python, options, bm_path, bm_env, - iteration_scaling=0.10) - + return MeasureGeneric(python, options, bench_data, bm_path, bm_env) def BM_html5lib(*args, **kwargs): return SimpleBenchmark(MeasureHtml5lib, *args, **kwargs) -def MeasureRichards(python, options): +def MeasureRichards(python, options, bench_data): bm_path = Relative("performance/bm_richards.py") - return MeasureGeneric(python, options, bm_path) + return MeasureGeneric(python, options, bench_data, bm_path) def BM_richards(*args, **kwargs): return SimpleBenchmark(MeasureRichards, *args, **kwargs) @@ -1341,8 +985,8 @@ # If you update the default group, be sure to update the module docstring, too. # An "all" group which includes every benchmark perf.py knows about is generated # automatically. -BENCH_GROUPS = {"default": ["2to3", "django", "nbody", "slowspitfire", - "slowpickle", "slowunpickle", "spambayes"], +BENCH_GROUPS = {"default": ["2to3", "django", "slowspitfire", + "spambayes"], "startup": ["normal_startup", "startup_nosite"], "regex": ["regex_v8", "regex_effbot", "regex_compile"], "threading": ["threaded_count", "iterative_count"], @@ -1389,7 +1033,7 @@ should_run = set() if not positive_benchmarks: - should_run = set(_ExpandBenchmarkName("default", bench_groups)) + should_run = set(bench_groups['all']) for name in positive_benchmarks: for bm in _ExpandBenchmarkName(name, bench_groups): @@ -1482,11 +1126,11 @@ results = [] for name in sorted(should_run): - func = bench_funcs[name] + func, bench_data = bench_funcs[name] print "Running %s..." % name # PyPy specific modification: let the func to return a list of results # for sub-benchmarks - bench_result = func(base_cmd_prefix, options) + bench_result = func(base_cmd_prefix, options, bench_data) name = getattr(func, 'benchmark_name', name) if isinstance(bench_result, list): for subname, subresult in bench_result: diff --git a/unladen_swallow/performance/bm_ai.py b/unladen_swallow/performance/bm_ai.py --- a/unladen_swallow/performance/bm_ai.py +++ b/unladen_swallow/performance/bm_ai.py @@ -70,10 +70,6 @@ def test_n_queens(iterations): - # Warm-up runs. 
- list(n_queens(8)) - list(n_queens(8)) - times = [] for _ in xrange(iterations): t0 = time.time() diff --git a/unladen_swallow/performance/bm_django.py b/unladen_swallow/performance/bm_django.py --- a/unladen_swallow/performance/bm_django.py +++ b/unladen_swallow/performance/bm_django.py @@ -36,10 +36,6 @@ table = [xrange(150) for _ in xrange(150)] context = Context({"table": table}) - # Warm up Django. - DJANGO_TMPL.render(context) - DJANGO_TMPL.render(context) - times = [] for _ in xrange(count): t0 = time.time() diff --git a/unladen_swallow/performance/bm_nbody.py b/unladen_swallow/performance/bm_nbody.py --- a/unladen_swallow/performance/bm_nbody.py +++ b/unladen_swallow/performance/bm_nbody.py @@ -116,10 +116,6 @@ def test_nbody(iterations): - # Warm-up runs. - report_energy() - advance(0.01, 20000) - report_energy() times = [] for _ in xrange(iterations): diff --git a/unladen_swallow/performance/bm_richards.py b/unladen_swallow/performance/bm_richards.py --- a/unladen_swallow/performance/bm_richards.py +++ b/unladen_swallow/performance/bm_richards.py @@ -21,7 +21,6 @@ def test_richards(iterations): # Warm-up r = richards.Richards() - r.run(iterations=2) times = [] for _ in xrange(iterations): diff --git a/unladen_swallow/performance/bm_rietveld.py b/unladen_swallow/performance/bm_rietveld.py --- a/unladen_swallow/performance/bm_rietveld.py +++ b/unladen_swallow/performance/bm_rietveld.py @@ -89,43 +89,11 @@ def test_rietveld(count, tmpl, context): # Warm up Django. - tmpl.render(context) - tmpl.render(context) - times = [] for _ in xrange(count): t0 = time.time() # 30 calls to render, so that we don't measure loop overhead. tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) - tmpl.render(context) t1 = time.time() times.append(t1 - t0) return times diff --git a/unladen_swallow/performance/bm_spambayes.py b/unladen_swallow/performance/bm_spambayes.py --- a/unladen_swallow/performance/bm_spambayes.py +++ b/unladen_swallow/performance/bm_spambayes.py @@ -22,10 +22,6 @@ def test_spambayes(iterations, messages, ham_classifier): - # Prime the pump. This still leaves some hot functions uncompiled; these - # will be noticed as hot during the timed loops below. - for msg in messages: - ham_classifier.score(msg) times = [] for _ in xrange(iterations): diff --git a/unladen_swallow/performance/bm_spitfire.py b/unladen_swallow/performance/bm_spitfire.py --- a/unladen_swallow/performance/bm_spitfire.py +++ b/unladen_swallow/performance/bm_spitfire.py @@ -53,10 +53,6 @@ table = [xrange(1000) for _ in xrange(1000)] - # Warm up Spitfire. 
- spitfire_tmpl_o4(search_list=[{"table": table}]).main() - spitfire_tmpl_o4(search_list=[{"table": table}]).main() - times = [] for _ in xrange(count): t0 = time.time() From noreply at buildbot.pypy.org Wed Aug 7 17:15:16 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Wed, 7 Aug 2013 17:15:16 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added the removal of the system ui process to minibluebookdebug image in case we benchmark anything Message-ID: <20130807151516.4CE981C140A@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r513:9db538bbab54 Date: 2013-08-07 14:54 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/9db538bbab54/ Log: added the removal of the system ui process to minibluebookdebug image in case we benchmark anything diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index ca00384ffd476a836212366990611cb4ce6b79cf..507444a1f410b4233595a16f56da9dd4b89c7615 GIT binary patch [cut] From noreply at buildbot.pypy.org Wed Aug 7 17:29:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Aug 2013 17:29:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix, giving a clearer error message (thanks krono). Message-ID: <20130807152913.AB6B51C3452@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65997:e98d284a2cae Date: 2013-08-07 17:28 +0200 http://bitbucket.org/pypy/pypy/changeset/e98d284a2cae/ Log: Test and fix, giving a clearer error message (thanks krono). diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -391,6 +391,7 @@ instance_level = False all_enforced_attrs = None # or a set settled = False + _detect_invalid_attrs = None def __init__(self, bookkeeper, pyobj=None, name=None, basedesc=None, classdict=None, @@ -714,6 +715,10 @@ # by changing the result's annotation (but not, of course, doing an # actual copy in the rtyper). Tested in rpython.rtyper.test.test_rlist, # test_immutable_list_out_of_instance. 
+ if self._detect_invalid_attrs and attr in self._detect_invalid_attrs: + raise Exception("field %r was migrated to %r from a subclass in " + "which it was declared as _immutable_fields_" % + (attr, self.pyobj)) search1 = '%s[*]' % (attr,) search2 = '%s?[*]' % (attr,) cdesc = self @@ -724,6 +729,14 @@ s_result.listdef.never_resize() s_copy = s_result.listdef.offspring() s_copy.listdef.mark_as_immutable() + # + cdesc = cdesc.basedesc + while cdesc is not None: + if cdesc._detect_invalid_attrs is None: + cdesc._detect_invalid_attrs = set() + cdesc._detect_invalid_attrs.add(attr) + cdesc = cdesc.basedesc + # return s_copy cdesc = cdesc.basedesc return s_result # common case diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3717,6 +3717,24 @@ a = self.RPythonAnnotator() a.build_types(f, [int]) + def test_immutable_field_subclass(self): + class Root: + pass + class A(Root): + _immutable_fields_ = '_my_lst[*]' + def __init__(self, lst): + self._my_lst = lst + def foo(x): + return len(x._my_lst) + + def f(n): + foo(A([2, n])) + foo(Root()) + + a = self.RPythonAnnotator() + e = py.test.raises(Exception, a.build_types, f, [int]) + assert "field '_my_lst' was migrated" in str(e.value) + def test_call_classes_with_noarg_init(self): class A: foo = 21 From noreply at buildbot.pypy.org Wed Aug 7 19:31:41 2013 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Wed, 7 Aug 2013 19:31:41 +0200 (CEST) Subject: [pypy-commit] pypy refine-testrunner: cleanup in filetimes Message-ID: <20130807173141.78C7E1C2442@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r65999:b38869803bca Date: 2013-08-07 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/b38869803bca/ Log: cleanup in filetimes diff --git a/testrunner/filetimes.py b/testrunner/filetimes.py --- a/testrunner/filetimes.py +++ b/testrunner/filetimes.py @@ -11,13 +11,14 @@ xml = parse(opts.junitxml) root = xml.getroot() +bugstarts = 'interpreter', 'tool', 'module' -bugstarts = 'interpreter', 'tool', 'module' + def findfile(root, classname): if not classname: return parts = classname.split('.') - + #pytest bug workaround first = parts[0] for start in bugstarts: @@ -43,8 +44,6 @@ garbageitems.append(item) - - garbage = accum.pop(None, []) if garbage: print 'garbage', sum(garbage), len(garbage) From noreply at buildbot.pypy.org Wed Aug 7 19:31:40 2013 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Wed, 7 Aug 2013 19:31:40 +0200 (CEST) Subject: [pypy-commit] pypy refine-testrunner: prepare junitmerge to be used as a module Message-ID: <20130807173140.3A8581C140A@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r65998:27f12fe4b85e Date: 2013-08-07 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/27f12fe4b85e/ Log: prepare junitmerge to be used as a module diff --git a/testrunner/junitmerge.py b/testrunner/junitmerge.py --- a/testrunner/junitmerge.py +++ b/testrunner/junitmerge.py @@ -2,7 +2,7 @@ simple scrpt for junitxml file merging """ -from lxml.etree import parse, Element, tostring +from lxml.etree import parse, Element from collections import defaultdict import argparse @@ -13,6 +13,7 @@ TEST_ITEMS = 'test', 'errors', 'skips' + def merge(files): accum = defaultdict(int) children = [] @@ -36,11 +37,16 @@ return new +def run(paths, out): + + files = map(parse, paths) + merged = merge(files) + + with 
open(out, 'wb') as fp: + merged.getroottree().write(fp) + if __name__ == '__main__': opts = parser.parse_args() - files = map(parse, opts.path) + run(opts.path, opts.out) - with open(opts.out, 'w') as fp: - fp.write(tostring(merge(files))) - From noreply at buildbot.pypy.org Wed Aug 7 20:00:58 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 7 Aug 2013 20:00:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill unused _metatype_ option in extregistry Message-ID: <20130807180058.8E2761C073E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r66000:fd16a4b769cf Date: 2013-08-07 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/fd16a4b769cf/ Log: Kill unused _metatype_ option in extregistry diff --git a/rpython/rtyper/extregistry.py b/rpython/rtyper/extregistry.py --- a/rpython/rtyper/extregistry.py +++ b/rpython/rtyper/extregistry.py @@ -13,9 +13,6 @@ if '_type_' in dict: selfcls._register_type(dict['_type_']) del selfcls._type_ - if '_metatype_' in dict: - selfcls._register_metatype(dict['_metatype_']) - del selfcls._metatype_ def _register(selfcls, dict, key): if isinstance(key, tuple): @@ -32,9 +29,6 @@ def _register_type(selfcls, key): selfcls._register(EXT_REGISTRY_BY_TYPE, key) - def _register_metatype(selfcls, key): - selfcls._register(EXT_REGISTRY_BY_METATYPE, key) - class ExtRegistryEntry(object): __metaclass__ = AutoRegisteringType @@ -120,27 +114,19 @@ EXT_REGISTRY_BY_VALUE = FlexibleWeakDict() EXT_REGISTRY_BY_TYPE = weakref.WeakKeyDictionary() -EXT_REGISTRY_BY_METATYPE = weakref.WeakKeyDictionary() # ____________________________________________________________ # Public interface to access the registry def _lookup_type_cls(tp): - try: - return EXT_REGISTRY_BY_TYPE[tp] - except (KeyError, TypeError): - return EXT_REGISTRY_BY_METATYPE[type(tp)] + return EXT_REGISTRY_BY_TYPE[tp] def lookup_type(tp): Entry = _lookup_type_cls(tp) return Entry(tp) def is_registered_type(tp): - try: - _lookup_type_cls(tp) - except KeyError: - return False - return True + return tp in EXT_REGISTRY_BY_TYPE def _lookup_cls(instance): try: diff --git a/rpython/rtyper/test/test_extregistry.py b/rpython/rtyper/test/test_extregistry.py --- a/rpython/rtyper/test/test_extregistry.py +++ b/rpython/rtyper/test/test_extregistry.py @@ -62,50 +62,6 @@ s = a.build_types(func, []) assert isinstance(s, annmodel.SomeInteger) -def test_register_metatype(): - class MetaType(type): - pass - - class RealClass(object): - __metaclass__ = MetaType - - real_class = RealClass() - - def func(): - return real_class - - class Entry(ExtRegistryEntry): - _metatype_ = MetaType - def compute_annotation(self): - assert self.type is RealClass - assert self.instance is real_class - return annmodel.SomeInteger() - - a = RPythonAnnotator() - s = a.build_types(func, []) - assert isinstance(s, annmodel.SomeInteger) - -def test_register_metatype_2(): - class MetaType(type): - pass - - class RealClass(object): - __metaclass__ = MetaType - - def func(real_class): - return real_class - - class Entry(ExtRegistryEntry): - _metatype_ = MetaType - def compute_annotation(self): - assert self.type is RealClass - assert self.instance is None - return annmodel.SomeInteger() - - a = RPythonAnnotator() - s = a.build_types(func, [RealClass]) - assert isinstance(s, annmodel.SomeInteger) - def test_register_value_with_specialization(): def dummy_func(): raiseNameError From noreply at buildbot.pypy.org Wed Aug 7 23:16:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Aug 2013 23:16:01 +0200 (CEST) Subject: 
[pypy-commit] pypy default: Update the version number here Message-ID: <20130807211601.94CCD1C1067@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66001:0fe2bc61c9a0 Date: 2013-08-07 23:15 +0200 http://bitbucket.org/pypy/pypy/changeset/0fe2bc61c9a0/ Log: Update the version number here diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.6 +Version: 0.7 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski From noreply at buildbot.pypy.org Wed Aug 7 23:21:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Aug 2013 23:21:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a hack to make "pip install readline" happy and do nothing Message-ID: <20130807212157.84B711C140A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66002:543c2a77e498 Date: 2013-08-07 23:21 +0200 http://bitbucket.org/pypy/pypy/changeset/543c2a77e498/ Log: Add a hack to make "pip install readline" happy and do nothing diff --git a/lib_pypy/readline.egg-info b/lib_pypy/readline.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/readline.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: readline +Version: 6.2.4.1 +Summary: Hack to make "pip install readline" happy and do nothing +Home-page: UNKNOWN +Author: UNKNOWN +Author-email: UNKNOWN +License: UNKNOWN +Description: UNKNOWN +Platform: UNKNOWN From noreply at buildbot.pypy.org Wed Aug 7 23:49:14 2013 From: noreply at buildbot.pypy.org (w31rd0) Date: Wed, 7 Aug 2013 23:49:14 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-inplace-op: issue 1539, added inplace array operations Message-ID: <20130807214914.08C181C2442@cobra.cs.uni-duesseldorf.de> Author: w31rd0 Branch: numpypy-inplace-op Changeset: r66003:884521ea5116 Date: 2013-08-01 17:31 -0700 http://bitbucket.org/pypy/pypy/changeset/884521ea5116/ Log: issue 1539, added inplace array operations diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -737,6 +737,27 @@ descr_gt = _binop_comp_impl(_binop_impl("greater")) descr_ge = _binop_comp_impl(_binop_impl("greater_equal")) + def _binop_inplace_impl(ufunc_name): + def impl(self, space, w_other): + w_out = self + ufunc = getattr(interp_ufuncs.get(space), ufunc_name) + return ufunc.call(space, [self, w_other, w_out]) + return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) + + descr_iadd = _binop_inplace_impl("add") + descr_isub = _binop_inplace_impl("subtract") + descr_imul = _binop_inplace_impl("multiply") + descr_idiv = _binop_inplace_impl("divide") + descr_itruediv = _binop_inplace_impl("true_divide") + descr_ifloordiv = _binop_inplace_impl("floor_divide") + descr_imod = _binop_inplace_impl("mod") + descr_ipow = _binop_inplace_impl("power") + descr_ilshift = _binop_inplace_impl("left_shift") + descr_irshift = _binop_inplace_impl("right_shift") + descr_iand = _binop_inplace_impl("bitwise_and") + descr_ior = _binop_inplace_impl("bitwise_or") + descr_ixor = _binop_inplace_impl("bitwise_xor") + def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) @@ -1007,6 +1028,20 @@ __ror__ = interp2app(W_NDimArray.descr_ror), __rxor__ = interp2app(W_NDimArray.descr_rxor), + __iadd__ = 
interp2app(W_NDimArray.descr_iadd), + __isub__ = interp2app(W_NDimArray.descr_isub), + __imul__ = interp2app(W_NDimArray.descr_imul), + __idiv__ = interp2app(W_NDimArray.descr_idiv), + __itruediv__ = interp2app(W_NDimArray.descr_itruediv), + __ifloordiv__ = interp2app(W_NDimArray.descr_ifloordiv), + __imod__ = interp2app(W_NDimArray.descr_imod), + __ipow__ = interp2app(W_NDimArray.descr_ipow), + __ilshift__ = interp2app(W_NDimArray.descr_ilshift), + __irshift__ = interp2app(W_NDimArray.descr_irshift), + __iand__ = interp2app(W_NDimArray.descr_iand), + __ior__ = interp2app(W_NDimArray.descr_ior), + __ixor__ = interp2app(W_NDimArray.descr_ixor), + __eq__ = interp2app(W_NDimArray.descr_eq), __ne__ = interp2app(W_NDimArray.descr_ne), __lt__ = interp2app(W_NDimArray.descr_lt), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -789,6 +789,49 @@ r = [1, 2] + array([1, 2]) assert (r == [2, 4]).all() + def test_inline_op_scalar(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(3)) + getattr(a, op).__call__(2) + assert id(a) == id(b) + + def test_inline_op_array(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(5)) + c = array(range(5)) + d = array(5 * [2]) + getattr(a, op).__call__(d) + assert id(a) == id(b) + reg_op = op.replace('__i', '__') + for i in range(5): + assert a[i] == getattr(c[i], reg_op).__call__(d[i]) + def test_add_list(self): from numpypy import array, ndarray a = array(range(5)) From noreply at buildbot.pypy.org Wed Aug 7 23:49:15 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Aug 2013 23:49:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in w31rd0/pypy/numpypy-inplace-op (pull request #177) Message-ID: <20130807214915.833BC1C2442@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66004:000f4bb7328b Date: 2013-08-07 23:48 +0200 http://bitbucket.org/pypy/pypy/changeset/000f4bb7328b/ Log: Merged in w31rd0/pypy/numpypy-inplace-op (pull request #177) issue 1539, added inplace array operations diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -737,6 +737,27 @@ descr_gt = _binop_comp_impl(_binop_impl("greater")) descr_ge = _binop_comp_impl(_binop_impl("greater_equal")) + def _binop_inplace_impl(ufunc_name): + def impl(self, space, w_other): + w_out = self + ufunc = getattr(interp_ufuncs.get(space), ufunc_name) + return ufunc.call(space, [self, w_other, w_out]) + return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) + + descr_iadd = _binop_inplace_impl("add") + descr_isub = _binop_inplace_impl("subtract") + descr_imul = _binop_inplace_impl("multiply") + descr_idiv = _binop_inplace_impl("divide") + descr_itruediv = _binop_inplace_impl("true_divide") + descr_ifloordiv = _binop_inplace_impl("floor_divide") + descr_imod = _binop_inplace_impl("mod") + descr_ipow = _binop_inplace_impl("power") + descr_ilshift = 
_binop_inplace_impl("left_shift") + descr_irshift = _binop_inplace_impl("right_shift") + descr_iand = _binop_inplace_impl("bitwise_and") + descr_ior = _binop_inplace_impl("bitwise_or") + descr_ixor = _binop_inplace_impl("bitwise_xor") + def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) @@ -1007,6 +1028,20 @@ __ror__ = interp2app(W_NDimArray.descr_ror), __rxor__ = interp2app(W_NDimArray.descr_rxor), + __iadd__ = interp2app(W_NDimArray.descr_iadd), + __isub__ = interp2app(W_NDimArray.descr_isub), + __imul__ = interp2app(W_NDimArray.descr_imul), + __idiv__ = interp2app(W_NDimArray.descr_idiv), + __itruediv__ = interp2app(W_NDimArray.descr_itruediv), + __ifloordiv__ = interp2app(W_NDimArray.descr_ifloordiv), + __imod__ = interp2app(W_NDimArray.descr_imod), + __ipow__ = interp2app(W_NDimArray.descr_ipow), + __ilshift__ = interp2app(W_NDimArray.descr_ilshift), + __irshift__ = interp2app(W_NDimArray.descr_irshift), + __iand__ = interp2app(W_NDimArray.descr_iand), + __ior__ = interp2app(W_NDimArray.descr_ior), + __ixor__ = interp2app(W_NDimArray.descr_ixor), + __eq__ = interp2app(W_NDimArray.descr_eq), __ne__ = interp2app(W_NDimArray.descr_ne), __lt__ = interp2app(W_NDimArray.descr_lt), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -789,6 +789,49 @@ r = [1, 2] + array([1, 2]) assert (r == [2, 4]).all() + def test_inline_op_scalar(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(3)) + getattr(a, op).__call__(2) + assert id(a) == id(b) + + def test_inline_op_array(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(5)) + c = array(range(5)) + d = array(5 * [2]) + getattr(a, op).__call__(d) + assert id(a) == id(b) + reg_op = op.replace('__i', '__') + for i in range(5): + assert a[i] == getattr(c[i], reg_op).__call__(d[i]) + def test_add_list(self): from numpypy import array, ndarray a = array(range(5)) From noreply at buildbot.pypy.org Thu Aug 8 01:26:13 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Thu, 8 Aug 2013 01:26:13 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: adding some more of minimark tests to incminimark Message-ID: <20130807232613.B00D11C1067@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66005:858acb7c946f Date: 2013-08-08 11:25 +1200 http://bitbucket.org/pypy/pypy/changeset/858acb7c946f/ Log: adding some more of minimark tests to incminimark diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1672,10 +1672,11 @@ # made incremental. 
if not self.objects_to_trace.non_empty(): + if self.objects_with_finalizers.non_empty(): + self.deal_with_objects_with_finalizers() + self.objects_to_trace.delete() - if self.objects_with_finalizers.non_empty(): - self.deal_with_objects_with_finalizers() # # Weakref support: clear the weak pointers to dying objects if self.old_objects_with_weakrefs.non_empty(): @@ -1737,9 +1738,12 @@ elif self.gc_state == STATE_FINALIZING: # XXX This is considered rare, # so should we make the calling incremental? or leave as is + + # Must be ready to start another scan + self.gc_state = STATE_SCANNING + # just in case finalizer calls collect again. self.execute_finalizers() self.num_major_collects += 1 - self.gc_state = STATE_SCANNING #END FINALIZING else: pass #XXX which exception to raise here. Should be unreachable. diff --git a/rpython/memory/test/test_incminimark_gc.py b/rpython/memory/test/test_incminimark_gc.py new file mode 100644 --- /dev/null +++ b/rpython/memory/test/test_incminimark_gc.py @@ -0,0 +1,11 @@ +from rpython.rlib.rarithmetic import LONG_BIT + +from rpython.memory.test import test_semispace_gc + +WORD = LONG_BIT // 8 + +class TestIncrementalMiniMarkGC(test_semispace_gc.TestSemiSpaceGC): + from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass + GC_CAN_SHRINK_BIG_ARRAY = False + GC_CAN_MALLOC_NONMOVABLE = True + BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD diff --git a/rpython/memory/test/test_incminimark_gc_cardmarking.py b/rpython/memory/test/test_incminimark_gc_cardmarking.py new file mode 100644 --- /dev/null +++ b/rpython/memory/test/test_incminimark_gc_cardmarking.py @@ -0,0 +1,4 @@ +from rpython.memory.test import test_incminimark_gc + +class TestIncrementalMiniMarkGCCardMarking(test_incminimark_gc.TestIncrementalMiniMarkGC): + GC_PARAMS = {'card_page_indices': 4} diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -1259,6 +1259,50 @@ res = run([]) assert res == 123 +class TestIncrementalMiniMarkGC(TestHybridGC): + gcname = "incminimark" + GC_CAN_TEST_ID = True + + class gcpolicy(gc.BasicFrameworkGcPolicy): + class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer): + from rpython.memory.gc.incminimark \ + import IncrementalMiniMarkGC as GCClass + GC_PARAMS = {'nursery_size': 32*WORD, + 'page_size': 16*WORD, + 'arena_size': 64*WORD, + 'small_request_threshold': 5*WORD, + 'large_object': 8*WORD, + 'card_page_indices': 4, + 'translated_to_c': False, + } + root_stack_depth = 200 + + def define_no_clean_setarrayitems(cls): + # The optimization find_clean_setarrayitems() in + # gctransformer/framework.py does not work with card marking. + # Check that it is turned off. 
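The finalizer reordering in the incminimark changeset above (resetting the collector to the scanning state before execute_finalizers runs) matters because a finalizer can itself trigger another collection. A toy sketch of that state-machine discipline; the states and bookkeeping here are simplified assumptions, not the real collector:

    SCANNING, MARKING, SWEEPING, FINALIZING = range(4)

    class ToyIncrementalGC(object):
        def __init__(self):
            self.state = SCANNING
            self.finalizers = []

        def collect_step(self):
            # each call performs one bounded slice of a major collection
            if self.state == SCANNING:
                self.state = MARKING
            elif self.state == MARKING:
                self.state = SWEEPING
            elif self.state == SWEEPING:
                self.state = FINALIZING
            elif self.state == FINALIZING:
                # be ready to start another scan *before* running finalizers,
                # because a finalizer may call collect() again
                self.state = SCANNING
                for fin in self.finalizers:
                    fin()

    gc = ToyIncrementalGC()
    gc.finalizers.append(lambda: None)
    for _ in range(4):
        gc.collect_step()
    assert gc.state == SCANNING     # ready for the next major collection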
+ S = lltype.GcStruct('S', ('x', lltype.Signed)) + A = lltype.GcArray(lltype.Ptr(S)) + def sub(lst): + lst[15] = lltype.malloc(S) # 'lst' is set the single mark "12-15" + lst[15].x = 123 + lst[0] = lst[15] # that would be a "clean_setarrayitem" + def f(): + lst = lltype.malloc(A, 16) # 16 > 10 + rgc.collect() + sub(lst) + null = lltype.nullptr(S) + lst[15] = null # clear, so that A() is only visible via lst[0] + rgc.collect() # -> crash + return lst[0].x + return f + + def test_no_clean_setarrayitems(self): + run = self.runner("no_clean_setarrayitems") + res = run([]) + assert res == 123 + + # ________________________________________________________________ # tagged pointers From noreply at buildbot.pypy.org Thu Aug 8 04:47:38 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 8 Aug 2013 04:47:38 +0200 (CEST) Subject: [pypy-commit] pypy default: kill unused need_const option in Bookkeeper.immutablevalue() Message-ID: <20130808024738.762731C1067@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r66006:4a467433bb51 Date: 2013-08-08 03:47 +0100 http://bitbucket.org/pypy/pypy/changeset/4a467433bb51/ Log: kill unused need_const option in Bookkeeper.immutablevalue() diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -314,7 +314,7 @@ def immutableconstant(self, const): return self.immutablevalue(const.value) - def immutablevalue(self, x, need_const=True): + def immutablevalue(self, x): """The most precise SomeValue instance that contains the immutable value x.""" # convert unbound methods to the underlying function @@ -350,69 +350,47 @@ elif tp is bytearray: result = SomeByteArray() elif tp is tuple: - result = SomeTuple(items = [self.immutablevalue(e, need_const) for e in x]) + result = SomeTuple(items = [self.immutablevalue(e) for e in x]) elif tp is float: result = SomeFloat() elif tp is list: - if need_const: - key = Constant(x) - try: - return self.immutable_cache[key] - except KeyError: - result = SomeList(ListDef(self, s_ImpossibleValue)) - self.immutable_cache[key] = result - for e in x: - result.listdef.generalize(self.immutablevalue(e)) - result.const_box = key - return result - else: - listdef = ListDef(self, s_ImpossibleValue) + key = Constant(x) + try: + return self.immutable_cache[key] + except KeyError: + result = SomeList(ListDef(self, s_ImpossibleValue)) + self.immutable_cache[key] = result for e in x: - listdef.generalize(self.immutablevalue(e, False)) - result = SomeList(listdef) + result.listdef.generalize(self.immutablevalue(e)) + result.const_box = key + return result elif tp is dict or tp is r_dict: - if need_const: - key = Constant(x) - try: - return self.immutable_cache[key] - except KeyError: - result = SomeDict(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) - self.immutable_cache[key] = result - if tp is r_dict: - s_eqfn = self.immutablevalue(x.key_eq) - s_hashfn = self.immutablevalue(x.key_hash) - result.dictdef.dictkey.update_rdict_annotations(s_eqfn, - s_hashfn) - seen_elements = 0 - while seen_elements != len(x): - items = x.items() - for ek, ev in items: - result.dictdef.generalize_key(self.immutablevalue(ek)) - result.dictdef.generalize_value(self.immutablevalue(ev)) - result.dictdef.seen_prebuilt_key(ek) - seen_elements = len(items) - # if the dictionary grew during the iteration, - # start over again - result.const_box = key - return result - else: - dictdef = DictDef(self, - s_ImpossibleValue, 
- s_ImpossibleValue, - is_r_dict = tp is r_dict) + key = Constant(x) + try: + return self.immutable_cache[key] + except KeyError: + result = SomeDict(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) + self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) s_hashfn = self.immutablevalue(x.key_hash) - dictdef.dictkey.update_rdict_annotations(s_eqfn, - s_hashfn) - for ek, ev in x.iteritems(): - dictdef.generalize_key(self.immutablevalue(ek, False)) - dictdef.generalize_value(self.immutablevalue(ev, False)) - dictdef.seen_prebuilt_key(ek) - result = SomeDict(dictdef) + result.dictdef.dictkey.update_rdict_annotations(s_eqfn, + s_hashfn) + seen_elements = 0 + while seen_elements != len(x): + items = x.items() + for ek, ev in items: + result.dictdef.generalize_key(self.immutablevalue(ek)) + result.dictdef.generalize_value(self.immutablevalue(ev)) + result.dictdef.seen_prebuilt_key(ek) + seen_elements = len(items) + # if the dictionary grew during the iteration, + # start over again + result.const_box = key + return result elif tp is weakref.ReferenceType: x1 = x() if x1 is None: @@ -441,11 +419,11 @@ if hasattr(x, 'im_self') and hasattr(x, 'im_func'): # on top of PyPy, for cases like 'l.append' where 'l' is a # global constant list, the find_method() returns non-None - s_self = self.immutablevalue(x.im_self, need_const) + s_self = self.immutablevalue(x.im_self) result = s_self.find_method(x.im_func.__name__) elif hasattr(x, '__self__') and x.__self__ is not None: # for cases like 'l.append' where 'l' is a global constant list - s_self = self.immutablevalue(x.__self__, need_const) + s_self = self.immutablevalue(x.__self__) result = s_self.find_method(x.__name__) assert result is not None else: @@ -469,8 +447,7 @@ return s_None else: raise Exception("Don't know how to represent %r" % (x,)) - if need_const: - result.const = x + result.const = x return result def getdesc(self, pyobj): From noreply at buildbot.pypy.org Thu Aug 8 07:38:31 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Thu, 8 Aug 2013 07:38:31 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: partially changed sweeping to incremental Message-ID: <20130808053831.3A9221C0EF6@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66007:fb31aa0383a3 Date: 2013-08-08 17:37 +1200 http://bitbucket.org/pypy/pypy/changeset/fb31aa0383a3/ Log: partially changed sweeping to incremental diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -138,8 +138,9 @@ #XXX describe # marking of objects can be done over multiple STATE_MARKING = 1 -STATE_SWEEPING = 2 -STATE_FINALIZING = 3 +STATE_SWEEPING_RAWMALLOC = 2 +STATE_SWEEPING_ARENA = 3 +STATE_FINALIZING = 4 @@ -1684,16 +1685,19 @@ if self.old_objects_with_light_finalizers.non_empty(): self.deal_with_old_objects_with_finalizers() #objects_to_trace processed fully, can move on to sweeping - self.gc_state = STATE_SWEEPING - - #SWEEPING not yet incrementalised - self.major_collection_step(reserving_size) + self.gc_state = STATE_SWEEPING_RAWMALLOC + self.start_free_rawmalloc_objects() #END MARKING - elif self.gc_state == STATE_SWEEPING: + elif self.gc_state == STATE_SWEEPING_RAWMALLOC: # # Walk all rawmalloced objects and free the ones that don't # have the GCFLAG_VISITED flag. - self.free_unvisited_rawmalloc_objects() + # XXX heuristic here? 
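The sweeping rework in this changeset follows the usual incremental pattern, as the step function that follows shows: start_free_rawmalloc_objects sets the candidate list aside, and each step frees a bounded number of objects and reports whether it is done. A small self-contained illustration of that pattern, using plain Python lists instead of the real AddressStack:

    class ToySweeper(object):
        def __init__(self, candidates):
            self.might_sweep = list(candidates)   # filled when sweeping starts
            self.kept = []                        # survivors of this sweep

        def sweep_step(self, nobjects=1):
            # process up to 'nobjects' candidates; return True when finished
            while nobjects > 0 and self.might_sweep:
                obj = self.might_sweep.pop()
                if obj.get('visited'):
                    self.kept.append(obj)         # marked: keep it
                # else: drop the last reference, i.e. "free" it
                nobjects -= 1
            return not self.might_sweep

    sweeper = ToySweeper([{'visited': True}, {'visited': False}])
    while not sweeper.sweep_step(1):
        pass                                      # other work can be interleaved here
    assert len(sweeper.kept) == 1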
+ if self.free_unvisited_rawmalloc_objects_step(1): + self.gc_state = STATE_SWEEPING_ARENA + + elif self.gc_state == STATE_SWEEPING_ARENA: + # # Ask the ArenaCollection to visit all objects. Free the ones # that have not been visited above, and reset GCFLAG_VISITED on @@ -1804,15 +1808,22 @@ # llarena.arena_free(arena) self.rawmalloced_total_size -= r_uint(allocsize) - - def free_unvisited_rawmalloc_objects(self): - list = self.old_rawmalloced_objects + + def start_free_rawmalloc_objects(self): + self.raw_malloc_might_sweep = self.old_rawmalloced_objects self.old_rawmalloced_objects = self.AddressStack() - # - while list.non_empty(): - self.free_rawmalloced_object_if_unvisited(list.pop()) - # - list.delete() + + # Returns true when finished processing objects + def free_unvisited_rawmalloc_objects_step(self,nobjects=1): + + while nobjects > 0 and self.raw_malloc_might_sweep.non_empty(): + self.free_rawmalloced_object_if_unvisited( + self.raw_malloc_might_sweep.pop()) + + if not self.raw_malloc_might_sweep.non_empty(): + self.raw_malloc_might_sweep.delete() + return True + return False def collect_roots(self): From noreply at buildbot.pypy.org Thu Aug 8 12:19:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Aug 2013 12:19:41 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: Fixes Message-ID: <20130808101941.6EF971C1190@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r66008:4b5f79f9eb17 Date: 2013-08-08 11:39 +0200 http://bitbucket.org/pypy/pypy/changeset/4b5f79f9eb17/ Log: Fixes diff --git a/rpython/rtyper/lltypesystem/rtuple.py b/rpython/rtyper/lltypesystem/rtuple.py --- a/rpython/rtyper/lltypesystem/rtuple.py +++ b/rpython/rtyper/lltypesystem/rtuple.py @@ -56,7 +56,6 @@ cno = inputconst(Signed, nitems) hop.exception_is_here() vlist = hop.gendirectcall(LIST.ll_newlist, cno) - v_func = hop.inputconst(Void, rlist.dum_nocheck) for index in range(nitems): name = self.fieldnames[index] ritem = self.items_r[index] @@ -64,7 +63,7 @@ vitem = hop.genop('getfield', [vtup, cname], resulttype = ritem) vitem = hop.llops.convertvar(vitem, ritem, hop.r_result.item_repr) cindex = inputconst(Signed, index) - hop.gendirectcall(rlist.ll_setitem_nonneg, v_func, vlist, cindex, vitem) + hop.gendirectcall(rlist.ll_setitem_nonneg, vlist, cindex, vitem) return vlist def getitem_internal(self, llops, v_tuple, index): diff --git a/rpython/translator/c/extfunc.py b/rpython/translator/c/extfunc.py --- a/rpython/translator/c/extfunc.py +++ b/rpython/translator/c/extfunc.py @@ -51,7 +51,7 @@ def _RPyListOfString_SetItem(l=p, index=lltype.Signed, newstring=lltype.Ptr(STR)): - rlist.ll_setitem_nonneg(rlist.dum_nocheck, l, index, newstring) + rlist.ll_setitem_nonneg(l, index, newstring) def _RPyListOfString_GetItem(l=p, index=lltype.Signed): From noreply at buildbot.pypy.org Thu Aug 8 12:19:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Aug 2013 12:19:43 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: Remove the IndexError here Message-ID: <20130808101943.0BAC31C11A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r66009:0c095b5010a4 Date: 2013-08-08 11:45 +0200 http://bitbucket.org/pypy/pypy/changeset/0c095b5010a4/ Log: Remove the IndexError here diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -506,14 +506,15 @@ name = keywords[i] if name is None: # We'll assume it's unicode. 
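The argument.py change begun here, like the binascii and itertools fixes further down in this digest, replaces a local try/except IndexError with an explicit index computation and bounds check. The idiom itself, in isolation:

    def read_or_default(items, index, default=0):
        # old style:  try: return items[index]  except IndexError: return default
        # new style:  make the bounds check explicit
        if 0 <= index < len(items):
            return items[index]
        return default

    assert read_or_default([10, 20, 30], 1) == 20
    assert read_or_default([10, 20, 30], 7) == 0
    assert read_or_default([10, 20, 30], -1) == 0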
Encode it. - # Careful, I *think* it should not be possible to - # get an IndexError here but you never know. - try: - if keyword_names_w is None: - raise IndexError - # note: negative-based indexing from the end - w_name = keyword_names_w[i - len(keywords)] - except IndexError: + w_name = None + if keyword_names_w is not None: + # note: indexing from the end + index = len(keyword_names_w) + (i - len(keywords)) + # Careful, I *think* it should not be possible to + # get a negative index here but you never know + if index >= 0: + w_name = keyword_names_w[index] + if w_name is None: name = '?' else: w_enc = space.wrap(space.sys.defaultencoding) From noreply at buildbot.pypy.org Thu Aug 8 12:19:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Aug 2013 12:19:44 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: Fix: had double-negative-detection in one path Message-ID: <20130808101944.4FBD61C134C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r66010:96a686aaa962 Date: 2013-08-08 12:19 +0200 http://bitbucket.org/pypy/pypy/changeset/96a686aaa962/ Log: Fix: had double-negative-detection in one path diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -28,7 +28,7 @@ from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index, negate -from pypy.objspace.std.util import ListIndexError, getuindex +from pypy.objspace.std.util import ListIndexError, getuindex, getuindex_nonneg from rpython.rlib import debug, jit, rerased from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import ( @@ -1427,7 +1427,7 @@ def pop(self, w_list, index): l = self.unerase(w_list.lstorage) - uindex = getuindex(l, index) + uindex = getuindex_nonneg(l, index) item = l.pop(uindex) w_item = self.wrap(item) return w_item diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -34,6 +34,13 @@ """A custom RPython class, raised by getitem() and similar methods from listobject.py, and from getuindex() below.""" +def getuindex_nonneg(lst, index): + ulength = len(lst) + uindex = r_uint(index) + if uindex >= ulength: + raise ListIndexError + return uindex + def getuindex(lst, index): ulength = r_uint(len(lst)) uindex = r_uint(index) From noreply at buildbot.pypy.org Thu Aug 8 12:25:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Aug 2013 12:25:19 +0200 (CEST) Subject: [pypy-commit] pypy default: Argh Message-ID: <20130808102519.49C921C1190@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66011:d2c383a543f9 Date: 2013-08-08 12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/d2c383a543f9/ Log: Argh diff --git a/pypy/module/binascii/interp_uu.py b/pypy/module/binascii/interp_uu.py --- a/pypy/module/binascii/interp_uu.py +++ b/pypy/module/binascii/interp_uu.py @@ -61,7 +61,7 @@ return ord(bin[i]) except IndexError: return 0 -_a2b_read._always_inline_ = True +_b2a_read._always_inline_ = True @unwrap_spec(bin='bufferstr') def b2a_uu(space, bin): From noreply at buildbot.pypy.org Thu Aug 8 12:36:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Aug 2013 12:36:48 +0200 (CEST) Subject: [pypy-commit] pypy flow-no-local-exception: Fix some modules Message-ID: 
<20130808103648.54E4D1C11A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: flow-no-local-exception Changeset: r66012:71ef9d6bba05 Date: 2013-08-08 12:35 +0200 http://bitbucket.org/pypy/pypy/changeset/71ef9d6bba05/ Log: Fix some modules diff --git a/pypy/module/binascii/interp_uu.py b/pypy/module/binascii/interp_uu.py --- a/pypy/module/binascii/interp_uu.py +++ b/pypy/module/binascii/interp_uu.py @@ -5,10 +5,9 @@ # ____________________________________________________________ def _a2b_read(space, s, index): - try: - c = s[index] - except IndexError: + if index >= len(s): return 0 + c = s[index] # Check the character for legality. The 64 instead of the expected 63 # is because there are a few uuencodes out there that use '`' as zero # instead of space. @@ -57,11 +56,10 @@ # ____________________________________________________________ def _b2a_read(bin, i): - try: - return ord(bin[i]) - except IndexError: + if i >= len(bin): return 0 -_a2b_read._always_inline_ = True + return ord(bin[i]) +_b2a_read._always_inline_ = True @unwrap_spec(bin='bufferstr') def b2a_uu(space, bin): diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -691,37 +691,33 @@ self.space = space self.saved_w = [] self.w_iterable = space.iter(w_iterable) - self.index = 0 - self.exhausted = False + self.index = -1 # during the first run; if >= 0, we are repeating def iter_w(self): return self.space.wrap(self) def next_w(self): - if self.exhausted: - if not self.saved_w: - raise OperationError(self.space.w_StopIteration, self.space.w_None) - try: - w_obj = self.saved_w[self.index] - except IndexError: - self.index = 1 - w_obj = self.saved_w[0] - else: - self.index += 1 + index = self.index + if index >= 0: # if we are repeating + if index >= len(self.saved_w): + index = 0 + if len(self.saved_w) == 0: + raise OperationError(self.space.w_StopIteration, + self.space.w_None) + self.index = index + 1 + w_obj = self.saved_w[index] else: try: w_obj = self.space.next(self.w_iterable) except OperationError, e: if e.match(self.space, self.space.w_StopIteration): - self.exhausted = True + self.index = 1 # exhausted if not self.saved_w: raise - self.index = 1 w_obj = self.saved_w[0] else: raise else: - self.index += 1 self.saved_w.append(w_obj) return w_obj From noreply at buildbot.pypy.org Thu Aug 8 16:15:12 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 8 Aug 2013 16:15:12 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: remove need for another temporary register in stm_read_barrier fastpath Message-ID: <20130808141512.8F4531C11A6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66013:693378095093 Date: 2013-08-06 09:09 +0200 http://bitbucket.org/pypy/pypy/changeset/693378095093/ Log: remove need for another temporary register in stm_read_barrier fastpath diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2323,14 +2323,12 @@ # calculate: temp = obj & FX_MASK assert StmGC.FX_MASK == 65535 assert not is_frame - temp_loc = arglocs[1] # does not exist if is_frame! 
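The fastpath being reworked here is the same test that the stmgc.h inline version further down performs before calling stm_DirectReadBarrier: skip the call when the object's h_revision equals the thread's private revision number, or when the object is already present in the read-barrier cache, indexed through the low 16 bits of its address (FX_MASK == 65535, as the assembler asserts and as the new test at the end of this digest exercises). A Python sketch of that predicate, with a plain list standing in for the cache:

    FX_MASK = 65535

    def needs_read_barrier(obj_addr, obj_revision, private_rev, read_cache):
        if obj_revision == private_rev:
            return False                    # private copy: no barrier call needed
        if read_cache[obj_addr & FX_MASK] == obj_addr:
            return False                    # cache hit: already handled
        return True                         # slow path: call the real read barrier

    cache = [0] * (FX_MASK + 1)
    assert not needs_read_barrier(0x1234, 42, 42, cache)    # revision fastpath
    cache[0x1234 & FX_MASK] = 0x1234
    assert not needs_read_barrier(0x1234, 7, 42, cache)     # cache fastpath
    assert needs_read_barrier(0x5678, 7, 42, cache)         # neither: slow path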
- mc.MOVZX16(temp_loc, loc_base) + mc.MOVZX16(X86_64_SCRATCH_REG, loc_base) # calculate: rbc + temp == obj rbc = self._get_stm_read_barrier_cache_addr() stmtlocal.tl_segment_prefix(mc) - mc.MOV_rj(X86_64_SCRATCH_REG.value, rbc) - mc.CMP_ra(loc_base.value, - (X86_64_SCRATCH_REG.value, temp_loc.value, 0, 0)) + mc.ADD_rj(X86_64_SCRATCH_REG.value, rbc) + mc.CMP_rm(loc_base.value, (X86_64_SCRATCH_REG.value, 0)) mc.J_il8(rx86.Conditions['Z'], 0) # patched below jz_location2 = mc.get_relative_pos() diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -797,24 +797,7 @@ for i in range(N)] self.perform_discard(op, arglocs) - def consider_cond_call_stm_b(self, op): - assert op.result is None - args = op.getarglist() - N = len(args) - assert N == 1 - # we force all arguments in a reg (unless they are Consts), - # because it will be needed anyway by the following setfield_gc - # or setarrayitem_gc. It avoids loading it twice from the memory. - tmp_box = TempBox() - tmp_loc = self.rm.force_allocate_reg(tmp_box, args) - args = args + [tmp_box] - - arglocs = [self.rm.make_sure_var_in_reg(op.getarg(i), args) - for i in range(N)] + [tmp_loc] - - self.perform_discard(op, arglocs) - self.rm.possibly_free_var(tmp_box) - + consider_cond_call_stm_b = consider_cond_call_gc_wb consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_call_malloc_nursery(self, op): From noreply at buildbot.pypy.org Thu Aug 8 16:15:13 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 8 Aug 2013 16:15:13 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: reenable gcremovetypeptr. seems to work now Message-ID: <20130808141513.E83EA1C11A6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66014:dba65ecc13b8 Date: 2013-08-06 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/dba65ecc13b8/ Log: reenable gcremovetypeptr. 
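Re-enabling gcremovetypeptr for the STM build means an object's class is identified through the h_tid word in the GC header rather than through a separate typeptr field; the stmrewrite change just below relies on that, since reading h_tid needs no read barrier while reading a typeptr field would. A toy illustration of the header-word style of class check; the names here are made up:

    class ToyObjHeader(object):
        def __init__(self, tid):
            self.h_tid = tid            # header word the GC keeps on every object

    TID_OF_MYCLASS = 1001

    def guard_class_via_tid(obj, expected_tid):
        # with gcremovetypeptr the guard only inspects the header word,
        # instead of loading a typeptr field from the object body
        return obj.h_tid == expected_tid

    assert guard_class_via_tid(ToyObjHeader(TID_OF_MYCLASS), TID_OF_MYCLASS)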
seems to work now diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -75,7 +75,7 @@ "minimark": [("translation.gctransformer", "framework")], "stmgc": [("translation.gctransformer", "framework"), ("translation.gcrootfinder", "stm"), - ("translation.gcremovetypeptr", False)], + ("translation.gcremovetypeptr", True)], }, suggests = { }, diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -53,6 +53,13 @@ rop.PTR_NE, rop.INSTANCE_PTR_NE): self.handle_ptr_eq(op) continue + # ---------- guard_class ---------- + if op.getopnum() == rop.GUARD_CLASS: + assert self.cpu.vtable_offset is None + # requires gcremovetypeptr translation option + # uses h_tid which doesn't need a read-barrier + self.newops.append(op) + continue # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): self.newops.append(op) From noreply at buildbot.pypy.org Thu Aug 8 16:15:15 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 8 Aug 2013 16:15:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: do a ptr_eq on guard_value() comparing two ptrs Message-ID: <20130808141515.43B3A1C11A6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66015:2ec55a5b3061 Date: 2013-08-07 10:35 +0200 http://bitbucket.org/pypy/pypy/changeset/2ec55a5b3061/ Log: do a ptr_eq on guard_value() comparing two ptrs diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -829,6 +829,10 @@ raiseassert(x0 is not None) raiseassert(x1 is not None) raiseassert(x3 is None) + for i in range(1, 4): + ptrs[i].x = i + x0.x = 6 + x1.x = 9 # return n - 1, x, x0, x1, x2, x3, x4, x5, x6, x7, ptrs, s return before, f, None diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1613,7 +1613,12 @@ # base_loc and ofs_loc should be immediates, but maybe not # fitting in 32-bit base_loc, ofs_loc, size_loc = arglocs - self.mc.INC(addr_add(base_loc, ofs_loc)) + addr = addr_add(base_loc, ofs_loc) + if rx86.fits_in_32bits(addr.value): + self.mc.INC(addr) + else: + self.mc.MOV(X86_64_SCRATCH_REG, base_loc) + self.mc.INC_m((X86_64_SCRATCH_REG.value, ofs_loc.getint())) def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs @@ -1798,7 +1803,15 @@ genop_guard_guard_isnull = genop_guard_guard_false def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2): - if guard_op.getarg(0).type == FLOAT: + argtype = guard_op.getarg(0).type + if self.cpu.gc_ll_descr.stm and argtype == REF: + assert guard_op.getarg(1).type == REF + # x64 has no support for 64bit immed. Force them into registers! 
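The guard_value change above only kicks in for pointer arguments: under stmgc-c4 the comparison is routed through the same path as ptr_eq (ultimately the stm_ptr_eq helper, which the new test further down stubs out with a plain ==), presumably because a raw address compare is not meaningful when the same logical object can exist as more than one copy. A toy sketch of that idea, reusing the h_original field name that also appears in the test structures below; the exact linkage shown is an assumption for illustration only:

    class StmCopy(object):
        def __init__(self, original=None):
            # each copy remembers which original object it stands for
            self.h_original = original if original is not None else self

    def toy_ptr_eq(a, b):
        # equal if literally the same copy, or copies of the same original
        return a is b or a.h_original is b.h_original

    orig = StmCopy()
    private_copy = StmCopy(original=orig)
    assert private_copy is not orig            # naive identity says "different"
    assert toy_ptr_eq(private_copy, orig)      # the STM-aware compare says "equal"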
+ # XXX: do better for 32 bit + self.genop_guard_ptr_eq(ign_1, guard_op, guard_token, + locs, ign_2) + return + elif argtype == FLOAT: assert guard_op.getarg(1).type == FLOAT self.mc.UCOMISD(locs[0], locs[1]) else: diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -407,8 +407,16 @@ consider_guard_overflow = consider_guard_no_exception def consider_guard_value(self, op): - x = self.make_sure_var_in_reg(op.getarg(0)) - y = self.loc(op.getarg(1)) + args = op.getarglist() + if args[0].type == REF: + assert args[1].type == REF + # XXX: this is certainly not wanted. + # We force immed64 into registers here. + x = self.make_sure_var_in_reg(args[0], args, selected_reg=ecx) + y = self.make_sure_var_in_reg(args[1], args, selected_reg=eax) + else: + x = self.make_sure_var_in_reg(args[0], args) + y = self.loc(args[1]) self.perform_guard(op, [x, y], None) def consider_guard_class(self, op): From noreply at buildbot.pypy.org Thu Aug 8 16:15:16 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 8 Aug 2013 16:15:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: lots of little changes and possible untested fixes Message-ID: <20130808141516.7930E1C11A6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66016:81b701256281 Date: 2013-08-08 12:16 +0200 http://bitbucket.org/pypy/pypy/changeset/81b701256281/ Log: lots of little changes and possible untested fixes diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -395,15 +395,19 @@ addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) dumps = {} - executables = set(["??"]) + executables = set(["??",]) symbols = {} for entry in extract_category(log, 'jit-backend-dump'): entry = purge_thread_numbers(entry) backend, executable, dump, _ = entry.split("\n") - _, executable = executable.split(" ") - if executable not in executables: - symbols.update(load_symbols(executable)) - executables.add(executable) + if "(out of memory!)" not in executable: + _, executable = executable.split(" ") + if executable not in executables: + try: + symbols.update(load_symbols(executable)) + except Exception as e: + print e + executables.add(executable) _, addr, _, data = re.split(" +", dump) backend_name = backend.split(" ")[1] addr = int(addr[1:], 16) diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -441,14 +441,16 @@ def __init__(self, gc_ll_descr, stmcat): assert stmcat == 'P2R' STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_DirectReadBarrier') + 'stm_read_barrier') + # XXX: implement fastpath then change to stm_DirectReadBarrier class STMWriteBarrierDescr(STMBarrierDescr): def __init__(self, gc_ll_descr, stmcat): assert stmcat in ['P2W'] STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_WriteBarrier') + 'stm_write_barrier') + # XXX: implement fastpath, then change to stm_WriteBarrier class GcLLDescr_framework(GcLLDescription): @@ -560,6 +562,8 @@ @specialize.argtype(0) def do_stm_barrier(gcref, cat): if lltype.typeOf(gcref) is lltype.Signed: # ignore if 'raw' + # we are inevitable already because llmodel + # does everything with raw-references return gcref if cat == 'W': descr = self.P2Wdescr diff --git a/rpython/jit/backend/llsupport/llmodel.py 
b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -41,6 +41,8 @@ if translator and translator.config.translation.gcremovetypeptr: self.vtable_offset = None else: + assert not self.gc_ll_descr.stm, """doesn't work in stm + because it would need a read barrier when reading typeptr""" self.vtable_offset, _ = symbolic.get_field_token(rclass.OBJECT, 'typeptr', translate_support_code) @@ -403,6 +405,7 @@ raise NotImplementedError("size = %d" % size) def read_ref_at_mem(self, gcref, ofs): + gcref = self.gc_ll_descr.do_stm_barrier(gcref, 'R') # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) @@ -411,7 +414,7 @@ return pval def write_ref_at_mem(self, gcref, ofs, newvalue): - self.gc_ll_descr.do_write_barrier(gcref, newvalue) + gcref = self.gc_ll_descr.do_stm_barrier(gcref, 'W') # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) items = rffi.cast(rffi.CArrayPtr(lltype.Signed), items) @@ -420,6 +423,7 @@ @specialize.argtype(1) def read_float_at_mem(self, gcref, ofs): + gcref = self.gc_ll_descr.do_stm_barrier(gcref, 'R') # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items) @@ -429,6 +433,7 @@ @specialize.argtype(1) def write_float_at_mem(self, gcref, ofs, newvalue): + gcref = self.gc_ll_descr.do_stm_barrier(gcref, 'W') # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs) items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), items) @@ -590,7 +595,7 @@ ofs, size, _ = self.unpack_arraydescr_size(arraydescr) ofs += descr.fielddescr.offset gcref = self.gc_ll_descr.do_stm_barrier(gcref, 'W') - self.gc_ll_descr.do_write_barrier(gcref, newvalue) + #self.gc_ll_descr.do_write_barrier(gcref, newvalue) # --- start of GC unsafe code (no GC operation!) --- items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs + size * itemindex) @@ -698,7 +703,7 @@ assert lltype.typeOf(struct) is not lltype.Signed, ( "can't handle write barriers for setfield_raw") struct = self.gc_ll_descr.do_stm_barrier(struct, 'W') - self.gc_ll_descr.do_write_barrier(struct, newvalue) + #self.gc_ll_descr.do_write_barrier(struct, newvalue) # --- start of GC unsafe code (no GC operation!) 
--- fieldptr = rffi.ptradd(rffi.cast(rffi.CCHARP, struct), ofs) fieldptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), fieldptr) diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -114,8 +114,9 @@ p = subprocess.Popen(symbollister % filename, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() - assert not p.returncode, ('Encountered an error running nm: %s' % - stderr) + if not p.returncode: + raise Exception('Encountered an error running nm: %s' % + stderr) for line in stdout.splitlines(True): match = re_symbolentry.match(line) if match: @@ -274,7 +275,10 @@ elif line.startswith('SYS_EXECUTABLE '): filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': - self.symbols.update(load_symbols(filename)) + try: + self.symbols.update(load_symbols(filename)) + except Exception as e: + print e self.executable_name = filename def find_cross_references(self): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1039,6 +1039,7 @@ def genop_ptr_eq(self, op, arglocs, result_loc): if not self.cpu.gc_ll_descr.stm: self.genop_int_eq(op, arglocs, result_loc) + return assert self.cpu.gc_ll_descr.stm rl = result_loc.lowest8bits() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) @@ -1048,6 +1049,7 @@ def genop_ptr_ne(self, op, arglocs, result_loc): if not self.cpu.gc_ll_descr.stm: self.genop_int_ne(op, arglocs, result_loc) + return assert self.cpu.gc_ll_descr.stm rl = result_loc.lowest8bits() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) @@ -1059,26 +1061,30 @@ if not self.cpu.gc_ll_descr.stm: self.genop_guard_int_eq(op, guard_op, guard_token, arglocs, result_loc) + return + assert self.cpu.gc_ll_descr.stm + guard_opnum = guard_op.getopnum() + self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) + if guard_opnum == rop.GUARD_FALSE: + # jump to failure-code if ptrs are equal + self.implement_guard(guard_token, "NZ") + else: + # jump to failure-code if ptrs are not equal + self.implement_guard(guard_token, "Z") + + def genop_guard_ptr_ne(self, op, guard_op, guard_token, + arglocs, result_loc): + if not self.cpu.gc_ll_descr.stm: + self.genop_guard_int_ne(op, guard_op, guard_token, + arglocs, result_loc) + return assert self.cpu.gc_ll_descr.stm guard_opnum = guard_op.getopnum() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) if guard_opnum == rop.GUARD_FALSE: self.implement_guard(guard_token, "Z") else: - self.implement_guard(guard_token, "NZ") - - def genop_guard_ptr_ne(self, op, guard_op, guard_token, - arglocs, result_loc): - if not self.cpu.gc_ll_descr.stm: - self.genop_guard_int_ne(op, guard_op, guard_token, - arglocs, result_loc) - assert self.cpu.gc_ll_descr.stm - guard_opnum = guard_op.getopnum() - self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - if guard_opnum == rop.GUARD_FALSE: - self.implement_guard(guard_token, "NZ") - else: - self.implement_guard(guard_token, "Z") + self.implement_guard(guard_token, "NZ") def _cmpop(cond, rev_cond): def genop_cmp(self, op, arglocs, result_loc): diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -414,6 +414,7 @@ # We force immed64 into registers here. 
x = self.make_sure_var_in_reg(args[0], args, selected_reg=ecx) y = self.make_sure_var_in_reg(args[1], args, selected_reg=eax) + self.rm.possibly_free_var(args[1]) else: x = self.make_sure_var_in_reg(args[0], args) y = self.loc(args[1]) diff --git a/rpython/translator/c/src/rtyper.c b/rpython/translator/c/src/rtyper.c --- a/rpython/translator/c/src/rtyper.c +++ b/rpython/translator/c/src/rtyper.c @@ -16,6 +16,7 @@ char *RPyString_AsCharP(RPyString *rps) { + rps = (RPyString *)stm_read_barrier((gcptr)rps); Signed len = RPyString_Size(rps); struct _RPyString_dump_t *dump = \ malloc(sizeof(struct _RPyString_dump_t) + len); diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -7,7 +7,6 @@ */ #include "stmimpl.h" -#ifdef _GC_DEBUG char tmp_buf[128]; char* stm_dbg_get_hdr_str(gcptr obj) { @@ -27,7 +26,6 @@ cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); return tmp_buf; } -#endif diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -12cf412eb2d7 +12cf412eb2d7+ diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -167,6 +167,23 @@ #define UNLIKELY(test) __builtin_expect(test, 0) + +static inline gcptr stm_read_barrier(gcptr obj) { + /* XXX optimize to get the smallest code */ + if (UNLIKELY((obj->h_revision != stm_private_rev_num) && + (FXCACHE_AT(obj) != obj))) + obj = stm_DirectReadBarrier(obj); + return obj; +} + +static inline gcptr stm_write_barrier(gcptr obj) { + if (UNLIKELY((obj->h_revision != stm_private_rev_num) | + ((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0))) + obj = stm_WriteBarrier(obj); + return obj; +} + +#if 0 #define stm_read_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) && \ (FXCACHE_AT(obj) != (obj))) ? \ @@ -178,6 +195,6 @@ (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? 
\ stm_WriteBarrier(obj) \ : (obj)) - +#endif #endif From noreply at buildbot.pypy.org Thu Aug 8 16:15:17 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 8 Aug 2013 16:15:17 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: first test for stm_read_barrier fastpath Message-ID: <20130808141517.BF4561C11A6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66017:406b5e69c06e Date: 2013-08-08 16:14 +0200 http://bitbucket.org/pypy/pypy/changeset/406b5e69c06e/ Log: first test for stm_read_barrier fastpath diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -845,7 +845,8 @@ rst = self._get_root_stack_top_addr() if rx86.fits_in_32bits(rst): - if gcrootmap.is_stm: + if gcrootmap.is_stm and we_are_translated(): + # during testing, it will be an absolute address stmtlocal.tl_segment_prefix(mc) mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] else: @@ -861,7 +862,8 @@ self.mc.ADD_ri(ebx.value, WORD) if rx86.fits_in_32bits(rst): - if gcrootmap.is_stm: + if gcrootmap.is_stm and we_are_translated(): + # during testing, it will be an absolute address stmtlocal.tl_segment_prefix(self.mc) self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: @@ -875,7 +877,8 @@ rst = self._get_root_stack_top_addr() if rx86.fits_in_32bits(rst): - if gcrootmap.is_stm: + if gcrootmap.is_stm and we_are_translated(): + # during testing, it will be an absolute address stmtlocal.tl_segment_prefix(self.mc) self.mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD else: @@ -1192,7 +1195,8 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap rst = gcrootmap.get_root_stack_top_addr() - if gcrootmap.is_stm: + if gcrootmap.is_stm and we_are_translated(): + # during testing, we return an absolute address rst = rst - stmtlocal.threadlocal_base() assert rx86.fits_in_32bits(rst) return rst @@ -1203,7 +1207,8 @@ if gcrootmap and gcrootmap.is_shadow_stack: rst = self._get_root_stack_top_addr() - if gcrootmap.is_stm: + if gcrootmap.is_stm and we_are_translated(): + # during testing, it will be an absolute address stmtlocal.tl_segment_prefix(mc) mc.MOV(ecx, heap(rst)) mc.MOV(ebp, mem(ecx, -WORD)) @@ -2267,16 +2272,17 @@ # OK: flags already set if j_ok1: offset = mc.get_relative_pos() - j_ok1 + assert 0 <= offset <= 127 mc.overwrite(j_ok1 - 1, chr(offset)) if j_ok2: offset = mc.get_relative_pos() - j_ok2 + assert 0 <= offset <= 127 mc.overwrite(j_ok2 - 1, chr(offset)) if j_ok3: offset = mc.get_relative_pos() - j_ok3 + assert 0 <= offset <= 127 mc.overwrite(j_ok3 - 1, chr(offset)) - - def _get_stm_private_rev_num_addr(self): assert self.cpu.gc_ll_descr.stm rn = rstm.get_adr_of_private_rev_num() @@ -2294,9 +2300,9 @@ def _stm_barrier_fastpath(self, mc, descr, arglocs, is_frame=False, align_stack=False): assert self.cpu.gc_ll_descr.stm - from rpython.jit.backend.llsupport.gc import ( - STMBarrierDescr, STMReadBarrierDescr, STMWriteBarrierDescr) - assert isinstance(descr, STMBarrierDescr) + #from rpython.jit.backend.llsupport.gc import ( + # STMBarrierDescr, STMReadBarrierDescr, STMWriteBarrierDescr) + #assert isinstance(descr, STMBarrierDescr) assert descr.returns_modified_object loc_base = arglocs[0] assert isinstance(loc_base, RegLoc) @@ -2321,14 +2327,20 @@ jnz_location = 0 # compare h_revision with stm_private_rev_num (XXX: may be slow) rn = self._get_stm_private_rev_num_addr() - stmtlocal.tl_segment_prefix(mc) - mc.MOV_rj(X86_64_SCRATCH_REG.value, rn) + if we_are_translated(): + # 
during tests, _get_stm_private_rev_num_addr returns + # an absolute address, not a tl-offset + stmtlocal.tl_segment_prefix(mc) + mc.MOV_rj(X86_64_SCRATCH_REG.value, rn) + else: # testing: + mc.MOV(X86_64_SCRATCH_REG, heap(rn)) + if loc_base == ebp: mc.CMP_rb(X86_64_SCRATCH_REG.value, StmGC.H_REVISION) else: mc.CMP(X86_64_SCRATCH_REG, mem(loc_base, StmGC.H_REVISION)) # - if isinstance(descr, STMReadBarrierDescr): + if descr.stmcat == 'P2R':#isinstance(descr, STMReadBarrierDescr): # jump to end if h_rev==priv_rev mc.J_il8(rx86.Conditions['Z'], 0) # patched below jz_location = mc.get_relative_pos() @@ -2338,21 +2350,29 @@ jnz_location = mc.get_relative_pos() # FXCACHE_AT(obj) != obj - if isinstance(descr, STMReadBarrierDescr): + if descr.stmcat == 'P2R':#isinstance(descr, STMReadBarrierDescr): # calculate: temp = obj & FX_MASK assert StmGC.FX_MASK == 65535 assert not is_frame mc.MOVZX16(X86_64_SCRATCH_REG, loc_base) # calculate: rbc + temp == obj rbc = self._get_stm_read_barrier_cache_addr() - stmtlocal.tl_segment_prefix(mc) - mc.ADD_rj(X86_64_SCRATCH_REG.value, rbc) + if we_are_translated(): + # during tests, _get_stm_rbca returns + # an absolute address, not a tl-offset + stmtlocal.tl_segment_prefix(mc) + mc.ADD_rj(X86_64_SCRATCH_REG.value, rbc) + else: # testing: + mc.PUSH_r(eax.value) + mc.MOV(eax, heap(rbc)) + mc.ADD(X86_64_SCRATCH_REG, eax) + mc.POP_r(eax.value) mc.CMP_rm(loc_base.value, (X86_64_SCRATCH_REG.value, 0)) mc.J_il8(rx86.Conditions['Z'], 0) # patched below jz_location2 = mc.get_relative_pos() # obj->h_tid & GCFLAG_WRITE_BARRIER) != 0 - if isinstance(descr, STMWriteBarrierDescr): + if descr.stmcat == 'P2W':#isinstance(descr, STMWriteBarrierDescr): assert IS_X86_64 and (StmGC.GCFLAG_WRITE_BARRIER >> 32) > 0 assert (StmGC.GCFLAG_WRITE_BARRIER >> 40) == 0 off = 4 @@ -2398,7 +2418,7 @@ offset = mc.get_relative_pos() - jz_location assert 0 < offset <= 127 mc.overwrite(jz_location - 1, chr(offset)) - if isinstance(descr, STMReadBarrierDescr): + if descr.stmcat == 'P2R':#isinstance(descr, STMReadBarrierDescr): offset = mc.get_relative_pos() - jz_location2 assert 0 < offset <= 127 mc.overwrite(jz_location2 - 1, chr(offset)) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -0,0 +1,209 @@ +import py +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.jit.metainterp.history import ResOperation, TargetToken,\ + JitCellToken +from rpython.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, + ConstPtr, Box, + BasicFailDescr, BasicFinalDescr) +from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.jit.backend.x86.arch import WORD +from rpython.jit.backend.x86.rx86 import fits_in_32bits +from rpython.jit.backend.llsupport import symbolic +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.executor import execute +from rpython.jit.backend.test.runner_test import LLtypeBackendTest +from rpython.jit.tool.oparser import parse +from rpython.rtyper.annlowlevel import llhelper, llhelper_args +from rpython.jit.backend.llsupport.gc import ( + GcRootMap_stm, BarrierDescr) +from rpython.jit.backend.llsupport.test.test_gc_integration import ( + GCDescrShadowstackDirect, BaseTestRegalloc) +from rpython.jit.backend.llsupport import jitframe +import ctypes + +CPU = getcpuclass() + +class MockSTMRootMap(object): + is_shadow_stack = True + is_stm = True + def 
__init__(self): + TP = rffi.CArray(lltype.Signed) + self.stack = lltype.malloc(TP, 10, flavor='raw') + self.stack_addr = lltype.malloc(TP, 1, + flavor='raw') + self.stack_addr[0] = rffi.cast(lltype.Signed, self.stack) + def __del__(self): + lltype.free(self.stack_addr, flavor='raw') + lltype.free(self.stack, flavor='raw') + def register_asm_addr(self, start, mark): + pass + def get_root_stack_top_addr(self): + return rffi.cast(lltype.Signed, self.stack_addr) + +class FakeSTMBarrier(BarrierDescr): + def __init__(self, gc_ll_descr, stmcat, func): + BarrierDescr.__init__(self, gc_ll_descr) + self.stmcat = stmcat + self.returns_modified_object = True + self.B_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( + [llmemory.Address], llmemory.Address)) + self.write_barrier_fn = llhelper(self.B_FUNCPTR_MOD, func) + def get_barrier_funcptr(self, returns_modified_object): + assert returns_modified_object + return self.write_barrier_fn + def get_barrier_fn(self, cpu, returns_modified_object): + assert returns_modified_object + return self.write_barrier_fn + +# ____________________________________________________________ + + +def jitframe_allocate(frame_info): + frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth, zero=True) + frame.jf_frame_info = frame_info + return frame + +JITFRAME = lltype.GcStruct( + 'JITFRAME', + ('h_tid', lltype.Signed), + ('h_revision', lltype.Signed), + ('h_original', lltype.Signed), + ('jf_frame_info', lltype.Ptr(jitframe.JITFRAMEINFO)), + ('jf_descr', llmemory.GCREF), + ('jf_force_descr', llmemory.GCREF), + ('jf_extra_stack_depth', lltype.Signed), + ('jf_guard_exc', llmemory.GCREF), + ('jf_gcmap', lltype.Ptr(jitframe.GCMAP)), + ('jf_gc_trace_state', lltype.Signed), + ('jf_frame', lltype.Array(lltype.Signed)), + adtmeths = { + 'allocate': jitframe_allocate, + }, +) + +JITFRAMEPTR = lltype.Ptr(JITFRAME) +class FakeGCHeaderBuilder: + size_gc_header = WORD + + +class GCDescrStm(GCDescrShadowstackDirect): + def __init__(self): + GCDescrShadowstackDirect.__init__(self) + self.gcrootmap = MockSTMRootMap() + self.gcheaderbuilder = FakeGCHeaderBuilder() + self.write_barrier_descr = None + self.llop1 = None + self.rb_called_on = [] + self.wb_called_on = [] + self.stm = True + + def read_barrier(obj): + self.rb_called_on.append(obj) + return obj + def write_barrier(obj): + self.wb_called_on.append(obj) + return obj + + self.P2Rdescr = FakeSTMBarrier(self, 'P2R', read_barrier) + self.P2Wdescr = FakeSTMBarrier(self, 'P2W', write_barrier) + + self.do_write_barrier = None + self.get_nursery_top_addr = None + self.get_nursery_free_addr = None + + def malloc_str(length): + assert False + self.generate_function('malloc_str', malloc_str, + [lltype.Signed]) + def malloc_unicode(length): + assert False + self.generate_function('malloc_unicode', malloc_unicode, + [lltype.Signed]) + def inevitable(): + pass + self.generate_function('stm_try_inevitable', + inevitable, [], + RESULT=lltype.Void) + def ptr_eq(x, y): return x == y + def ptr_ne(x, y): return x != y + self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, + RESULT=lltype.Bool) + self.generate_function('stm_ptr_ne', ptr_ne, [llmemory.GCREF] * 2, + RESULT=lltype.Bool) + + def get_malloc_slowpath_addr(self): + return None + + +class TestGcStm(BaseTestRegalloc): + def get_priv_rev_num(self): + return rffi.cast(lltype.Signed, self.priv_rev_num) + + def get_read_cache(self): + return rffi.cast(lltype.Signed, self.read_cache_adr) + + def setup_method(self, meth): + cpu = CPU(None, None) + cpu.gc_ll_descr = GCDescrStm() + self.p2wd = 
cpu.gc_ll_descr.P2Wdescr + self.p2rd = cpu.gc_ll_descr.P2Rdescr + + TP = rffi.CArray(lltype.Signed) + self.priv_rev_num = lltype.malloc(TP, 1, flavor='raw') + self.read_cache = lltype.malloc(TP, n=65536 / WORD, flavor='raw') + self.read_cache_adr = lltype.malloc(TP, 1, flavor='raw') + self.read_cache_adr[0] = rffi.cast(lltype.Signed, self.read_cache) + + cpu.assembler._get_stm_private_rev_num_addr = self.get_priv_rev_num + cpu.assembler._get_stm_read_barrier_cache_addr = self.get_read_cache + + S = lltype.GcForwardReference() + S.become(lltype.GcStruct( + 'S', ('h_tid', lltype.Signed), + ('h_revision', lltype.Signed), + ('h_original', lltype.Signed))) + cpu.gc_ll_descr.fielddescr_tid = None # not needed + # = cpu.fielddescrof(S, 'h_tid') + self.S = S + self.cpu = cpu + + def teardown_method(self, meth): + rffi.aroundstate._cleanup_() + + def assert_in_read_barrier(self, *args): + rb_called_on = self.cpu.gc_ll_descr.rb_called_on + for i, ref in enumerate(args): + assert rffi.cast_ptr_to_adr(ref) == rb_called_on[i] + def assert_not_in_read_barrier(self, *args): + rb_called_on = self.cpu.gc_ll_descr.rb_called_on + for ref in args: + assert not rffi.cast_ptr_to_adr(ref) in rb_called_on + + def test_read_barrier_fastpath(self): + cpu = self.cpu + cpu.setup_once() + PRIV_REV = 3 + self.priv_rev_num[0] = PRIV_REV + for rev in [PRIV_REV, PRIV_REV+1]: + s = lltype.malloc(self.S) + sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) + s.h_tid = 0 + s.h_revision = rev + p0 = BoxPtr() + operations = [ + ResOperation(rop.COND_CALL_STM_B, [p0,], None, + descr=self.p2rd), + ResOperation(rop.FINISH, [p0], None, descr=BasicFinalDescr(0)), + ] + inputargs = [p0] + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.execute_token(looptoken, sgcref) + if rev == PRIV_REV: + # fastpath + self.assert_not_in_read_barrier(sgcref) + else: + self.assert_in_read_barrier(sgcref) + + + From noreply at buildbot.pypy.org Thu Aug 8 16:44:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Aug 2013 16:44:59 +0200 (CEST) Subject: [pypy-commit] pypy default: Add the merged branch here Message-ID: <20130808144459.DDAA51C00F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66018:0c153ba60257 Date: 2013-08-08 16:44 +0200 http://bitbucket.org/pypy/pypy/changeset/0c153ba60257/ Log: Add the merged branch here diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -73,3 +73,4 @@ .. branch: dotviewer-linewidth .. branch: reflex-support +.. 
branch: numpypy-inplace-op From noreply at buildbot.pypy.org Thu Aug 8 17:22:40 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 8 Aug 2013 17:22:40 +0200 (CEST) Subject: [pypy-commit] pypy default: backout 4a467433bb51: breaks translation Message-ID: <20130808152240.B48981C00F4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r66019:f2b929212889 Date: 2013-08-08 16:22 +0100 http://bitbucket.org/pypy/pypy/changeset/f2b929212889/ Log: backout 4a467433bb51: breaks translation diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -314,7 +314,7 @@ def immutableconstant(self, const): return self.immutablevalue(const.value) - def immutablevalue(self, x): + def immutablevalue(self, x, need_const=True): """The most precise SomeValue instance that contains the immutable value x.""" # convert unbound methods to the underlying function @@ -350,47 +350,69 @@ elif tp is bytearray: result = SomeByteArray() elif tp is tuple: - result = SomeTuple(items = [self.immutablevalue(e) for e in x]) + result = SomeTuple(items = [self.immutablevalue(e, need_const) for e in x]) elif tp is float: result = SomeFloat() elif tp is list: - key = Constant(x) - try: - return self.immutable_cache[key] - except KeyError: - result = SomeList(ListDef(self, s_ImpossibleValue)) - self.immutable_cache[key] = result + if need_const: + key = Constant(x) + try: + return self.immutable_cache[key] + except KeyError: + result = SomeList(ListDef(self, s_ImpossibleValue)) + self.immutable_cache[key] = result + for e in x: + result.listdef.generalize(self.immutablevalue(e)) + result.const_box = key + return result + else: + listdef = ListDef(self, s_ImpossibleValue) for e in x: - result.listdef.generalize(self.immutablevalue(e)) - result.const_box = key - return result + listdef.generalize(self.immutablevalue(e, False)) + result = SomeList(listdef) elif tp is dict or tp is r_dict: - key = Constant(x) - try: - return self.immutable_cache[key] - except KeyError: - result = SomeDict(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) - self.immutable_cache[key] = result + if need_const: + key = Constant(x) + try: + return self.immutable_cache[key] + except KeyError: + result = SomeDict(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) + self.immutable_cache[key] = result + if tp is r_dict: + s_eqfn = self.immutablevalue(x.key_eq) + s_hashfn = self.immutablevalue(x.key_hash) + result.dictdef.dictkey.update_rdict_annotations(s_eqfn, + s_hashfn) + seen_elements = 0 + while seen_elements != len(x): + items = x.items() + for ek, ev in items: + result.dictdef.generalize_key(self.immutablevalue(ek)) + result.dictdef.generalize_value(self.immutablevalue(ev)) + result.dictdef.seen_prebuilt_key(ek) + seen_elements = len(items) + # if the dictionary grew during the iteration, + # start over again + result.const_box = key + return result + else: + dictdef = DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict) if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) s_hashfn = self.immutablevalue(x.key_hash) - result.dictdef.dictkey.update_rdict_annotations(s_eqfn, - s_hashfn) - seen_elements = 0 - while seen_elements != len(x): - items = x.items() - for ek, ev in items: - result.dictdef.generalize_key(self.immutablevalue(ek)) - result.dictdef.generalize_value(self.immutablevalue(ev)) - result.dictdef.seen_prebuilt_key(ek) - 
seen_elements = len(items) - # if the dictionary grew during the iteration, - # start over again - result.const_box = key - return result + dictdef.dictkey.update_rdict_annotations(s_eqfn, + s_hashfn) + for ek, ev in x.iteritems(): + dictdef.generalize_key(self.immutablevalue(ek, False)) + dictdef.generalize_value(self.immutablevalue(ev, False)) + dictdef.seen_prebuilt_key(ek) + result = SomeDict(dictdef) elif tp is weakref.ReferenceType: x1 = x() if x1 is None: @@ -419,11 +441,11 @@ if hasattr(x, 'im_self') and hasattr(x, 'im_func'): # on top of PyPy, for cases like 'l.append' where 'l' is a # global constant list, the find_method() returns non-None - s_self = self.immutablevalue(x.im_self) + s_self = self.immutablevalue(x.im_self, need_const) result = s_self.find_method(x.im_func.__name__) elif hasattr(x, '__self__') and x.__self__ is not None: # for cases like 'l.append' where 'l' is a global constant list - s_self = self.immutablevalue(x.__self__) + s_self = self.immutablevalue(x.__self__, need_const) result = s_self.find_method(x.__name__) assert result is not None else: @@ -447,7 +469,8 @@ return s_None else: raise Exception("Don't know how to represent %r" % (x,)) - result.const = x + if need_const: + result.const = x return result def getdesc(self, pyobj): From noreply at buildbot.pypy.org Thu Aug 8 19:07:37 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 8 Aug 2013 19:07:37 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix in test-code and test both fastpaths of the read_barrier Message-ID: <20130808170737.398B21C11A6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66020:e16d104b5543 Date: 2013-08-08 19:07 +0200 http://bitbucket.org/pypy/pypy/changeset/e16d104b5543/ Log: fix in test-code and test both fastpaths of the read_barrier diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2364,7 +2364,8 @@ mc.ADD_rj(X86_64_SCRATCH_REG.value, rbc) else: # testing: mc.PUSH_r(eax.value) - mc.MOV(eax, heap(rbc)) + mc.MOV_ri(eax.value, rbc) + mc.MOV_rm(eax.value, (eax.value, 0)) mc.ADD(X86_64_SCRATCH_REG, eax) mc.POP_r(eax.value) mc.CMP_rm(loc_base.value, (X86_64_SCRATCH_REG.value, 0)) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -19,6 +19,7 @@ from rpython.jit.backend.llsupport.test.test_gc_integration import ( GCDescrShadowstackDirect, BaseTestRegalloc) from rpython.jit.backend.llsupport import jitframe +from rpython.memory.gc.stmgc import StmGC import ctypes CPU = getcpuclass() @@ -130,17 +131,16 @@ RESULT=lltype.Bool) self.generate_function('stm_ptr_ne', ptr_ne, [llmemory.GCREF] * 2, RESULT=lltype.Bool) - + def get_malloc_slowpath_addr(self): return None + def clear_barrier_lists(self): + self.rb_called_on[:] = [] + self.wb_called_on[:] = [] + class TestGcStm(BaseTestRegalloc): - def get_priv_rev_num(self): - return rffi.cast(lltype.Signed, self.priv_rev_num) - - def get_read_cache(self): - return rffi.cast(lltype.Signed, self.read_cache_adr) def setup_method(self, meth): cpu = CPU(None, None) @@ -150,18 +150,16 @@ TP = rffi.CArray(lltype.Signed) self.priv_rev_num = lltype.malloc(TP, 1, flavor='raw') - self.read_cache = lltype.malloc(TP, n=65536 / WORD, flavor='raw') - self.read_cache_adr = lltype.malloc(TP, 1, flavor='raw') - 
self.read_cache_adr[0] = rffi.cast(lltype.Signed, self.read_cache) + self.clear_read_cache() cpu.assembler._get_stm_private_rev_num_addr = self.get_priv_rev_num cpu.assembler._get_stm_read_barrier_cache_addr = self.get_read_cache S = lltype.GcForwardReference() S.become(lltype.GcStruct( - 'S', ('h_tid', lltype.Signed), + 'S', ('h_tid', lltype.Unsigned), ('h_revision', lltype.Signed), - ('h_original', lltype.Signed))) + ('h_original', lltype.Unsigned))) cpu.gc_ll_descr.fielddescr_tid = None # not needed # = cpu.fielddescrof(S, 'h_tid') self.S = S @@ -170,40 +168,82 @@ def teardown_method(self, meth): rffi.aroundstate._cleanup_() - def assert_in_read_barrier(self, *args): - rb_called_on = self.cpu.gc_ll_descr.rb_called_on + def assert_in(self, called_on, *args): for i, ref in enumerate(args): - assert rffi.cast_ptr_to_adr(ref) == rb_called_on[i] - def assert_not_in_read_barrier(self, *args): - rb_called_on = self.cpu.gc_ll_descr.rb_called_on + assert rffi.cast_ptr_to_adr(ref) == called_on[i] + + def assert_not_in(self, called_on, *args): for ref in args: - assert not rffi.cast_ptr_to_adr(ref) in rb_called_on + assert rffi.cast_ptr_to_adr(ref) not in called_on + + def get_priv_rev_num(self): + return rffi.cast(lltype.Signed, self.priv_rev_num) + + def get_read_cache(self): + return rffi.cast(lltype.Signed, self.read_cache_adr) + + def clear_read_cache(self): + TP = rffi.CArray(lltype.Signed) + entries = (StmGC.FX_MASK + 1) / WORD + self.read_cache = lltype.malloc(TP, n=entries, flavor='raw', + track_allocation=False, zero=True) + self.read_cache_adr = lltype.malloc(TP, 1, flavor='raw', + track_allocation=False) + self.read_cache_adr[0] = rffi.cast(lltype.Signed, self.read_cache) + + def set_cache_item(self, obj): + obj_int = rffi.cast(lltype.Signed, obj) + idx = (obj_int & StmGC.FX_MASK) / WORD + self.read_cache[idx] = obj_int + + def allocate_prebuilt_s(self, tid=66): + s = lltype.malloc(self.S, zero=True) + s.h_tid = rffi.cast(lltype.Unsigned, StmGC.PREBUILT_FLAGS | tid) + s.h_revision = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) + return s def test_read_barrier_fastpath(self): cpu = self.cpu cpu.setup_once() - PRIV_REV = 3 + PRIV_REV = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) self.priv_rev_num[0] = PRIV_REV - for rev in [PRIV_REV, PRIV_REV+1]: - s = lltype.malloc(self.S) + called_on = cpu.gc_ll_descr.rb_called_on + for rev in [PRIV_REV+4, PRIV_REV]: + cpu.gc_ll_descr.clear_barrier_lists() + self.clear_read_cache() + + s = self.allocate_prebuilt_s() sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - s.h_tid = 0 s.h_revision = rev + p0 = BoxPtr() operations = [ - ResOperation(rop.COND_CALL_STM_B, [p0,], None, + ResOperation(rop.COND_CALL_STM_B, [p0], None, descr=self.p2rd), - ResOperation(rop.FINISH, [p0], None, descr=BasicFinalDescr(0)), + ResOperation(rop.FINISH, [p0], None, + descr=BasicFinalDescr(0)), ] inputargs = [p0] looptoken = JitCellToken() cpu.compile_loop(inputargs, operations, looptoken) self.cpu.execute_token(looptoken, sgcref) + + # check if rev-fastpath worked if rev == PRIV_REV: # fastpath - self.assert_not_in_read_barrier(sgcref) + assert not called_on else: - self.assert_in_read_barrier(sgcref) + self.assert_in(called_on, sgcref) + + # now add it to the read-cache and check + # that it will never call the read_barrier + cpu.gc_ll_descr.clear_barrier_lists() + self.set_cache_item(sgcref) + + self.cpu.execute_token(looptoken, sgcref) + # not called: + assert not called_on + From noreply at buildbot.pypy.org Fri Aug 9 00:11:41 2013 From: noreply at 
buildbot.pypy.org (stian) Date: Fri, 9 Aug 2013 00:11:41 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int-ops: Support the shift ops. Message-ID: <20130808221141.69DA21C00F4@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int-ops Changeset: r66021:f49ccf659096 Date: 2013-08-08 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/f49ccf659096/ Log: Support the shift ops. diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -285,6 +285,14 @@ space.wrap("shift count too large")) return W_LongObject(w_long1.num.lshift(shift)) +def lshift__Long_Int(space, w_long1, w_int2): + # XXX need to replicate some of the logic, to get the errors right + if w_int2.intval < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative shift count")) + + return W_LongObject(w_long1.num.lshift(w_int2.intval)) + def rshift__Long_Long(space, w_long1, w_long2): # XXX need to replicate some of the logic, to get the errors right if w_long2.num.sign < 0: @@ -297,6 +305,14 @@ space.wrap("shift count too large")) return newlong(space, w_long1.num.rshift(shift)) +def rshift__Long_Int(space, w_long1, w_int2): + # XXX need to replicate some of the logic, to get the errors right + if w_int2.intval < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative shift count")) + + return newlong(space, w_long1.num.rshift(w_int2.intval)) + def and__Long_Long(space, w_long1, w_long2): return newlong(space, w_long1.num.and_(w_long2.num)) From noreply at buildbot.pypy.org Fri Aug 9 00:11:44 2013 From: noreply at buildbot.pypy.org (stian) Date: Fri, 9 Aug 2013 00:11:44 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int-ops: Support mul ops Message-ID: <20130808221144.072A81C1190@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int-ops Changeset: r66023:3d921becf5f9 Date: 2013-08-08 21:26 +0200 http://bitbucket.org/pypy/pypy/changeset/3d921becf5f9/ Log: Support mul ops diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -204,6 +204,9 @@ def mul__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.mul(w_long2.num)) +def mul__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_mul(w_int2.intval)) + def truediv__Long_Long(space, w_long1, w_long2): try: f = w_long1.num.truediv(w_long2.num) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -661,6 +661,37 @@ return result @jit.elidable + def int_mul(self, b): + if not int_in_valid_range(b): + # Fallback to long. 
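#       (int_in_valid_range(), introduced with the int_add/int_sub changeset in
#        this series, is the guard shared by all of these int_* fastpaths: it
#        only rejects a machine int whose absolute value would need a second
#        digit, e.g. the most negative int when SHIFT == LONG_BIT - 1; such a
#        value is simply boxed with rbigint.fromint() and sent through the
#        existing bigint path.)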
+ return self.mul(rbigint.fromint(b)) + + asize = self.numdigits() + digit = abs(b) + bsign = -1 if b < 0 else 1 + + if self.sign == 0 or b == 0: + return NULLRBIGINT + + if digit == 1: + return rbigint(self._digits[:self.size], self.sign * bsign, asize) + elif asize == 1: + res = self.widedigit(0) * digit + carry = res >> SHIFT + if carry: + return rbigint([_store_digit(res & MASK), _store_digit(carry)], self.sign * bsign, 2) + else: + return rbigint([_store_digit(res & MASK)], self.sign * bsign, 1) + + elif digit & (digit - 1) == 0: + result = self.lqshift(ptwotable[digit]) + else: + result = _muladd1(self, digit) + + result.sign = self.sign * bsign + return result + + @jit.elidable def truediv(self, other): div = _bigint_true_divide(self, other) return div diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -282,6 +282,17 @@ result = f1.mul(f1) assert result.tolong() == x * x + def test_int_mul(self): + x = -1238585838347L + y = 3 + for i in [-1, 1]: + for j in [-1, 1]: + f1 = rbigint.fromlong(x * i) + f2 = y * j + result = f1.int_mul(f2) + assert result.tolong() == (x * i) * (y * j) + + def test_tofloat(self): x = 12345678901234567890L ** 10 f1 = rbigint.fromlong(x) From noreply at buildbot.pypy.org Fri Aug 9 00:11:42 2013 From: noreply at buildbot.pypy.org (stian) Date: Fri, 9 Aug 2013 00:11:42 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int-ops: Support add and sub ops Message-ID: <20130808221142.BFFF01C0397@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int-ops Changeset: r66022:ec59bbb3636b Date: 2013-08-08 20:35 +0200 http://bitbucket.org/pypy/pypy/changeset/ec59bbb3636b/ Log: Support add and sub ops diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -192,9 +192,15 @@ def add__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.add(w_long2.num)) +def add__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_add(w_int2.intval)) + def sub__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.sub(w_long2.num)) +def sub__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_sub(w_int2.intval)) + def mul__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.mul(w_long2.num)) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -44,6 +44,23 @@ MASK = int((1 << SHIFT) - 1) FLOAT_MULTIPLIER = float(1 << SHIFT) +# For BIGINT and INT mix. +# +# The VALID range of an int is different than a valid range of a bigint of length one. +# -1 << LONG_BIT is actually TWO digits, because they are stored without the sign. +if SHIFT == LONG_BIT - 1: + MIN_INT_VALUE = -1 << SHIFT + def int_in_valid_range(x): + if x == MIN_INT_VALUE: + return False + return True +else: + # Means we don't have INT128 on 64bit. + def int_in_valid_range(x): + if x > MASK or x < -MASK: + return False + return True + # Debugging digit array access. # # False == no checking at all @@ -551,6 +568,25 @@ return result @jit.elidable + def int_add(self, other): + if not int_in_valid_range(other): + # Fallback to long. 
+ return self.add(rbigint.fromint(other)) + elif self.sign == 0: + return rbigint.fromint(other) + elif other == 0: + return self + + sign = -1 if other < 0 else 1 + if self.sign == sign: + result = _x_int_add(self, other) + else: + result = _x_int_sub(self, other) + result.sign *= -1 + result.sign *= sign + return result + + @jit.elidable def sub(self, other): if other.sign == 0: return self @@ -564,6 +600,22 @@ return result @jit.elidable + def int_sub(self, other): + if not int_in_valid_range(other): + # Fallback to long. + return self.sub(rbigint.fromint(other)) + elif other == 0: + return self + elif self.sign == 0: + return rbigint.fromint(-other) + elif self.sign == (-1 if other < 0 else 1): + result = _x_int_sub(self, other) + else: + result = _x_int_add(self, other) + result.sign *= self.sign + return result + + @jit.elidable def mul(self, b): asize = self.numdigits() bsize = b.numdigits() @@ -1129,6 +1181,25 @@ z._normalize() return z +def _x_int_add(a, b): + """ Add the absolute values of one bigint and one integer. """ + size_a = a.numdigits() + + z = rbigint([NULLDIGIT] * (size_a + 1), 1) + i = UDIGIT_TYPE(1) + carry = a.udigit(0) + abs(b) + z.setdigit(0, carry) + carry >>= SHIFT + + while i < size_a: + carry += a.udigit(i) + z.setdigit(i, carry) + carry >>= SHIFT + i += 1 + z.setdigit(i, carry) + z._normalize() + return z + def _x_sub(a, b): """ Subtract the absolute values of two integers. """ @@ -1175,6 +1246,42 @@ z._normalize() return z +def _x_int_sub(a, b): + """ Subtract the absolute values of two integers. """ + + size_a = a.numdigits() + + bdigit = abs(b) + + if size_a == 1: + # Find highest digit where a and b differ: + adigit = a.digit(0) + + if adigit == bdigit: + return NULLRBIGINT + + return rbigint.fromint(adigit - bdigit) + + z = rbigint([NULLDIGIT] * size_a, 1, size_a) + i = _load_unsigned_digit(1) + # The following assumes unsigned arithmetic + # works modulo 2**N for some N>SHIFT. + borrow = a.udigit(0) - bdigit + z.setdigit(0, borrow) + borrow >>= SHIFT + #borrow &= 1 # Keep only one sign bit + + while i < size_a: + borrow = a.udigit(i) - borrow + z.setdigit(i, borrow) + borrow >>= SHIFT + #borrow &= 1 + i += 1 + + assert borrow == 0 + z._normalize() + return z + # A neat little table of power of twos. 
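#   (this is the table consulted by the digit & (digit - 1) == 0 branch of
#    int_mul above: an exact power-of-two multiplier is handled with lqshift
#    instead of going through _muladd1.)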
ptwotable = {} for x in range(SHIFT-1): diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -237,6 +237,16 @@ result = f1.add(f2) assert result.tolong() == x * i + y * j + def test_int_add(self): + x = 123456789123456789000000L + y = 9999 + for i in [-1, 1]: + for j in [-1, 1]: + f1 = rbigint.fromlong(x * i) + f2 = y * j + result = f1.int_add(f2) + assert result.tolong() == x * i + y * j + def test_sub(self): x = 12378959520302182384345L y = 88961284756491823819191823L @@ -247,6 +257,16 @@ result = f1.sub(f2) assert result.tolong() == x * i - y * j + def test_int_sub(self): + x = 12378959520302182384345L + y = 8888 + for i in [-1, 1]: + for j in [-1, 1]: + f1 = rbigint.fromlong(x * i) + f2 = y * j + result = f1.int_sub(f2) + assert result.tolong() == x * i - y * j + def test_subzz(self): w_l0 = rbigint.fromint(0) assert w_l0.sub(w_l0).tolong() == 0 From noreply at buildbot.pypy.org Fri Aug 9 00:11:45 2013 From: noreply at buildbot.pypy.org (stian) Date: Fri, 9 Aug 2013 00:11:45 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int-ops: Support the modulo op Message-ID: <20130808221145.2DCB71C11A6@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int-ops Changeset: r66024:5ce749f54baf Date: 2013-08-08 22:49 +0200 http://bitbucket.org/pypy/pypy/changeset/5ce749f54baf/ Log: Support the modulo op diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -237,6 +237,14 @@ space.wrap("long division or modulo by zero")) return newlong(space, z) +def mod__Long_Int(space, w_long1, w_int2): + try: + z = w_long1.num.int_mod(w_int2.intval) + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, + space.wrap("long division or modulo by zero")) + return newlong(space, z) + def divmod__Long_Long(space, w_long1, w_long2): try: div, mod = w_long1.num.divmod(w_long2.num) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -755,6 +755,48 @@ return mod @jit.elidable + def int_mod(self, other): + if self.sign == 0: + return NULLRBIGINT + + elif not int_in_valid_range(other): + # Fallback to long. + return self.mod(rbigint.fromint(other)) + + elif other != 0: + digit = abs(other) + if digit == 1: + return NULLRBIGINT + elif digit == 2: + modm = self.digit(0) & 1 + if modm: + return ONENEGATIVERBIGINT if other < 0 else ONERBIGINT + return NULLRBIGINT + elif digit & (digit - 1) == 0: + mod = self.and_(rbigint([_store_digit(digit - 1)], 1, 1)) + else: + # Perform + size = self.numdigits() - 1 + if size > 0: + rem = self.widedigit(size) + size -= 1 + while size >= 0: + rem = ((rem << SHIFT) + self.widedigit(size)) % digit + size -= 1 + else: + rem = self.digit(0) % digit + + if rem == 0: + return NULLRBIGINT + mod = rbigint([_store_digit(rem)], -1 if self.sign < 0 else 1, 1) + else: + raise ZeroDivisionError("long division or modulo by zero") + + if mod.sign * (-1 if other < 0 else 1) == -1: + mod = mod.int_add(other) + return mod + + @jit.elidable def divmod(v, w): """ The / and % operators are now defined in terms of divmod(). 
diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -106,6 +106,15 @@ print op1, op2 assert r1.tolong() == r2 + def test_int_mod(self): + for op1 in [-50, -12, -2, -1, 1, 2, 50, 52]: + for op2 in [-4, -2, -1, 1, 2, 8]: + rl_op1 = rbigint.fromint(op1) + r1 = rl_op1.int_mod(op2) + r2 = op1 % op2 + print op1, op2 + assert r1.tolong() == r2 + def test_pow(self): for op1 in [-50, -12, -2, -1, 1, 2, 50, 52]: for op2 in [0, 1, 2, 8, 9, 10, 11]: From noreply at buildbot.pypy.org Fri Aug 9 00:11:46 2013 From: noreply at buildbot.pypy.org (stian) Date: Fri, 9 Aug 2013 00:11:46 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int-ops: Support Long Int compare ops Message-ID: <20130808221146.6BC2D1C00F4@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int-ops Changeset: r66025:7500e0cc59b1 Date: 2013-08-09 00:10 +0200 http://bitbucket.org/pypy/pypy/changeset/7500e0cc59b1/ Log: Support Long Int compare ops diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -155,17 +155,17 @@ return space.newbool(w_long1.num.ge(w_long2.num)) def lt__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.lt(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_lt(w_int2.intval)) def le__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.le(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_le(w_int2.intval)) def eq__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.eq(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_eq(w_int2.intval)) def ne__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.ne(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_ne(w_int2.intval)) def gt__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.gt(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_gt(w_int2.intval)) def ge__Long_Int(space, w_long1, w_int2): - return space.newbool(w_long1.num.ge(rbigint.fromint(w_int2.intval))) + return space.newbool(w_long1.num.int_ge(w_int2.intval)) def lt__Int_Long(space, w_int1, w_long2): return space.newbool(rbigint.fromint(w_int1.intval).lt(w_long2.num)) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -499,10 +499,27 @@ i += 1 return True + @jit.elidable + def int_eq(self, other): + """ eq with int """ + + if not int_in_valid_range(other): + # Fallback to Long. + return self.eq(rbigint.fromint(other)) + + if self.numdigits() > 1: + return False + + return (self.sign * self.digit(0)) == other + @jit.look_inside def ne(self, other): return not self.eq(other) + @jit.look_inside + def int_ne(self, other): + return not self.int_eq(other) + @jit.elidable def lt(self, other): if self.sign > other.sign: @@ -538,18 +555,67 @@ i -= 1 return False + @jit.elidable + def int_lt(self, other): + """ lt where other is an int """ + + if not int_in_valid_range(other): + # Fallback to Long. 
+ return self.lt(rbigint.fromint(other)) + + osign = 1 + if other == 0: + osign = 0 + elif other < 0: + osign = -1 + + if self.sign > osign: + return False + elif self.sign < osign: + return True + + digits = self.numdigits() + + if digits > 1: + if osign == 1: + return False + else: + return True + + d1 = self.sign * self.digit(0) + if d1 < other: + return True + return False + @jit.look_inside def le(self, other): return not other.lt(self) @jit.look_inside + def int_le(self, other): + # Alternative that might be faster, reimplant this. as a check with other + 1. But we got to check for overflow + # or reduce valid range. + + if self.int_eq(other): + return True + return self.int_lt(other) + + @jit.look_inside def gt(self, other): return other.lt(self) @jit.look_inside + def int_gt(self, other): + return not self.int_le(other) + + @jit.look_inside def ge(self, other): return not self.lt(other) + @jit.look_inside + def int_ge(self, other): + return not self.int_lt(other) + @jit.elidable def hash(self): return _hash(self) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -375,6 +375,13 @@ f2 = rbigint.fromlong(y) assert (x < y) == f1.lt(f2) + def test_int_lt(self): + val = [0, 0x11111111, 0x11111112] + for x in gen_signs(val): + for y in gen_signs(val): + f1 = rbigint.fromlong(x) + assert (x < y) == f1.int_lt(y) + def test_order(self): f6 = rbigint.fromint(6) f7 = rbigint.fromint(7) @@ -383,6 +390,14 @@ assert (f6.gt(f6), f6.gt(f7), f7.gt(f6)) == (0,0,1) assert (f6.ge(f6), f6.ge(f7), f7.ge(f6)) == (1,0,1) + def test_int_order(self): + f6 = rbigint.fromint(6) + f7 = rbigint.fromint(7) + assert (f6.int_lt(6), f6.int_lt(7), f7.int_lt(6)) == (0,1,0) + assert (f6.int_le(6), f6.int_le(7), f7.int_le(6)) == (1,1,0) + assert (f6.int_gt(6), f6.int_gt(7), f7.int_gt(6)) == (0,0,1) + assert (f6.int_ge(6), f6.int_ge(7), f7.int_ge(6)) == (1,0,1) + def test_int_conversion(self): f1 = rbigint.fromlong(12332) f2 = rbigint.fromint(12332) From noreply at buildbot.pypy.org Fri Aug 9 02:33:53 2013 From: noreply at buildbot.pypy.org (stian) Date: Fri, 9 Aug 2013 02:33:53 +0200 (CEST) Subject: [pypy-commit] pypy bigint-with-int-ops: Support the binary xor/or/and ops. Support Long Int compare. Message-ID: <20130809003353.11A4E1C134C@cobra.cs.uni-duesseldorf.de> Author: stian Branch: bigint-with-int-ops Changeset: r66026:f9a280100f28 Date: 2013-08-09 02:33 +0200 http://bitbucket.org/pypy/pypy/changeset/f9a280100f28/ Log: Support the binary xor/or/and ops. Support Long Int compare. - pidigits improve performance by 12%. - INT.__rsub__(LONG) doesn't return NotImplanted anymore (causes an objectspace test to fail) - lib-python tests pass, test_rbigint pass. 
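As a quick sketch of what this series is aiming at (written against the rbigint API shown in these diffs, not taken from the branch itself): every int_* method is meant to give the same answer as the corresponding bigint-bigint method while skipping the rbigint.fromint() boxing of the machine-int operand, and it falls back to exactly that boxing when the int does not fit in a single digit.

    from rpython.rlib.rbigint import rbigint

    big = rbigint.fromlong(12345678901234567890L)
    assert big.int_add(7).eq(big.add(rbigint.fromint(7)))
    assert big.int_mul(-3).eq(big.mul(rbigint.fromint(-3)))
    assert big.int_and_(0xff).eq(big.and_(rbigint.fromint(0xff)))
    assert big.int_lt(0) == big.lt(rbigint.fromint(0))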
diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -168,17 +168,17 @@ return space.newbool(w_long1.num.int_ge(w_int2.intval)) def lt__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).lt(w_long2.num)) + return space.newbool(w_long2.num.int_gt(w_int1.intval)) def le__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).le(w_long2.num)) + return space.newbool(w_long2.num.int_ge(w_int1.intval)) def eq__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).eq(w_long2.num)) + return space.newbool(w_long2.num.int_eq(w_int1.intval)) def ne__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).ne(w_long2.num)) + return space.newbool(w_long2.num.int_ne(w_int1.intval)) def gt__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).gt(w_long2.num)) + return space.newbool(w_long2.num.int_lt(w_int1.intval)) def ge__Int_Long(space, w_int1, w_long2): - return space.newbool(rbigint.fromint(w_int1.intval).ge(w_long2.num)) + return space.newbool(w_long2.num.int_le(w_int1.intval)) def hash__Long(space, w_value): @@ -333,12 +333,21 @@ def and__Long_Long(space, w_long1, w_long2): return newlong(space, w_long1.num.and_(w_long2.num)) +def and__Long_Int(space, w_long1, w_int2): + return newlong(space, w_long1.num.int_and_(w_int2.intval)) + def xor__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.xor(w_long2.num)) +def xor__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_xor(w_int2.intval)) + def or__Long_Long(space, w_long1, w_long2): return W_LongObject(w_long1.num.or_(w_long2.num)) +def or__Long_Int(space, w_long1, w_int2): + return W_LongObject(w_long1.num.int_or_(w_int2.intval)) + def oct__Long(space, w_long1): return space.wrap(w_long1.num.oct()) @@ -356,8 +365,22 @@ return (space.config.objspace.std.withsmalllong and sys.maxint == 2147483647) -# binary ops -for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', 'divmod', 'lshift']: +# binary ops with fast way to handle ints. +for opname in ['add', 'sub', 'mul', 'mod', 'lshift']: + exec compile(""" +def %(opname)s_ovr__Int_Int(space, w_int1, w_int2): + if recover_with_smalllong(space) and %(opname)r != 'truediv': + from pypy.objspace.std.smalllongobject import %(opname)s_ovr + return %(opname)s_ovr(space, w_int1, w_int2) + w_long1 = delegate_Int2Long(space, w_int1) + return %(opname)s__Long_Int(space, w_long1, w_int2) +""" % {'opname': opname}, '', 'exec') + + getattr(model.MM, opname).register(globals()['%s_ovr__Int_Int' % opname], + W_IntObject, W_IntObject, order=1) + +# binary ops without fast way to handle ints. +for opname in ['div', 'floordiv', 'truediv', 'divmod']: exec compile(""" def %(opname)s_ovr__Int_Int(space, w_int1, w_int2): if recover_with_smalllong(space) and %(opname)r != 'truediv': diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -61,6 +61,8 @@ return False return True +int_in_valid_range._always_inline_ = True + # Debugging digit array access. 
# # False == no checking at all @@ -656,9 +658,9 @@ def sub(self, other): if other.sign == 0: return self - if self.sign == 0: + elif self.sign == 0: return rbigint(other._digits[:other.size], -other.sign, other.size) - if self.sign == other.sign: + elif self.sign == other.sign: result = _x_sub(self, other) else: result = _x_add(self, other) @@ -732,13 +734,13 @@ # Fallback to long. return self.mul(rbigint.fromint(b)) + if self.sign == 0 or b == 0: + return NULLRBIGINT + asize = self.numdigits() digit = abs(b) bsign = -1 if b < 0 else 1 - if self.sign == 0 or b == 0: - return NULLRBIGINT - if digit == 1: return rbigint(self._digits[:self.size], self.sign * bsign, asize) elif asize == 1: @@ -775,7 +777,7 @@ if mod.sign * other.sign == -1: if div.sign == 0: return ONENEGATIVERBIGINT - div = div.sub(ONERBIGINT) + div = div.int_sub(1) return div @@ -798,7 +800,7 @@ return ONENEGATIVERBIGINT if other.sign == -1 else ONERBIGINT return NULLRBIGINT elif digit & (digit - 1) == 0: - mod = self.and_(rbigint([_store_digit(digit - 1)], 1, 1)) + mod = self.int_and_(digit - 1) else: # Perform size = self.numdigits() - 1 @@ -839,7 +841,7 @@ return ONENEGATIVERBIGINT if other < 0 else ONERBIGINT return NULLRBIGINT elif digit & (digit - 1) == 0: - mod = self.and_(rbigint([_store_digit(digit - 1)], 1, 1)) + mod = self.int_and_(digit - 1) else: # Perform size = self.numdigits() - 1 @@ -885,7 +887,7 @@ mod = mod.add(w) if div.sign == 0: return ONENEGATIVERBIGINT, mod - div = div.sub(ONERBIGINT) + div = div.int_sub(1) return div, mod @jit.elidable @@ -1037,7 +1039,7 @@ if self.sign == 0: return ONENEGATIVERBIGINT - ret = self.add(ONERBIGINT) + ret = self.int_add(1) ret.sign = -ret.sign return ret @@ -1135,14 +1137,26 @@ return _bitwise(self, '&', other) @jit.elidable + def int_and_(self, other): + return _int_bitwise(self, '&', other) + + @jit.elidable def xor(self, other): return _bitwise(self, '^', other) @jit.elidable + def int_xor(self, other): + return _int_bitwise(self, '^', other) + + @jit.elidable def or_(self, other): return _bitwise(self, '|', other) @jit.elidable + def int_or_(self, other): + return _int_bitwise(self, '|', other) + + @jit.elidable def oct(self): if self.sign == 0: return '0L' @@ -2496,6 +2510,89 @@ return z.invert() _bitwise._annspecialcase_ = "specialize:arg(1)" +def _int_bitwise(a, op, b): # '&', '|', '^' + """ Bitwise and/or/xor operations """ + + if not int_in_valid_range(b): + # Fallback to long. + return _bitwise(a, op, rbigint.fromint(b)) + + if a.sign < 0: + a = a.invert() + maska = MASK + else: + maska = 0 + if b < 0: + b = ~b + maskb = MASK + else: + maskb = 0 + + negz = 0 + if op == '^': + if maska != maskb: + maska ^= MASK + negz = -1 + elif op == '&': + if maska and maskb: + op = '|' + maska ^= MASK + maskb ^= MASK + negz = -1 + elif op == '|': + if maska or maskb: + op = '&' + maska ^= MASK + maskb ^= MASK + negz = -1 + + # JRH: The original logic here was to allocate the result value (z) + # as the longer of the two operands. However, there are some cases + # where the result is guaranteed to be shorter than that: AND of two + # positives, OR of two negatives: use the shorter number. AND with + # mixed signs: use the positive number. OR with mixed signs: use the + # negative number. After the transformations above, op will be '&' + # iff one of these cases applies, and mask will be non-0 for operands + # whose length should be ignored. 
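#       (worked example of the sign handling described above, for '&' of two
#        negative operands: both sides were inverted at the top, the op is
#        switched to '|' and negz is set, i.e.  a & b  is computed as
#        ~(~a | ~b).  With a = -2, b = -3:  ~a = 1, ~b = 2, ~(1 | 2) = ~3 = -4,
#        which is indeed (-2) & (-3).  A non-zero mask marks an operand whose
#        missing high digits are all one bits, which is why its stored length
#        can be ignored in the size computation below.)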
+ + size_a = a.numdigits() + if op == '&': + if maska: + size_z = 1 + else: + if maskb: + size_z = size_a + else: + size_z = 1 + else: + size_z = size_a + + z = rbigint([NULLDIGIT] * size_z, 1, size_z) + i = 0 + while i < size_z: + if i < size_a: + diga = a.digit(i) ^ maska + else: + diga = maska + if i == 0: + digb = b ^ maskb + else: + digb = maskb + + if op == '&': + z.setdigit(i, diga & digb) + elif op == '|': + z.setdigit(i, diga | digb) + elif op == '^': + z.setdigit(i, diga ^ digb) + i += 1 + + z._normalize() + if negz == 0: + return z + + return z.invert() +_int_bitwise._annspecialcase_ = "specialize:arg(1)" ULONGLONG_BOUND = r_ulonglong(1L << (r_longlong.BITS-1)) LONGLONG_MIN = r_longlong(-(1L << (r_longlong.BITS-1))) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -550,6 +550,15 @@ res2 = getattr(operator, mod)(x, y) assert res1 == res2 + def test_int_bitwise(self): + for x in gen_signs([0, 1, 5, 11, 42, 43, 2 ** 30]): + for y in gen_signs([0, 1, 5, 11, 42, 43, 3 ** 30, 2 ** 31]): + lx = rbigint.fromlong(x) + for mod in "xor and_ or_".split(): + res1 = getattr(lx, 'int_' + mod)(y).tolong() + res2 = getattr(operator, mod)(x, y) + assert res1 == res2 + def test_mul_eq_shift(self): p2 = rbigint.fromlong(1).lshift(63) f1 = rbigint.fromlong(0).lshift(63) From noreply at buildbot.pypy.org Fri Aug 9 03:14:42 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Fri, 9 Aug 2013 03:14:42 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: adding debug checks Message-ID: <20130809011442.DA3DE1C00F4@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66027:ced33fb96251 Date: 2013-08-09 13:13 +1200 http://bitbucket.org/pypy/pypy/changeset/ced33fb96251/ Log: adding debug checks diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1003,14 +1003,88 @@ (self.card_page_shift + 3))) def debug_check_consistency(self): + if self.DEBUG: - ll_assert(not self.young_rawmalloced_objects, - "young raw-malloced objects in a major collection") - ll_assert(not self.young_objects_with_weakrefs.non_empty(), - "young objects with weakrefs in a major collection") - MovingGCBase.debug_check_consistency(self) + + # somewhat of a hack + # some states require custom prep and cleanup + # before calling the check_object functions + already_checked = False + + if self.gc_state == STATE_SCANNING: + # We are just starting a scan. Same as a non incremental here. 
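#               (in tri-colour terms: SCANNING keeps the old non-incremental
#                assertions below; MARKING additionally snapshots
#                objects_to_trace into a dict so that
#                _debug_check_object_marking can verify that every GRAY object
#                is still pending and that a VISITED (black) object never
#                points to a white one -- the invariant an incremental
#                collector's barriers have to preserve.)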
+ ll_assert(not self.young_rawmalloced_objects, + "young raw-malloced objects in a major collection") + ll_assert(not self.young_objects_with_weakrefs.non_empty(), + "young objects with weakrefs in a major collection") + elif self.gc_state == STATE_MARKING: + self._debug_objects_to_trace_dict = \ + self.objects_to_trace.stack2dict() + MovingGCBase.debug_check_consistency(self) + self._debug_objects_to_trace_dict.delete() + already_checked = True + elif self.gc_state == STATE_SWEEPING_RAWMALLOC: + pass + elif self.gc_state == STATE_SWEEPING_ARENA: + pass + elif self.gc_state == STATE_FINALIZING: + pass + else: + ll_assert(False,"uknown gc_state value") + + if not already_checked: + MovingGCBase.debug_check_consistency(self) + def debug_check_object(self, obj): + + ll_assert((self.header(obj).tid & GCFLAG_GRAY != 0 + and self.header(obj).tid & GCFLAG_VISITED != 0) == False, + "object gray and visited at the same time." ) + + if self.gc_state == STATE_SCANNING: + self._debug_check_object_scanning(obj) + elif self.gc_state == STATE_MARKING: + self._debug_check_object_marking(obj) + elif self.gc_state == STATE_SWEEPING_RAWMALLOC: + self._debug_check_object_sweeping_rawmalloc(obj) + elif self.gc_state == STATE_SWEEPING_ARENA: + self._debug_check_object_sweeping_arena(obj) + elif self.gc_state == STATE_FINALIZING: + self._debug_check_object_finalizing(obj) + else: + ll_assert(False,"uknown gc_state value") + + def _debug_check_object_marking(self, obj): + if self.header(obj).tid & GCFLAG_VISITED != 0: + # Visited, should NEVER point to a white object. + self.trace(obj,self._debug_check_not_white,None) + + if self.header(obj).tid & GCFLAG_GRAY != 0: + ll_assert(self._debug_objects_to_trace_dict.contains(obj), + "gray object not in pending trace list.") + else: + ll_assert(not self._debug_objects_to_trace_dict.contains(obj), + "non gray object in pending trace list.") + + def _debug_check_not_white(self, root, ignored): + obj = root.address[0] + ll_assert(self.header(obj).tid & (GCFLAG_GRAY | GCFLAG_VISITED) != 0, + "visited object points to unprocessed (white) object." ) + + def _debug_check_object_sweeping_rawmalloc(self, obj): + pass + + def _debug_check_object_sweeping_arena(self, obj): + pass + + def _debug_check_object_finalizing(self,obj): + pass + + def _debug_check_object_scanning(self, obj): + # This check is called before scanning starts. + # scanning is done in a single step. + # after a minor or major collection, no object should be in the nursery ll_assert(not self.is_in_nursery(obj), "object in nursery after collection") @@ -1024,7 +1098,7 @@ ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0, "unexpected GCFLAG_VISITED") - # the GCFLAG_VISITED should never be set at the start of a collection + # the GCFLAG_GRAY should never be set at the start of a collection ll_assert(self.header(obj).tid & GCFLAG_GRAY == 0, "unexpected GCFLAG_GRAY") @@ -1650,6 +1724,7 @@ # Debugging checks ll_assert(self.nursery_free == self.nursery, "nursery not empty in major_collection_step()") + self.debug_check_consistency() # XXX currently very course increments, get this working then split @@ -1662,9 +1737,9 @@ self.gc_state = STATE_MARKING #END SCANNING elif self.gc_state == STATE_MARKING: - # XXX need a heuristic to tell how many objects to mark. 
# Maybe based on previous mark time average + self.debug_check_consistency() self.visit_all_objects_step(1) # XXX A simplifying assumption that should be checked, @@ -1692,12 +1767,12 @@ # # Walk all rawmalloced objects and free the ones that don't # have the GCFLAG_VISITED flag. - # XXX heuristic here? + # XXX heuristic here to decide nobjects. if self.free_unvisited_rawmalloc_objects_step(1): + #malloc objects freed self.gc_state = STATE_SWEEPING_ARENA elif self.gc_state == STATE_SWEEPING_ARENA: - # # Ask the ArenaCollection to visit all objects. Free the ones # that have not been visited above, and reset GCFLAG_VISITED on @@ -1761,9 +1836,8 @@ # We start in scanning state ll_assert(self.gc_state == STATE_SCANNING, "Scan start state incorrect") - self.debug_check_consistency() self.major_collection_step(reserving_size) - ll_assert(self.gc_state == STATE_MARKING, "Initial Scan did not complete") + ll_assert(self.gc_state == STATE_MARKING, "initial scan did not complete") while self.gc_state != STATE_SCANNING: self.major_collection_step(reserving_size) @@ -1858,7 +1932,11 @@ self.objects_to_trace.append(obj) def _collect_ref_rec(self, root, ignored): - self.objects_to_trace.append(root.address[0]) + obj = root.address[0] + if self.header(obj).tid & GCFLAG_VISITED != 0: + return + self.header(obj).tid |= GCFLAG_GRAY + self.objects_to_trace.append(obj) def visit_all_objects(self): pending = self.objects_to_trace @@ -1867,10 +1945,12 @@ self.visit(obj) def visit_all_objects_step(self,nobjects=1): - # Objects can be added to pending by visit_step + # Objects can be added to pending by visit pending = self.objects_to_trace while nobjects > 0 and pending.non_empty(): obj = pending.pop() + ll_assert(self.header(obj).tid & GCFLAG_GRAY == 0, + "non gray object being traced") self.visit(obj) nobjects -= 1 @@ -1891,8 +1971,9 @@ # # It's the first time. We set the flag. hdr.tid |= GCFLAG_VISITED - #visited objects are no longer grey + # visited objects are no longer grey hdr.tid &= ~GCFLAG_GRAY + if not self.has_gcptr(llop.extract_ushort(llgroup.HALFWORD, hdr.tid)): return # From noreply at buildbot.pypy.org Fri Aug 9 07:21:24 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Fri, 9 Aug 2013 07:21:24 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: fixed tests Message-ID: <20130809052124.AA5171C3235@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66028:40efb4fdcb00 Date: 2013-08-09 16:36 +1200 http://bitbucket.org/pypy/pypy/changeset/40efb4fdcb00/ Log: fixed tests diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1064,8 +1064,11 @@ ll_assert(self._debug_objects_to_trace_dict.contains(obj), "gray object not in pending trace list.") else: - ll_assert(not self._debug_objects_to_trace_dict.contains(obj), - "non gray object in pending trace list.") + #if not gray and not black + if self.header(obj).tid & GCFLAG_VISITED == 0: + if self.header(obj).tid & GCFLAG_NO_HEAP_PTRS == 0: + ll_assert(not self._debug_objects_to_trace_dict.contains(obj), + "white object in pending trace list.") def _debug_check_not_white(self, root, ignored): obj = root.address[0] @@ -1739,7 +1742,6 @@ elif self.gc_state == STATE_MARKING: # XXX need a heuristic to tell how many objects to mark. 
# Maybe based on previous mark time average - self.debug_check_consistency() self.visit_all_objects_step(1) # XXX A simplifying assumption that should be checked, @@ -1747,7 +1749,6 @@ # they do not need a seperate state and do not need to be # made incremental. if not self.objects_to_trace.non_empty(): - if self.objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() @@ -1761,6 +1762,7 @@ self.deal_with_old_objects_with_finalizers() #objects_to_trace processed fully, can move on to sweeping self.gc_state = STATE_SWEEPING_RAWMALLOC + #prepare for the next state self.start_free_rawmalloc_objects() #END MARKING elif self.gc_state == STATE_SWEEPING_RAWMALLOC: @@ -1819,8 +1821,9 @@ # so should we make the calling incremental? or leave as is # Must be ready to start another scan + # just in case finalizer calls collect again. self.gc_state = STATE_SCANNING - # just in case finalizer calls collect again. + self.execute_finalizers() self.num_major_collects += 1 #END FINALIZING @@ -1846,19 +1849,19 @@ size_gc_header = self.gcheaderbuilder.size_gc_header obj = hdr + size_gc_header if self.header(obj).tid & GCFLAG_VISITED: - self.header(obj).tid &= ~GCFLAG_VISITED + self.header(obj).tid &= ~(GCFLAG_VISITED|GCFLAG_GRAY) return False # survives return True # dies def _reset_gcflag_visited(self, obj, ignored): - self.header(obj).tid &= ~GCFLAG_VISITED + self.header(obj).tid &= ~(GCFLAG_VISITED|GCFLAG_GRAY) def _set_gcflag_gray(self, obj, ignored): self.header(obj).tid |= GCFLAG_GRAY def free_rawmalloced_object_if_unvisited(self, obj): if self.header(obj).tid & GCFLAG_VISITED: - self.header(obj).tid &= ~GCFLAG_VISITED # survives + self.header(obj).tid &= ~(GCFLAG_VISITED|GCFLAG_GRAY) # survives self.old_rawmalloced_objects.append(obj) else: size_gc_header = self.gcheaderbuilder.size_gc_header @@ -1949,8 +1952,9 @@ pending = self.objects_to_trace while nobjects > 0 and pending.non_empty(): obj = pending.pop() - ll_assert(self.header(obj).tid & GCFLAG_GRAY == 0, - "non gray object being traced") + ll_assert(self.header(obj).tid & + (GCFLAG_GRAY|GCFLAG_VISITED|GCFLAG_NO_HEAP_PTRS) != 0, + "non gray or black object being traced") self.visit(obj) nobjects -= 1 @@ -1966,13 +1970,14 @@ # and the GCFLAG_VISITED will be reset at the end of the # collection. hdr = self.header(obj) + # visited objects are no longer grey + hdr.tid &= ~GCFLAG_GRAY if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS): return # # It's the first time. We set the flag. hdr.tid |= GCFLAG_VISITED - # visited objects are no longer grey - hdr.tid &= ~GCFLAG_GRAY + if not self.has_gcptr(llop.extract_ushort(llgroup.HALFWORD, hdr.tid)): return From noreply at buildbot.pypy.org Fri Aug 9 10:48:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 10:48:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Test was skipped on *both* ootype and lltype. Message-ID: <20130809084802.B69101C2442@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66029:cad523cf2230 Date: 2013-08-09 10:44 +0200 http://bitbucket.org/pypy/pypy/changeset/cad523cf2230/ Log: Test was skipped on *both* ootype and lltype. 
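The rename below is what actually un-skips the tests: by default py.test only collects classes whose names start with Test, so a Base* helper class left without a concrete Test* subclass never runs at all. A minimal illustration with hypothetical names:

    class BaseTestFoo(object):       # not collected by py.test on its own
        def test_one(self):
            assert True

    class TestFoo(BaseTestFoo):      # collected: test_one actually runs
        pass

The same pattern is behind the re-enabled TestRErased, TestRemptydict and TestRweakref classes further down in this batch.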
diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -190,7 +190,7 @@ return True -class BaseTestRlist(BaseRtypingTest): +class TestRlist(BaseRtypingTest): type_system = 'lltype' rlist = ll_rlist From noreply at buildbot.pypy.org Fri Aug 9 10:48:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 10:48:04 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130809084804.33F8A1C300E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66030:3daccc98b602 Date: 2013-08-09 10:47 +0200 http://bitbucket.org/pypy/pypy/changeset/3daccc98b602/ Log: merge heads diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -190,7 +190,7 @@ return True -class BaseTestRlist(BaseRtypingTest): +class TestRlist(BaseRtypingTest): type_system = 'lltype' rlist = ll_rlist From noreply at buildbot.pypy.org Fri Aug 9 10:54:58 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 9 Aug 2013 10:54:58 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: test stm_write_barrier fastpaths Message-ID: <20130809085458.5A8ED1C2442@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66031:af17370bd3cf Date: 2013-08-09 08:43 +0200 http://bitbucket.org/pypy/pypy/changeset/af17370bd3cf/ Log: test stm_write_barrier fastpaths diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -2,6 +2,7 @@ from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.metainterp.history import BoxPtr, ConstPtr, ConstInt from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import we_are_translated # # STM Support @@ -43,6 +44,11 @@ # overridden method from parent class # for op in operations: + if not we_are_translated(): + # only possible in tests: + if op.getopnum() in (rop.COND_CALL_STM_B,): + self.newops.append(op) + continue if op.getopnum() == rop.DEBUG_MERGE_POINT: continue if op.getopnum() == rop.INCREMENT_DEBUG_COUNTER: diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -168,11 +168,11 @@ def teardown_method(self, meth): rffi.aroundstate._cleanup_() - def assert_in(self, called_on, *args): + def assert_in(self, called_on, args): for i, ref in enumerate(args): assert rffi.cast_ptr_to_adr(ref) == called_on[i] - def assert_not_in(self, called_on, *args): + def assert_not_in(self, called_on, args): for ref in args: assert rffi.cast_ptr_to_adr(ref) not in called_on @@ -233,7 +233,7 @@ # fastpath assert not called_on else: - self.assert_in(called_on, sgcref) + self.assert_in(called_on, [sgcref]) # now add it to the read-cache and check # that it will never call the read_barrier @@ -243,7 +243,47 @@ self.cpu.execute_token(looptoken, sgcref) # not called: assert not called_on + + def test_write_barrier_fastpath(self): + cpu = self.cpu + cpu.setup_once() + PRIV_REV = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) + self.priv_rev_num[0] = PRIV_REV + called_on = cpu.gc_ll_descr.wb_called_on + + for rev in [PRIV_REV+4, PRIV_REV]: + 
cpu.gc_ll_descr.clear_barrier_lists() + assert not called_on + s = self.allocate_prebuilt_s() + sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) + s.h_revision = rev + + p0 = BoxPtr() + operations = [ + ResOperation(rop.COND_CALL_STM_B, [p0], None, + descr=self.p2wd), + ResOperation(rop.FINISH, [p0], None, + descr=BasicFinalDescr(0)), + ] + inputargs = [p0] + looptoken = JitCellToken() + cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.execute_token(looptoken, sgcref) + + # check if rev-fastpath worked + if rev == PRIV_REV: + # fastpath and WRITE_BARRIER not set + assert not called_on + else: + self.assert_in(called_on, [sgcref]) + + # now set WRITE_BARRIER -> always call slowpath + cpu.gc_ll_descr.clear_barrier_lists() + assert not called_on + s.h_tid |= StmGC.GCFLAG_WRITE_BARRIER + self.cpu.execute_token(looptoken, sgcref) + self.assert_in(called_on, [sgcref]) From noreply at buildbot.pypy.org Fri Aug 9 10:54:59 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 9 Aug 2013 10:54:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: test and fix ptr_eq Message-ID: <20130809085459.914CD1C2442@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66032:0ca84e25723e Date: 2013-08-09 10:54 +0200 http://bitbucket.org/pypy/pypy/changeset/0ca84e25723e/ Log: test and fix ptr_eq diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -351,12 +351,14 @@ # eax has result if IS_X86_32: # ||val2|val1|retaddr|x||x|x|val2|val1| - mc.MOV_sr(7 * WORD, eax.value) - # ||result|val1|retaddr|x||x|x|val2|val1| + mc.ADD_ri(esp.value, 5 * WORD) + # ||val2|val1|retaddr| else: # ||val2|val1||retaddr|x|| - mc.MOV_sr(3 * WORD, eax.value) - # ||result|val1||retaddr|x|| + mc.ADD_ri(esp.value, WORD) + # ||val2|val1||retaddr| + mc.MOV_sr(2 * WORD, eax.value) + # ||result|val1|retaddr| # self._pop_all_regs_from_frame(mc, [], withfloats=False, callee_only=True) @@ -2218,18 +2220,28 @@ # the explicit MOV before it (CMP(a_base, b_base)) sl = X86_64_SCRATCH_REG.lowest8bits() mc.MOV(X86_64_SCRATCH_REG, a_base) - mc.CMP(X86_64_SCRATCH_REG, b_base) + if isinstance(b_base, ImmedLoc) \ + and rx86.fits_in_32bits(b_base.value): + mc.CMP_ri(X86_64_SCRATCH_REG.value, b_base.value) + elif not isinstance(b_base, ImmedLoc): + mc.CMP(X86_64_SCRATCH_REG, b_base) + else: + # imm64, need another temporary reg :( + mc.PUSH_r(eax.value) + mc.MOV_ri64(eax.value, b_base.value) + mc.CMP_rr(X86_64_SCRATCH_REG.value, eax.value) + mc.POP_r(eax.value) + # reverse flags: if p1==p2, set NZ mc.SET_ir(rx86.Conditions['Z'], sl.value) - mc.MOVZX8_rr(X86_64_SCRATCH_REG.value, sl.value) - # mc.TEST8_rr() without movzx8 - mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) + mc.AND8_rr(sl.value, sl.value) mc.J_il8(rx86.Conditions['NZ'], 0) j_ok1 = mc.get_relative_pos() # a == 0 || b == 0 -> SET Z if isinstance(a_base, ImmedLoc): if a_base.getint() == 0: - # Z flag still set from above + # set Z flag: + mc.XOR(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) mc.JMP_l8(0) j_ok2 = mc.get_relative_pos() else: @@ -2264,7 +2276,7 @@ # result still on stack mc.POP_r(X86_64_SCRATCH_REG.value) # set flags: - mc.TEST(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) + mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) # # END SLOWPATH # diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- 
a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -3,7 +3,7 @@ from rpython.jit.metainterp.history import ResOperation, TargetToken,\ JitCellToken from rpython.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, - ConstPtr, Box, + ConstPtr, Box, Const, BasicFailDescr, BasicFinalDescr) from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.x86.arch import WORD @@ -20,6 +20,7 @@ GCDescrShadowstackDirect, BaseTestRegalloc) from rpython.jit.backend.llsupport import jitframe from rpython.memory.gc.stmgc import StmGC +import itertools import ctypes CPU = getcpuclass() @@ -96,6 +97,7 @@ self.llop1 = None self.rb_called_on = [] self.wb_called_on = [] + self.ptr_eq_called_on = [] self.stm = True def read_barrier(obj): @@ -125,19 +127,19 @@ self.generate_function('stm_try_inevitable', inevitable, [], RESULT=lltype.Void) - def ptr_eq(x, y): return x == y - def ptr_ne(x, y): return x != y + def ptr_eq(x, y): + self.ptr_eq_called_on.append((x, y)) + return x == y self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, RESULT=lltype.Bool) - self.generate_function('stm_ptr_ne', ptr_ne, [llmemory.GCREF] * 2, - RESULT=lltype.Bool) - + def get_malloc_slowpath_addr(self): return None - def clear_barrier_lists(self): + def clear_lists(self): self.rb_called_on[:] = [] self.wb_called_on[:] = [] + self.ptr_eq_called_on[:] = [] class TestGcStm(BaseTestRegalloc): @@ -209,7 +211,7 @@ self.priv_rev_num[0] = PRIV_REV called_on = cpu.gc_ll_descr.rb_called_on for rev in [PRIV_REV+4, PRIV_REV]: - cpu.gc_ll_descr.clear_barrier_lists() + cpu.gc_ll_descr.clear_lists() self.clear_read_cache() s = self.allocate_prebuilt_s() @@ -237,7 +239,7 @@ # now add it to the read-cache and check # that it will never call the read_barrier - cpu.gc_ll_descr.clear_barrier_lists() + cpu.gc_ll_descr.clear_lists() self.set_cache_item(sgcref) self.cpu.execute_token(looptoken, sgcref) @@ -252,8 +254,7 @@ called_on = cpu.gc_ll_descr.wb_called_on for rev in [PRIV_REV+4, PRIV_REV]: - cpu.gc_ll_descr.clear_barrier_lists() - assert not called_on + cpu.gc_ll_descr.clear_lists() s = self.allocate_prebuilt_s() sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) @@ -279,11 +280,53 @@ self.assert_in(called_on, [sgcref]) # now set WRITE_BARRIER -> always call slowpath - cpu.gc_ll_descr.clear_barrier_lists() - assert not called_on + cpu.gc_ll_descr.clear_lists() s.h_tid |= StmGC.GCFLAG_WRITE_BARRIER self.cpu.execute_token(looptoken, sgcref) self.assert_in(called_on, [sgcref]) + def test_ptr_eq_fastpath(self): + cpu = self.cpu + cpu.setup_once() + called_on = cpu.gc_ll_descr.ptr_eq_called_on + + i0 = BoxInt() + sa, sb = (rffi.cast(llmemory.GCREF, self.allocate_prebuilt_s()), + rffi.cast(llmemory.GCREF, self.allocate_prebuilt_s())) + ss = [sa, sa, sb, sb, + lltype.nullptr(llmemory.GCREF.TO), + lltype.nullptr(llmemory.GCREF.TO), + ] + for s1, s2 in itertools.combinations(ss, 2): + ps = [BoxPtr(), BoxPtr(), + ConstPtr(s1), + ConstPtr(s2)] + for p1, p2 in itertools.combinations(ps, 2): + cpu.gc_ll_descr.clear_lists() + + operations = [ + ResOperation(rop.PTR_EQ, [p1, p2], i0), + ResOperation(rop.FINISH, [i0], None, + descr=BasicFinalDescr(0)), + ] + inputargs = [p for p in (p1, p2) if not isinstance(p, Const)] + looptoken = JitCellToken() + c_loop = cpu.compile_loop(inputargs, operations, looptoken) + args = [s for i, s in enumerate((s1, s2)) + if not isinstance((p1, p2)[i], Const)] + self.cpu.execute_token(looptoken, *args) + + a, b = s1, s2 + if 
isinstance(p1, Const): + s1 = p1.value + if isinstance(p2, Const): + s2 = p2.value + + if s1 == s2 or \ + rffi.cast(lltype.Signed, s1) == 0 or \ + rffi.cast(lltype.Signed, s2) == 0: + assert (s1, s2) not in called_on + else: + assert [(s1, s2)] == called_on From noreply at buildbot.pypy.org Fri Aug 9 10:57:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 10:57:20 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix for an issue reported by krono on irc Message-ID: <20130809085720.41C1F1C2442@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66033:8be5fec47a23 Date: 2013-08-09 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/8be5fec47a23/ Log: Test and fix for an issue reported by krono on irc diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1003,6 +1003,7 @@ else: if condition: function(*args) +conditional_call._annenforceargs_ = [bool, None, None] class ConditionalCallEntry(ExtRegistryEntry): _about_ = _jit_conditional_call diff --git a/rpython/rlib/test/test_jit.py b/rpython/rlib/test/test_jit.py --- a/rpython/rlib/test/test_jit.py +++ b/rpython/rlib/test/test_jit.py @@ -3,10 +3,13 @@ from rpython.conftest import option from rpython.annotator.model import UnionError from rpython.rlib.jit import (hint, we_are_jitted, JitDriver, elidable_promote, - JitHintError, oopspec, isconstant) + JitHintError, oopspec, isconstant, conditional_call) from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.lltypesystem import lltype +from rpython.translator.translator import TranslationContext +from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator +from rpython.annotator import model as annmodel def test_oopspec(): @@ -247,3 +250,17 @@ # this used to fail on 64-bit, because r_uint == r_ulonglong myjitdriver = JitDriver(greens=['i1'], reds=[]) myjitdriver.jit_merge_point(i1=r_uint(42)) + + def test_conditional_call(self): + def g(): + pass + def f(n): + conditional_call(n >= 0, g) + def later(m): + conditional_call(m, g) + t = TranslationContext() + t.buildannotator().build_types(f, [int]) + t.buildrtyper().specialize() + mix = MixLevelHelperAnnotator(t.rtyper) + mix.getgraph(later, [annmodel.s_Bool], annmodel.s_None) + mix.finish() From noreply at buildbot.pypy.org Fri Aug 9 11:16:42 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Aug 2013 11:16:42 +0200 (CEST) Subject: [pypy-commit] pypy default: re-enable tests that were mistakenly disabled during ootype removal Message-ID: <20130809091642.6FCA41C1524@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r66034:c6d6c00439b6 Date: 2013-08-09 10:16 +0100 http://bitbucket.org/pypy/pypy/changeset/c6d6c00439b6/ Log: re-enable tests that were mistakenly disabled during ootype removal diff --git a/rpython/rlib/test/test_rerased.py b/rpython/rlib/test/test_rerased.py --- a/rpython/rlib/test/test_rerased.py +++ b/rpython/rlib/test/test_rerased.py @@ -183,7 +183,7 @@ s = a.build_types(f, [int]) assert isinstance(s, annmodel.SomeInteger) -class BaseTestRErased(BaseRtypingTest): +class TestRErased(BaseRtypingTest): ERASED_TYPE = llmemory.GCREF UNERASED_TYPE = OBJECTPTR def castable(self, TO, var): diff --git a/rpython/rtyper/test/test_remptydict.py b/rpython/rtyper/test/test_remptydict.py --- a/rpython/rtyper/test/test_remptydict.py +++ b/rpython/rtyper/test/test_remptydict.py @@ -1,7 +1,7 @@ import py from rpython.rtyper.test.tool import BaseRtypingTest 
-class BaseTestRemptydict(BaseRtypingTest): +class TestRemptydict(BaseRtypingTest): def test_empty_dict(self): class A: pass diff --git a/rpython/rtyper/test/test_rweakref.py b/rpython/rtyper/test/test_rweakref.py --- a/rpython/rtyper/test/test_rweakref.py +++ b/rpython/rtyper/test/test_rweakref.py @@ -3,7 +3,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.test.tool import BaseRtypingTest -class BaseTestRweakref(BaseRtypingTest): +class TestRweakref(BaseRtypingTest): def test_weakref_simple(self): class A: From noreply at buildbot.pypy.org Fri Aug 9 11:29:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 11:29:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Use a call_location specialization. Might really fix krono's issue. Message-ID: <20130809092958.D91C31C1524@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66035:575f1677ec89 Date: 2013-08-09 11:29 +0200 http://bitbucket.org/pypy/pypy/changeset/575f1677ec89/ Log: Use a call_location specialization. Might really fix krono's issue. diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -379,4 +379,4 @@ def specialize_call_location(funcdesc, args_s, op): assert op is not None - return maybe_star_args(funcdesc, op, args_s) + return maybe_star_args(funcdesc, (op,), args_s) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -996,14 +996,14 @@ def _jit_conditional_call(condition, function, *args): pass - at specialize.ll_and_arg(1) + at specialize.call_location() def conditional_call(condition, function, *args): if we_are_jitted(): _jit_conditional_call(condition, function, *args) else: if condition: function(*args) -conditional_call._annenforceargs_ = [bool, None, None] +conditional_call._always_inline_ = True class ConditionalCallEntry(ExtRegistryEntry): _about_ = _jit_conditional_call From noreply at buildbot.pypy.org Fri Aug 9 13:17:33 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 9 Aug 2013 13:17:33 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: in progress Message-ID: <20130809111733.874ED1C0170@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66036:7ddcfef284da Date: 2013-08-09 13:16 +0200 http://bitbucket.org/pypy/pypy/changeset/7ddcfef284da/ Log: in progress diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -118,6 +118,8 @@ if we_are_translated(): # tests don't allow this op.setdescr(new_d) + else: + assert new_d is descr gcrefs_output_list.append(new_llref) def rewrite_assembler(self, cpu, operations, gcrefs_output_list): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1070,6 +1070,7 @@ assert self.cpu.gc_ll_descr.stm guard_opnum = guard_op.getopnum() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) + # p1==p2 -> NZ if guard_opnum == rop.GUARD_FALSE: # jump to failure-code if ptrs are equal self.implement_guard(guard_token, "NZ") diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -13,11 +13,10 @@ from 
rpython.jit.metainterp.executor import execute from rpython.jit.backend.test.runner_test import LLtypeBackendTest from rpython.jit.tool.oparser import parse -from rpython.rtyper.annlowlevel import llhelper, llhelper_args -from rpython.jit.backend.llsupport.gc import ( - GcRootMap_stm, BarrierDescr) +from rpython.rtyper.annlowlevel import llhelper +from rpython.jit.backend.llsupport.gc import BarrierDescr from rpython.jit.backend.llsupport.test.test_gc_integration import ( - GCDescrShadowstackDirect, BaseTestRegalloc) + GCDescrShadowstackDirect, BaseTestRegalloc, JitFrameDescrs) from rpython.jit.backend.llsupport import jitframe from rpython.memory.gc.stmgc import StmGC import itertools @@ -61,15 +60,19 @@ def jitframe_allocate(frame_info): + import sys frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth, zero=True) + frame.h_tid = rffi.cast(lltype.Unsigned, + StmGC.GCFLAG_OLD|StmGC.GCFLAG_WRITE_BARRIER | 123) + frame.h_revision = rffi.cast(lltype.Signed, -sys.maxint) frame.jf_frame_info = frame_info return frame JITFRAME = lltype.GcStruct( 'JITFRAME', - ('h_tid', lltype.Signed), + ('h_tid', lltype.Unsigned), ('h_revision', lltype.Signed), - ('h_original', lltype.Signed), + ('h_original', lltype.Unsigned), ('jf_frame_info', lltype.Ptr(jitframe.JITFRAMEINFO)), ('jf_descr', llmemory.GCREF), ('jf_force_descr', llmemory.GCREF), @@ -84,9 +87,10 @@ ) JITFRAMEPTR = lltype.Ptr(JITFRAME) + class FakeGCHeaderBuilder: size_gc_header = WORD - + class GCDescrStm(GCDescrShadowstackDirect): def __init__(self): @@ -133,6 +137,25 @@ self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, RESULT=lltype.Bool) + def malloc_jitframe(self, frame_info): + """ Allocate a new frame, overwritten by tests + """ + frame = JITFRAME.allocate(frame_info) + self.frames.append(frame) + return frame + + def getframedescrs(self, cpu): + descrs = JitFrameDescrs() + descrs.arraydescr = cpu.arraydescrof(JITFRAME) + for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', + 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth']: + setattr(descrs, name, cpu.fielddescrof(JITFRAME, name)) + descrs.jfi_frame_depth = cpu.fielddescrof(jitframe.JITFRAMEINFO, + 'jfi_frame_depth') + descrs.jfi_frame_size = cpu.fielddescrof(jitframe.JITFRAMEINFO, + 'jfi_frame_size') + return descrs + def get_malloc_slowpath_addr(self): return None @@ -291,6 +314,7 @@ called_on = cpu.gc_ll_descr.ptr_eq_called_on i0 = BoxInt() + i1 = BoxInt() sa, sb = (rffi.cast(llmemory.GCREF, self.allocate_prebuilt_s()), rffi.cast(llmemory.GCREF, self.allocate_prebuilt_s())) ss = [sa, sa, sb, sb, @@ -302,31 +326,65 @@ ConstPtr(s1), ConstPtr(s2)] for p1, p2 in itertools.combinations(ps, 2): - cpu.gc_ll_descr.clear_lists() - - operations = [ - ResOperation(rop.PTR_EQ, [p1, p2], i0), - ResOperation(rop.FINISH, [i0], None, - descr=BasicFinalDescr(0)), - ] - inputargs = [p for p in (p1, p2) if not isinstance(p, Const)] - looptoken = JitCellToken() - c_loop = cpu.compile_loop(inputargs, operations, looptoken) - args = [s for i, s in enumerate((s1, s2)) - if not isinstance((p1, p2)[i], Const)] - self.cpu.execute_token(looptoken, *args) + for guard in [None, rop.GUARD_TRUE, rop.GUARD_FALSE]: + cpu.gc_ll_descr.clear_lists() - a, b = s1, s2 - if isinstance(p1, Const): - s1 = p1.value - if isinstance(p2, Const): - s2 = p2.value + i = i0 + operations = [ResOperation(rop.PTR_EQ, [p1, p2], i0)] + if guard is not None: + gop = ResOperation(guard, [i0], None, + BasicFailDescr()) + gop.setfailargs([]) + operations.append(gop) + i = i1 + # finish must depend on result of 
ptr_eq if no guard + # is inbetween (otherwise ptr_eq gets deleted) + # if there is a guard, the result of ptr_eq must not + # be used after it again... -> i + operations.append( + ResOperation(rop.FINISH, [i], None, + descr=BasicFinalDescr()) + ) - if s1 == s2 or \ - rffi.cast(lltype.Signed, s1) == 0 or \ - rffi.cast(lltype.Signed, s2) == 0: - assert (s1, s2) not in called_on - else: - assert [(s1, s2)] == called_on + inputargs = [p for p in (p1, p2) if not isinstance(p, Const)] + looptoken = JitCellToken() + c_loop = cpu.compile_loop(inputargs + [i1], operations, looptoken) + print c_loop + args = [s for i, s in enumerate((s1, s2)) + if not isinstance((p1, p2)[i], Const)] + [1] + frame = self.cpu.execute_token(looptoken, *args) + frame = rffi.cast(JITFRAMEPTR, frame) + if frame.jf_descr is operations[-1].getdescr(): + guard_failed = False + else: + guard_failed = True + + a, b = s1, s2 + if isinstance(p1, Const): + s1 = p1.value + if isinstance(p2, Const): + s2 = p2.value + + if s1 == s2 or \ + rffi.cast(lltype.Signed, s1) == 0 or \ + rffi.cast(lltype.Signed, s2) == 0: + assert (s1, s2) not in called_on + else: + assert [(s1, s2)] == called_on + + if guard is not None: + if s1 == s2: + if guard == rop.GUARD_TRUE: + assert not guard_failed + else: + assert guard_failed + elif guard == rop.GUARD_FALSE: + assert not guard_failed + else: + assert guard_failed + + + + From noreply at buildbot.pypy.org Fri Aug 9 14:46:03 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 9 Aug 2013 14:46:03 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix something, still not working Message-ID: <20130809124603.413C51C1524@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66037:9afebecca934 Date: 2013-08-09 14:44 +0200 http://bitbucket.org/pypy/pypy/changeset/9afebecca934/ Log: fix something, still not working diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -629,7 +629,7 @@ fail_descr = cast_instance_to_gcref(descr) # we know it does not move, but well fail_descr = rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) + fail_descr = rgc.cast_gcref_to_int(fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) locs = [loc, imm(fail_descr)] diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -366,10 +366,10 @@ # the frame is in ebp, but we have to point where in the frame is # the potential argument to FINISH descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) + fail_descr = rgc.cast_instance_to_gcref(descr) # we know it does not move, but well fail_descr = rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) + fail_descr = rgc.cast_gcref_to_int(fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) locs = [loc, imm(fail_descr)] diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -19,7 +19,7 @@ GCDescrShadowstackDirect, BaseTestRegalloc, JitFrameDescrs) from rpython.jit.backend.llsupport import jitframe from rpython.memory.gc.stmgc import StmGC -import itertools +import itertools, sys import ctypes CPU 
= getcpuclass() @@ -58,13 +58,16 @@ # ____________________________________________________________ +def allocate_protected(TP, tid=123, n=1, zero=True): + obj = lltype.malloc(TP, n=n, zero=zero) + obj.h_tid = rffi.cast(lltype.Unsigned, + StmGC.GCFLAG_OLD|StmGC.GCFLAG_WRITE_BARRIER | tid) + obj.h_revision = rffi.cast(lltype.Signed, -sys.maxint) + return obj def jitframe_allocate(frame_info): - import sys - frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth, zero=True) - frame.h_tid = rffi.cast(lltype.Unsigned, - StmGC.GCFLAG_OLD|StmGC.GCFLAG_WRITE_BARRIER | 123) - frame.h_revision = rffi.cast(lltype.Signed, -sys.maxint) + frame = allocate_protected(JITFRAME, + frame_info.jfi_frame_depth, zero=True) frame.jf_frame_info = frame_info return frame @@ -329,11 +332,14 @@ for guard in [None, rop.GUARD_TRUE, rop.GUARD_FALSE]: cpu.gc_ll_descr.clear_lists() + # BUILD OPERATIONS: i = i0 + guarddescr = BasicFailDescr() + finaldescr = BasicFinalDescr() operations = [ResOperation(rop.PTR_EQ, [p1, p2], i0)] if guard is not None: gop = ResOperation(guard, [i0], None, - BasicFailDescr()) + descr=guarddescr) gop.setfailargs([]) operations.append(gop) i = i1 @@ -343,22 +349,24 @@ # be used after it again... -> i operations.append( ResOperation(rop.FINISH, [i], None, - descr=BasicFinalDescr()) + descr=finaldescr) ) - - inputargs = [p for p in (p1, p2) if not isinstance(p, Const)] + + # COMPILE & EXECUTE LOOP: + inputargs = [p for p in (p1, p2) + if not isinstance(p, Const)] looptoken = JitCellToken() - c_loop = cpu.compile_loop(inputargs + [i1], operations, looptoken) + c_loop = cpu.compile_loop(inputargs + [i1], operations, + looptoken) print c_loop args = [s for i, s in enumerate((s1, s2)) if not isinstance((p1, p2)[i], Const)] + [1] + frame = self.cpu.execute_token(looptoken, *args) frame = rffi.cast(JITFRAMEPTR, frame) - if frame.jf_descr is operations[-1].getdescr(): - guard_failed = False - else: - guard_failed = True + guard_failed = frame.jf_descr is not finaldescr + # CHECK: a, b = s1, s2 if isinstance(p1, Const): s1 = p1.value From noreply at buildbot.pypy.org Fri Aug 9 15:25:35 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 9 Aug 2013 15:25:35 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: another fix Message-ID: <20130809132535.361C91C0170@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66038:ffe6ff0ccf95 Date: 2013-08-09 15:24 +0200 http://bitbucket.org/pypy/pypy/changeset/ffe6ff0ccf95/ Log: another fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2313,9 +2313,11 @@ def _stm_barrier_fastpath(self, mc, descr, arglocs, is_frame=False, align_stack=False): assert self.cpu.gc_ll_descr.stm - #from rpython.jit.backend.llsupport.gc import ( - # STMBarrierDescr, STMReadBarrierDescr, STMWriteBarrierDescr) - #assert isinstance(descr, STMBarrierDescr) + from rpython.jit.backend.llsupport.gc import ( + STMBarrierDescr, STMReadBarrierDescr, STMWriteBarrierDescr) + if we_are_translated(): + # tests use a a mock class, but translation needs it + assert isinstance(descr, STMBarrierDescr) assert descr.returns_modified_object loc_base = arglocs[0] assert isinstance(loc_base, RegLoc) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -58,16 
+58,22 @@ # ____________________________________________________________ -def allocate_protected(TP, tid=123, n=1, zero=True): +def allocate_protected(TP, n=1, zero=True, tid=124): obj = lltype.malloc(TP, n=n, zero=zero) - obj.h_tid = rffi.cast(lltype.Unsigned, - StmGC.GCFLAG_OLD|StmGC.GCFLAG_WRITE_BARRIER | tid) + obj.h_tid = rffi.cast(lltype.Unsigned, + StmGC.GCFLAG_OLD|StmGC.GCFLAG_WRITE_BARRIER | tid) obj.h_revision = rffi.cast(lltype.Signed, -sys.maxint) return obj +def allocate_prebuilt(TP, n=1, zero=True, tid=123): + obj = lltype.malloc(TP, n=n, zero=zero) + obj.h_tid = rffi.cast(lltype.Unsigned, StmGC.PREBUILT_FLAGS | tid) + obj.h_revision = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) + return obj + def jitframe_allocate(frame_info): - frame = allocate_protected(JITFRAME, - frame_info.jfi_frame_depth, zero=True) + frame = allocate_protected(JITFRAME, n=frame_info.jfi_frame_depth, + zero=True) frame.jf_frame_info = frame_info return frame @@ -232,6 +238,7 @@ def test_read_barrier_fastpath(self): cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() PRIV_REV = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) self.priv_rev_num[0] = PRIV_REV @@ -274,6 +281,7 @@ def test_write_barrier_fastpath(self): cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() PRIV_REV = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) self.priv_rev_num[0] = PRIV_REV @@ -313,6 +321,7 @@ def test_ptr_eq_fastpath(self): cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() called_on = cpu.gc_ll_descr.ptr_eq_called_on @@ -360,12 +369,12 @@ looptoken) print c_loop args = [s for i, s in enumerate((s1, s2)) - if not isinstance((p1, p2)[i], Const)] + [1] + if not isinstance((p1, p2)[i], Const)] + [7] frame = self.cpu.execute_token(looptoken, *args) frame = rffi.cast(JITFRAMEPTR, frame) guard_failed = frame.jf_descr is not finaldescr - + # CHECK: a, b = s1, s2 if isinstance(p1, Const): From noreply at buildbot.pypy.org Fri Aug 9 16:02:48 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 9 Aug 2013 16:02:48 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: even more fixes. still, jf_descr contains some weird value Message-ID: <20130809140248.73DCF1C1524@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66039:64bf8117bd60 Date: 2013-08-09 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/64bf8117bd60/ Log: even more fixes. 
still, jf_descr contains some weird value diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -172,8 +172,10 @@ break exc = guardtok.exc target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = cast_instance_to_gcref(guardtok.faildescr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) + fail_descr = rgc.cast_instance_to_gcref(guardtok.faildescr) + # already done by gc.py record_constptrs, just to be safe: + fail_descr = rgc._make_sure_does_not_move(fail_descr) + fail_descr = rgc.cast_gcref_to_int(fail_descr) base_ofs = self.cpu.get_baseofs_of_frame_field() positions = [0] * len(guardtok.fail_locs) for i, loc in enumerate(guardtok.fail_locs): @@ -226,9 +228,10 @@ else: raise AssertionError(kind) - gcref = cast_instance_to_gcref(value) + gcref = rgc.cast_instance_to_gcref(value) gcref = rgc._make_sure_does_not_move(gcref) - value = rffi.cast(lltype.Signed, gcref) + value = rgc.cast_gcref_to_int(gcref) + je_location = self._call_assembler_check_descr(value, tmploc) # # Path A: use assembler_helper_adr From noreply at buildbot.pypy.org Fri Aug 9 17:52:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 17:52:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Add three tests, one of which fails. Message-ID: <20130809155257.773DF1C0170@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66040:41a4884c4395 Date: 2013-08-09 17:52 +0200 http://bitbucket.org/pypy/pypy/changeset/41a4884c4395/ Log: Add three tests, one of which fails. diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3954,8 +3954,12 @@ p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = rffi.cast(T, 0x4243444546474849) + value = rffi.cast(T, -0x4243444546474849) rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, + arraydescr) + assert got == rffi.cast(lltype.Signed, value) + # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -3981,6 +3985,11 @@ p[i] = '\xDD' value = rffi.cast(T, 1.12e20) rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_f(rffi.cast(lltype.Signed, p), 16, + arraydescr) + got = longlong.getrealfloat(got) + assert got == rffi.cast(lltype.Float, value) + # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -58,5 +58,18 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + def test_raw_storage_byte(self): + def f(): + p = alloc_raw_storage(15) + raw_storage_setitem(p, 5, rffi.cast(rffi.UCHAR, 254)) + res = raw_storage_getitem(rffi.UCHAR, p, 5) + free_raw_storage(p) + return rffi.cast(lltype.Signed, res) + res = self.interp_operations(f, []) + assert res == 254 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'raw_store': 1, 'raw_load': 1, + 'finish': 1}) + class TestRawMem(RawMemTests, LLJitMixin): pass From noreply at buildbot.pypy.org Fri Aug 9 17:56:42 2013 From: noreply 
at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 17:56:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for the previous test, and probably for issue #1578. Message-ID: <20130809155642.7F6471C0170@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66041:06f78c70c495 Date: 2013-08-09 17:56 +0200 http://bitbucket.org/pypy/pypy/changeset/06f78c70c495/ Log: Fix for the previous test, and probably for issue #1578. diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -721,12 +721,8 @@ def bh_raw_load_i(self, addr, offset, descr): ofs, size, sign = self.unpack_arraydescr_size(descr) - items = addr + offset - for TYPE, _, itemsize in unroll_basic_sizes: - if size == itemsize: - items = rffi.cast(rffi.CArrayPtr(TYPE), items) - return rffi.cast(lltype.Signed, items[0]) - assert False # unreachable code + assert ofs == 0 # otherwise, 'descr' is not a raw length-less array + return self.read_int_at_mem(addr, offset, size, sign) def bh_raw_load_f(self, addr, offset, descr): items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), addr + offset) From noreply at buildbot.pypy.org Fri Aug 9 18:14:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 18:14:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a passing test Message-ID: <20130809161402.633851C0397@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66042:5a1d53b431d4 Date: 2013-08-09 18:05 +0200 http://bitbucket.org/pypy/pypy/changeset/5a1d53b431d4/ Log: Add a passing test diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4000,6 +4000,35 @@ assert result == rffi.cast(lltype.Float, value) rawstorage.free_raw_storage(p) + def test_raw_load_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + from rpython.rlib import rawstorage + for T in [rffi.FLOAT]: + ops = """ + [i0, i1] + i2 = raw_load(i0, i1, descr=arraydescr) + finish(i2) + """ + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + for i in range(31): + p[i] = '\xDD' + value = rffi.cast(T, 1.12e20) + rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, + arraydescr) + assert got == longlong.singlefloat2int(value) + # + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken, + rffi.cast(lltype.Signed, p), 16) + result = self.cpu.get_int_value(deadframe, 0) + assert result == longlong.singlefloat2int(value) + rawstorage.free_raw_storage(p) + def test_raw_store_int(self): from rpython.rlib import rawstorage for T in [rffi.UCHAR, rffi.SIGNEDCHAR, From noreply at buildbot.pypy.org Fri Aug 9 18:14:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 18:14:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Extra tests, passing Message-ID: <20130809161403.9A34C1C0397@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66043:cda20925d344 Date: 2013-08-09 18:13 +0200 http://bitbucket.org/pypy/pypy/changeset/cda20925d344/ Log: Extra tests, passing diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- 
a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4035,16 +4035,23 @@ rffi.USHORT, rffi.SHORT, rffi.UINT, rffi.INT, rffi.ULONG, rffi.LONG]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = (-0x4243444546474849) & sys.maxint + self.cpu.bh_raw_store_i(rffi.cast(lltype.Signed, p), 16, value, + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert result == rffi.cast(T, value) + rawstorage.free_raw_storage(p) + # ops = """ [i0, i1, i2] raw_store(i0, i1, i2, descr=arraydescr) finish() """ - arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = 0x4243444546474849 & sys.maxint loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -4059,16 +4066,24 @@ py.test.skip("requires floats") from rpython.rlib import rawstorage for T in [rffi.DOUBLE]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = 1.23e20 + self.cpu.bh_raw_store_f(rffi.cast(lltype.Signed, p), 16, + longlong.getfloatstorage(value), + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert result == rffi.cast(T, value) + rawstorage.free_raw_storage(p) + # ops = """ [i0, i1, f2] raw_store(i0, i1, f2, descr=arraydescr) finish() """ - arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = 1.23e20 loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -4079,6 +4094,41 @@ assert result == rffi.cast(T, value) rawstorage.free_raw_storage(p) + def test_raw_store_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + from rpython.rlib import rawstorage + for T in [rffi.FLOAT]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = rffi.cast(T, 1.23e20) + self.cpu.bh_raw_store_i(rffi.cast(lltype.Signed, p), 16, + longlong.singlefloat2int(value), + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert (rffi.cast(lltype.Float, result) == + rffi.cast(lltype.Float, value)) + rawstorage.free_raw_storage(p) + # + ops = """ + [i0, i1, i2] + raw_store(i0, i1, i2, descr=arraydescr) + finish() + """ + p = rawstorage.alloc_raw_storage(31) + for i in range(31): + p[i] = '\xDD' + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, + rffi.cast(lltype.Signed, p), 16, + longlong.singlefloat2int(value)) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert (rffi.cast(lltype.Float, result) == + rffi.cast(lltype.Float, value)) + rawstorage.free_raw_storage(p) + def test_forcing_op_with_fail_arg_in_reg(self): values = [] def maybe_force(token, flag): From noreply at buildbot.pypy.org Fri Aug 9 18:48:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 18:48:27 +0200 (CEST) Subject: [pypy-commit] pypy default: PyNumber_Int and PyNumber_Long also accept strings. 
As a better Message-ID: <20130809164827.54B661C0170@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66044:8ac4abd50746 Date: 2013-08-09 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/8ac4abd50746/ Log: PyNumber_Int and PyNumber_Long also accept strings. As a better approximation than previously, they can be written as the direct equivalent to the app-level "int(x)" or "long(x)". diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -41,13 +41,13 @@ def PyNumber_Int(space, w_obj): """Returns the o converted to an integer object on success, or NULL on failure. This is the equivalent of the Python expression int(o).""" - return space.int(w_obj) + return space.call_function(space.w_int, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Long(space, w_obj): """Returns the o converted to a long integer object on success, or NULL on failure. This is the equivalent of the Python expression long(o).""" - return space.long(w_obj) + return space.call_function(space.w_long, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Index(space, w_obj): diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -19,6 +19,8 @@ def test_number_long(self, space, api): w_l = api.PyNumber_Long(space.wrap(123)) assert api.PyLong_CheckExact(w_l) + w_l = api.PyNumber_Long(space.wrap("123")) + assert api.PyLong_CheckExact(w_l) def test_number_int(self, space, api): w_l = api.PyNumber_Int(space.wraplong(123L)) @@ -27,6 +29,8 @@ assert api.PyLong_CheckExact(w_l) w_l = api.PyNumber_Int(space.wrap(42.3)) assert api.PyInt_CheckExact(w_l) + w_l = api.PyNumber_Int(space.wrap("42")) + assert api.PyInt_CheckExact(w_l) def test_number_index(self, space, api): w_l = api.PyNumber_Index(space.wraplong(123L)) From noreply at buildbot.pypy.org Fri Aug 9 18:59:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Aug 2013 18:59:20 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Manually merge the fix '06f78c70c495'. Message-ID: <20130809165920.109B01C0170@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r66045:7adac6b730fd Date: 2013-08-09 18:58 +0200 http://bitbucket.org/pypy/pypy/changeset/7adac6b730fd/ Log: Manually merge the fix '06f78c70c495'. 
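(Illustrative note, not part of the archived commits: the r66044 change above routes PyNumber_Int and PyNumber_Long through space.call_function(space.w_int, ...) and space.call_function(space.w_long, ...), i.e. the app-level int(x) and long(x). A minimal Python 2 sketch of what that implies for string arguments -- plain Python below, not cpyext code:)

    # int() and long() accept both numbers and numeric strings:
    assert int("42") == 42
    assert long("123") == 123
    assert int(42.7) == 42
    try:
        int("not a number")
    except ValueError:
        pass   # invalid literals raise ValueError, which the C-API calls propagate
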
diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -723,12 +723,8 @@ def bh_raw_load_i(self, addr, offset, descr): ofs, size, sign = self.unpack_arraydescr_size(descr) - items = addr + offset - for TYPE, _, itemsize in unroll_basic_sizes: - if size == itemsize: - items = rffi.cast(rffi.CArrayPtr(TYPE), items) - return rffi.cast(lltype.Signed, items[0]) - assert False # unreachable code + assert ofs == 0 # otherwise, 'descr' is not a raw length-less array + return self.read_int_at_mem(addr, offset, size, sign) def bh_raw_load_f(self, addr, offset, descr): items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), addr + offset) From noreply at buildbot.pypy.org Fri Aug 9 20:05:16 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 9 Aug 2013 20:05:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: give up and hack to make test pass (checking guard_true/false with ptr_eq) Message-ID: <20130809180516.9F9EA1C0397@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66046:7fdbd173748c Date: 2013-08-09 20:04 +0200 http://bitbucket.org/pypy/pypy/changeset/7fdbd173748c/ Log: give up and hack to make test pass (checking guard_true/false with ptr_eq) diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -5,7 +5,8 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass, rstr from rpython.rtyper.lltypesystem import llgroup from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref +from rpython.rtyper.annlowlevel import (llhelper, cast_instance_to_gcref, + cast_base_ptr_to_instance) from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.jit.codewriter import heaptracker from rpython.jit.metainterp.history import ConstPtr, AbstractDescr @@ -110,16 +111,14 @@ # the only ops with descrs that get recorded in a trace from rpython.jit.metainterp.history import AbstractDescr descr = op.getdescr() - if not we_are_translated() and descr is None: - return - llref = rgc.cast_instance_to_gcref(descr) + llref = cast_instance_to_gcref(descr) new_llref = rgc._make_sure_does_not_move(llref) - new_d = rgc.try_cast_gcref_to_instance(AbstractDescr, new_llref) if we_are_translated(): - # tests don't allow this + new_d = cast_base_ptr_to_instance(AbstractDescr, new_llref) + # tests don't allow this: op.setdescr(new_d) else: - assert new_d is descr + assert llref == new_llref gcrefs_output_list.append(new_llref) def rewrite_assembler(self, cpu, operations, gcrefs_output_list): diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -19,6 +19,7 @@ GCDescrShadowstackDirect, BaseTestRegalloc, JitFrameDescrs) from rpython.jit.backend.llsupport import jitframe from rpython.memory.gc.stmgc import StmGC +from rpython.jit.metainterp import history import itertools, sys import ctypes @@ -373,7 +374,8 @@ frame = self.cpu.execute_token(looptoken, *args) frame = rffi.cast(JITFRAMEPTR, frame) - guard_failed = frame.jf_descr is not finaldescr + frame_adr = rffi.cast(lltype.Signed, frame.jf_descr) + guard_failed = frame_adr != id(finaldescr) # CHECK: a, 
b = s1, s2 From noreply at buildbot.pypy.org Sat Aug 10 09:38:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 09:38:35 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Use unwrap_spec to receive an already-unpacked string when convenient. Message-ID: <20130810073835.EBDC31C0E1C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stdlib-2.7.4 Changeset: r66048:248b5a25ffeb Date: 2013-08-10 09:37 +0200 http://bitbucket.org/pypy/pypy/changeset/248b5a25ffeb/ Log: Use unwrap_spec to receive an already-unpacked string when convenient. diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -1,6 +1,6 @@ import new from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import GetSetProperty, descr_get_dict, descr_set_dict @@ -106,8 +106,8 @@ return w_result return None - def descr_getattribute(self, space, w_attr): - name = space.str_w(w_attr) + @unwrap_spec(name=str) + def descr_getattribute(self, space, name): if name and name[0] == "_": if name == "__dict__": return self.w_dict @@ -175,7 +175,7 @@ def get_module_string(self, space): try: - w_mod = self.descr_getattribute(space, space.wrap("__module__")) + w_mod = self.descr_getattribute(space, "__module__") except OperationError, e: if not e.match(space, space.w_AttributeError): raise @@ -360,8 +360,8 @@ else: return None - def descr_getattribute(self, space, w_attr): - name = space.str_w(w_attr) + @unwrap_spec(name=str) + def descr_getattribute(self, space, name): if len(name) >= 8 and name[0] == '_': if name == "__dict__": return self.getdict(space) From noreply at buildbot.pypy.org Sat Aug 10 09:38:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 09:38:34 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Kill unwrap_attr() in interp_classobj, which has no purpose any more. Message-ID: <20130810073834.99CF71C08AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stdlib-2.7.4 Changeset: r66047:8fc2d6e55cdf Date: 2013-08-10 09:34 +0200 http://bitbucket.org/pypy/pypy/changeset/8fc2d6e55cdf/ Log: Kill unwrap_attr() in interp_classobj, which has no purpose any more. Gives a simpler solution to pull request #173. diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -13,15 +13,6 @@ raise operationerrfmt(space.w_TypeError, "argument %s must be %s, not %T", argument, expected, w_obj) -def unwrap_attr(space, w_attr): - try: - return space.str_w(w_attr) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return "?" # any string different from "__dict__" & co. is fine - # XXX it's not clear that we have to catch the TypeError... 
- def descr_classobj_new(space, w_subtype, w_name, w_bases, w_dict): if not space.isinstance_w(w_bases, space.w_tuple): raise_type_err(space, 'bases', 'tuple', w_bases) @@ -116,10 +107,7 @@ return None def descr_getattribute(self, space, w_attr): - if not space.isinstance_w(w_attr, space.w_str): - msg = "attribute name must be a string" - raise OperationError(space.w_TypeError, space.wrap(msg)) - name = unwrap_attr(space, w_attr) + name = space.str_w(w_attr) if name and name[0] == "_": if name == "__dict__": return self.w_dict @@ -140,10 +128,7 @@ return space.call_function(w_descr_get, w_value, space.w_None, self) def descr_setattr(self, space, w_attr, w_value): - if not space.isinstance_w(w_attr, space.w_str): - msg = "attribute name must be a string" - raise OperationError(space.w_TypeError, space.wrap(msg)) - name = unwrap_attr(space, w_attr) + name = space.str_w(w_attr) if name and name[0] == "_": if name == "__dict__": self.setdict(space, w_value) @@ -162,7 +147,7 @@ space.setitem(self.w_dict, w_attr, w_value) def descr_delattr(self, space, w_attr): - name = unwrap_attr(space, w_attr) + name = space.str_w(w_attr) if name in ("__dict__", "__name__", "__bases__"): raise operationerrfmt( space.w_TypeError, @@ -376,9 +361,6 @@ return None def descr_getattribute(self, space, w_attr): - if not space.isinstance_w(w_attr, space.w_str): - msg = "attribute name must be a string" - raise OperationError(space.w_TypeError, space.wrap(msg)) name = space.str_w(w_attr) if len(name) >= 8 and name[0] == '_': if name == "__dict__": @@ -388,10 +370,7 @@ return self.getattr(space, name) def descr_setattr(self, space, w_name, w_value): - if not space.isinstance_w(w_name, space.w_str): - msg = "attribute name must be a string" - raise OperationError(space.w_TypeError, space.wrap(msg)) - name = unwrap_attr(space, w_name) + name = space.str_w(w_name) w_meth = self.getattr_from_class(space, '__setattr__') if name and name[0] == "_": if name == '__dict__': @@ -413,7 +392,7 @@ self.setdictvalue(space, name, w_value) def descr_delattr(self, space, w_name): - name = unwrap_attr(space, w_name) + name = space.str_w(w_name) if name and name[0] == "_": if name == '__dict__': # use setdict to raise the error diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1085,6 +1085,14 @@ raises(TypeError, type(c).__getattribute__, c, []) raises(TypeError, type(c).__setattr__, c, [], []) + def test_attr_unicode(self): + class C: + pass + c = C() + setattr(c, u"x", 1) + assert getattr(c, u"x") == 1 + + class AppTestOldStyleMapDict(AppTestOldstyle): spaceconfig = {"objspace.std.withmapdict": True} From noreply at buildbot.pypy.org Sat Aug 10 11:04:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 11:04:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill this method, which by mistake gives a w__class__ attribute on a few Message-ID: <20130810090443.3D86E1C0EF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66049:b9699dee6f10 Date: 2013-08-10 10:58 +0200 http://bitbucket.org/pypy/pypy/changeset/b9699dee6f10/ Log: Kill this method, which by mistake gives a w__class__ attribute on a few classes where it doesn't make sense to have one. This was introduced during "shadowtracking" experiments which have long since been killed (see db954a0b9114). 
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -329,10 +329,6 @@ instance=True) base_user_setup(self, space, w_subtype) - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - add(Proto) subcls = type(name, (supercls,), body) From noreply at buildbot.pypy.org Sat Aug 10 11:04:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 11:04:44 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Fix for 6fed217ef39c Message-ID: <20130810090444.938FF1C0EF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66050:cfb1c8acb1b9 Date: 2013-08-10 10:59 +0200 http://bitbucket.org/pypy/pypy/changeset/cfb1c8acb1b9/ Log: Fix for 6fed217ef39c diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -236,6 +236,8 @@ def setclass(self, space, w_subtype): # only used by descr_set___class__ self.w__class__ = w_subtype + if w_subtype.has_del: + self.register_finalizer() def user_setup(self, space, w_subtype): self.space = space @@ -303,10 +305,6 @@ instance=True) base_user_setup(self, space, w_subtype) - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - add(Proto) subcls = type(name, (supercls,), body) From noreply at buildbot.pypy.org Sat Aug 10 13:05:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 13:05:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for test_raw_store_singlefloat in the llgraph backend. Message-ID: <20130810110544.A05131C0EF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66054:27e883f2a185 Date: 2013-08-10 13:05 +0200 http://bitbucket.org/pypy/pypy/changeset/27e883f2a185/ Log: Fix for test_raw_store_singlefloat in the llgraph backend. 
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -502,6 +502,8 @@ def bh_raw_store_i(self, struct, offset, newvalue, descr): ll_p = rffi.cast(rffi.CCHARP, struct) ll_p = rffi.cast(lltype.Ptr(descr.A), rffi.ptradd(ll_p, offset)) + if descr.A.OF == lltype.SingleFloat: + newvalue = longlong.int2singlefloat(newvalue) ll_p[0] = rffi.cast(descr.A.OF, newvalue) def bh_raw_store_f(self, struct, offset, newvalue, descr): From noreply at buildbot.pypy.org Sat Aug 10 13:08:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 13:08:17 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Fix Message-ID: <20130810110817.355241C0EF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66055:187d3b98b99f Date: 2013-08-10 13:07 +0200 http://bitbucket.org/pypy/pypy/changeset/187d3b98b99f/ Log: Fix diff --git a/rpython/memory/gctransform/boehm.py b/rpython/memory/gctransform/boehm.py --- a/rpython/memory/gctransform/boehm.py +++ b/rpython/memory/gctransform/boehm.py @@ -68,12 +68,11 @@ resulttype=llmemory.Address) destructor_ptr = self.destructor_funcptr_for_type(TYPE) if destructor_ptr: - from rpython.rtyper.annlowlevel import base_ptr_lltype + from rpython.rtyper.lltypesystem.rclass import OBJECTPTR c_destructor_ptr = Constant(destructor_ptr, self.DESTRUCTOR_PTR) v_llfn = hop.genop('cast_ptr_to_adr', [c_destructor_ptr], resulttype=llmemory.Address) - v_self = hop.genop('cast_adr_to_ptr', [v_raw], - resulttype=base_ptr_lltype()) + v_self = hop.genop('cast_adr_to_ptr', [v_raw], resulttype=OBJECTPTR) hop.genop("gc_register_finalizer", [v_self, v_llfn]) return v_raw From noreply at buildbot.pypy.org Sat Aug 10 13:08:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 13:08:36 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: hg merge default Message-ID: <20130810110836.B6CA41C0EF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66056:099342ffcfe2 Date: 2013-08-10 13:07 +0200 http://bitbucket.org/pypy/pypy/changeset/099342ffcfe2/ Log: hg merge default diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -502,6 +502,8 @@ def bh_raw_store_i(self, struct, offset, newvalue, descr): ll_p = rffi.cast(rffi.CCHARP, struct) ll_p = rffi.cast(lltype.Ptr(descr.A), rffi.ptradd(ll_p, offset)) + if descr.A.OF == lltype.SingleFloat: + newvalue = longlong.int2singlefloat(newvalue) ll_p[0] = rffi.cast(descr.A.OF, newvalue) def bh_raw_store_f(self, struct, offset, newvalue, descr): From noreply at buildbot.pypy.org Sat Aug 10 13:23:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 13:23:18 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Workaround for the config mechanism Message-ID: <20130810112318.3E8171C0E1C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66057:59540017d614 Date: 2013-08-10 13:22 +0200 http://bitbucket.org/pypy/pypy/changeset/59540017d614/ Log: Workaround for the config mechanism diff --git a/pypy/sandbox/test/test_pypy_interact.py b/pypy/sandbox/test/test_pypy_interact.py --- a/pypy/sandbox/test/test_pypy_interact.py +++ b/pypy/sandbox/test/test_pypy_interact.py @@ -71,7 +71,8 @@ def setup_module(mod): - t = Translation(mini_pypy_like_entry_point, backend='c', sandbox=True) + t = 
Translation(mini_pypy_like_entry_point, backend='c', sandbox=True, + gc='ref') mod.executable = str(t.compile()) From noreply at buildbot.pypy.org Sat Aug 10 13:35:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 13:35:52 +0200 (CEST) Subject: [pypy-commit] pypy default: Avoid calling os.uname() at runtime. Not completely sure this plays Message-ID: <20130810113552.677F11C0EF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66058:ab7580454b32 Date: 2013-08-10 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/ab7580454b32/ Log: Avoid calling os.uname() at runtime. Not completely sure this plays nice with cross-compilation. How can I check?... Also, a fix for builds on linux3 on some Python versions. diff --git a/rpython/memory/gc/env.py b/rpython/memory/gc/env.py --- a/rpython/memory/gc/env.py +++ b/rpython/memory/gc/env.py @@ -131,8 +131,13 @@ # ---------- Linux2 ---------- +try: + ARCH = os.uname()[4] # machine +except (OSError, AttributeError): + ARCH = '' + def get_L2cache_linux2(): - arch = os.uname()[4] # machine + arch = ARCH # precomputed; the call to os.uname() is not translated if arch.endswith('86') or arch == 'x86_64': return get_L2cache_linux2_cpuinfo() if arch in ('alpha', 'ppc', 'ppc64'): @@ -145,6 +150,8 @@ return get_L2cache_linux2_sparc() return -1 +get_L2cache_linux3 = get_L2cache_linux2 + def get_L2cache_linux2_cpuinfo(filename="/proc/cpuinfo", label='cache size'): debug_start("gc-hardware") From noreply at buildbot.pypy.org Sat Aug 10 13:36:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 13:36:11 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: hg merge default Message-ID: <20130810113611.B890D1C0EF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66059:694e0a037271 Date: 2013-08-10 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/694e0a037271/ Log: hg merge default diff --git a/rpython/memory/gc/env.py b/rpython/memory/gc/env.py --- a/rpython/memory/gc/env.py +++ b/rpython/memory/gc/env.py @@ -131,8 +131,13 @@ # ---------- Linux2 ---------- +try: + ARCH = os.uname()[4] # machine +except (OSError, AttributeError): + ARCH = '' + def get_L2cache_linux2(): - arch = os.uname()[4] # machine + arch = ARCH # precomputed; the call to os.uname() is not translated if arch.endswith('86') or arch == 'x86_64': return get_L2cache_linux2_cpuinfo() if arch in ('alpha', 'ppc', 'ppc64'): @@ -145,6 +150,8 @@ return get_L2cache_linux2_sparc() return -1 +get_L2cache_linux3 = get_L2cache_linux2 + def get_L2cache_linux2_cpuinfo(filename="/proc/cpuinfo", label='cache size'): debug_start("gc-hardware") From noreply at buildbot.pypy.org Sat Aug 10 19:32:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 19:32:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Silence two gcc warnings Message-ID: <20130810173202.A35741C08AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66060:120059a2f5f3 Date: 2013-08-10 19:31 +0200 http://bitbucket.org/pypy/pypy/changeset/120059a2f5f3/ Log: Silence two gcc warnings diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -242,7 +242,7 @@ return llmemory.cast_int_to_adr(rthread.get_ident()) def thread_start(): - value = llop.stack_current(llmemory.Address) + value = llmemory.cast_int_to_adr(llop.stack_current(lltype.Signed)) gcdata.aid2stack.setitem(get_aid(), value) 
thread_start._always_inline_ = True @@ -269,7 +269,8 @@ stack_start = gcdata.aid2stack.get(get_aid(), llmemory.NULL) ll_assert(stack_start != llmemory.NULL, "current thread not found in gcdata.aid2stack!") - stack_stop = llop.stack_current(llmemory.Address) + stack_stop = llmemory.cast_int_to_adr( + llop.stack_current(lltype.Signed)) return (stack_start <= framedata <= stack_stop or stack_start >= framedata >= stack_stop) self.belongs_to_current_thread = belongs_to_current_thread From noreply at buildbot.pypy.org Sat Aug 10 19:44:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 19:44:24 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Failing test Message-ID: <20130810174424.5A18F1C0397@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66061:c81cc58efba0 Date: 2013-08-10 19:43 +0200 http://bitbucket.org/pypy/pypy/changeset/c81cc58efba0/ Log: Failing test diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -654,6 +654,20 @@ del a.x raises(AttributeError, "a.x") + def test_del(self): + class A(object): + def __del__(self): + seen.append(1) + seen = [] + a = () + del a + for i in range(5): + if not seen: + import gc + gc.collect() + assert seen == [1] + + class AppTestWithMapDictAndCounters(object): spaceconfig = {"objspace.std.withmapdict": True, "objspace.std.withmethodcachecounter": True, From noreply at buildbot.pypy.org Sat Aug 10 20:01:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 20:01:26 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Fix the test, but still failing Message-ID: <20130810180126.4D59D1C0397@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66062:6f4e9dceaf8c Date: 2013-08-10 19:57 +0200 http://bitbucket.org/pypy/pypy/changeset/6f4e9dceaf8c/ Log: Fix the test, but still failing diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -659,8 +659,7 @@ def __del__(self): seen.append(1) seen = [] - a = () - del a + A() for i in range(5): if not seen: import gc From noreply at buildbot.pypy.org Sat Aug 10 20:01:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 20:01:28 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Fix the case of objects with mapdict, which overrides a number of Message-ID: <20130810180128.436291C0397@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66063:e3628e64912d Date: 2013-08-10 20:00 +0200 http://bitbucket.org/pypy/pypy/changeset/e3628e64912d/ Log: Fix the case of objects with mapdict, which overrides a number of methods from typedef. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -167,6 +167,14 @@ def invoke_finalizer(self): raise NotImplementedError # must be overridden + def _finalizer_perform_del(self, space): + """Invoke the app-level __del__.""" + w_descr = space.lookup(self, '__del__') + if w_descr is not None: + self.finalizer_perform(space, "__del__ method of ", + space.get_and_call_function, + w_descr, self) + def finalizer_perform(self, space, descrname, callback, *args): """For use in invoke_finalizer(). 
First check if we're called from the random execution of a __del__ or from UserDelAction, diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -247,12 +247,7 @@ self.register_finalizer() def invoke_finalizer(self): - space = self.space - w_descr = space.lookup(self, '__del__') - if w_descr is not None: - self.finalizer_perform(self.space, "__del__ method of ", - space.get_and_call_function, - w_descr, self) + self._finalizer_perform_del(self.space) super_invoke_finalizer(self) def user_setup_slots(self, nslots): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -415,6 +415,12 @@ assert (not self.typedef.hasdict or self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) + if w_subtype.has_del: + self.register_finalizer() + + def invoke_finalizer(self): + self._finalizer_perform_del(self.space) + self._super_invoke_finalizer() def getslotvalue(self, index): key = ("slot", SLOTS_STARTING_FROM + index) @@ -517,7 +523,12 @@ rangen = unroll.unrolling_iterable(range(n)) nmin1 = n - 1 rangenmin1 = unroll.unrolling_iterable(range(nmin1)) + class subcls(BaseMapdictObject, supercls): + _super_invoke_finalizer = supercls.invoke_finalizer.im_func + if _super_invoke_finalizer == W_Root.invoke_finalizer.im_func: + _super_invoke_finalizer = lambda self: None + def _init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized for i in rangen: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -14,6 +14,7 @@ space.config = Config class Class(object): + has_del = False def __init__(self, hasdict=True): self.hasdict = True if hasdict: From noreply at buildbot.pypy.org Sat Aug 10 21:10:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 21:10:31 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Not super happy with super_invoke_finalizer in mapdict. It feels like Message-ID: <20130810191031.7E8B61C0397@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66064:bb4ebc09a224 Date: 2013-08-10 20:35 +0200 http://bitbucket.org/pypy/pypy/changeset/bb4ebc09a224/ Log: Not super happy with super_invoke_finalizer in mapdict. It feels like hacking until it works. 
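(Illustrative note, not part of the archived commits: the mapdict changes above give each generated subclass a _super_invoke_finalizer attribute holding the parent class's invoke_finalizer as a plain function, so the override can run the app-level __del__ first and then chain up. A rough Python 2 sketch of that pattern -- the class and method names below are made up for illustration:)

    class Base(object):
        def invoke_finalizer(self):
            pass                            # whatever the parent normally does

    class MapdictLikeSubclass(Base):
        # keep the parent's implementation around as a plain function;
        # looked up on an instance it becomes a bound method again
        _super_invoke_finalizer = Base.invoke_finalizer.im_func

        def invoke_finalizer(self):
            self._run_app_level_del()       # first the __del__ logic...
            self._super_invoke_finalizer()  # ...then chain to the parent class

        def _run_app_level_del(self):
            pass                            # stands in for _finalizer_perform_del()

    MapdictLikeSubclass().invoke_finalizer()
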
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -214,19 +214,21 @@ value = func_with_new_name(value, value.func_name) body[key] = value + super_invoke_finalizer = supercls.invoke_finalizer.im_func + if super_invoke_finalizer == W_Root.invoke_finalizer.im_func: + super_invoke_finalizer = lambda self: None + if (config.objspace.std.withmapdict and "dict" in features): from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin add(BaseMapdictObject) add(ObjectMixin) body["user_overridden_class"] = True + body["_super_invoke_finalizer"] = super_invoke_finalizer + assert not hasattr(supercls, '_super_invoke_finalizer') features = () if "user" in features: # generic feature needed by all subcls - super_invoke_finalizer = supercls.invoke_finalizer.im_func - if super_invoke_finalizer == W_Root.invoke_finalizer.im_func: - super_invoke_finalizer = lambda self: None - class Proto(object): user_overridden_class = True diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -478,7 +478,8 @@ self.map = map class Object(ObjectMixin, BaseMapdictObject, W_Root): - pass # mainly for tests + # mainly for tests + _super_invoke_finalizer = lambda self: None def get_subclass_of_correct_size(space, cls, w_type): assert space.config.objspace.std.withmapdict From noreply at buildbot.pypy.org Sat Aug 10 21:49:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 21:49:58 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Remove this line, which simply causes an exception to be printed Message-ID: <20130810194958.709851C08AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66065:c9b26c923fc6 Date: 2013-08-10 21:49 +0200 http://bitbucket.org/pypy/pypy/changeset/c9b26c923fc6/ Log: Remove this line, which simply causes an exception to be printed to stderr and otherwise ignored. diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -88,7 +88,7 @@ class MyIO(io.IOBase): def __del__(self): record.append(1) - super(MyIO, self).__del__() + #super(MyIO, self).__del__() --- does not exist def close(self): record.append(2) super(MyIO, self).close() From noreply at buildbot.pypy.org Sat Aug 10 21:55:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 21:55:45 +0200 (CEST) Subject: [pypy-commit] pypy gc-del: Same as c9b26c923fc6 again. Message-ID: <20130810195545.926861C08AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del Changeset: r66066:72f1cd32089b Date: 2013-08-10 21:54 +0200 http://bitbucket.org/pypy/pypy/changeset/72f1cd32089b/ Log: Same as c9b26c923fc6 again. diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -284,7 +284,7 @@ class MyIO(_io.BufferedWriter): def __del__(self): record.append(1) - super(MyIO, self).__del__() + #super(MyIO, self).__del__() --- does not exist def close(self): record.append(2) super(MyIO, self).close() From noreply at buildbot.pypy.org Sat Aug 10 22:51:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Aug 2013 22:51:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Partial backout of ab7580454b32. 
Message-ID: <20130810205131.C1B321C08AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66067:85607df22ea3 Date: 2013-08-10 22:50 +0200 http://bitbucket.org/pypy/pypy/changeset/85607df22ea3/ Log: Partial backout of ab7580454b32. diff --git a/rpython/memory/gc/env.py b/rpython/memory/gc/env.py --- a/rpython/memory/gc/env.py +++ b/rpython/memory/gc/env.py @@ -131,13 +131,8 @@ # ---------- Linux2 ---------- -try: - ARCH = os.uname()[4] # machine -except (OSError, AttributeError): - ARCH = '' - def get_L2cache_linux2(): - arch = ARCH # precomputed; the call to os.uname() is not translated + arch = os.uname()[4] # machine if arch.endswith('86') or arch == 'x86_64': return get_L2cache_linux2_cpuinfo() if arch in ('alpha', 'ppc', 'ppc64'): From noreply at buildbot.pypy.org Sun Aug 11 11:16:50 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 11 Aug 2013 11:16:50 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: my dates Message-ID: <20130811091650.DC7FF1C011D@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5016:0b9d70a68ac2 Date: 2013-08-11 11:16 +0200 http://bitbucket.org/pypy/extradoc/changeset/0b9d70a68ac2/ Log: my dates diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -21,6 +21,7 @@ Maciej Fijalkowski 25/8-1/9 private Manuel Jacob ? sth. cheap, pref. share Ronan Lamy 25/8-? ? +Antonio Cuni 26/8-5/9 ? ==================== ============== ======================= @@ -29,7 +30,6 @@ ==================== ============== ===================== Name Arrive/Depart Accomodation ==================== ============== ===================== -Antonio Cuni ? ? Michael Foord ? ? Maciej Fijalkowski ? ? David Schneider ? ? From noreply at buildbot.pypy.org Sun Aug 11 12:01:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Aug 2013 12:01:52 +0200 (CEST) Subject: [pypy-commit] pypy default: Clarify comment Message-ID: <20130811100152.414321C1309@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66068:5e0e991ab179 Date: 2013-08-11 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/5e0e991ab179/ Log: Clarify comment diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -2046,6 +2046,8 @@ # The code relies on the fact that no weakref can be an old object # weakly pointing to a young object. Indeed, weakrefs are immutable # so they cannot point to an object that was created after it. + # Thanks to this, during a minor collection, we don't have to fix + # or clear the address stored in old weakrefs. 
def invalidate_young_weakrefs(self): """Called during a nursery collection.""" # walk over the list of objects that contain weakrefs and are in the From noreply at buildbot.pypy.org Sun Aug 11 17:31:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Aug 2013 17:31:58 +0200 (CEST) Subject: [pypy-commit] pypy gc-del-2: A branch with a simpler solution to avoid delaying too much calling Message-ID: <20130811153158.7703C1C351C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del-2 Changeset: r66069:e887bf3fa400 Date: 2013-08-11 12:08 +0200 http://bitbucket.org/pypy/pypy/changeset/e887bf3fa400/ Log: A branch with a simpler solution to avoid delaying too much calling __del__ on a chain of objects From noreply at buildbot.pypy.org Sun Aug 11 17:31:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Aug 2013 17:31:59 +0200 (CEST) Subject: [pypy-commit] pypy gc-del-2: Introduce and use filter() on AddressStack. Message-ID: <20130811153159.D48F01C351D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del-2 Changeset: r66070:62c37017e2a6 Date: 2013-08-11 12:32 +0200 http://bitbucket.org/pypy/pypy/changeset/62c37017e2a6/ Log: Introduce and use filter() on AddressStack. diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1602,17 +1602,11 @@ self.free_rawmalloced_object_if_unvisited(obj) def remove_young_arrays_from_old_objects_pointing_to_young(self): - old = self.old_objects_pointing_to_young - new = self.AddressStack() - while old.non_empty(): - obj = old.pop() - if not self.young_rawmalloced_objects.contains(obj): - new.append(obj) - # an extra copy, to avoid assignments to - # 'self.old_objects_pointing_to_young' - while new.non_empty(): - old.append(new.pop()) - new.delete() + self.old_objects_pointing_to_young.filter(self._filter_young_array, + None) + + def _filter_young_array(self, obj, ignored): + return not self.young_rawmalloced_objects.contains(obj) # ---------- # Full collection diff --git a/rpython/memory/support.py b/rpython/memory/support.py --- a/rpython/memory/support.py +++ b/rpython/memory/support.py @@ -143,6 +143,30 @@ count = chunk_size foreach._annspecialcase_ = 'specialize:arg(1)' + def filter(self, callback, arg): + """Invoke 'callback(address, arg)' for all addresses in the stack. + When it returns False, remove the item from the stack (by + replacing it with self.pop(), so the order is destroyed). + Typically, 'callback' is a bound method and 'arg' can be None. 
+ """ + chunk = self.chunk + count = self.used_in_last_chunk + while chunk: + nextchunk = chunk.next + while count > 0: + count -= 1 + if not callback(chunk.items[count], arg): + # a version of pop-and-put-back-at-chunk.items[count] + used = self.used_in_last_chunk - 1 + ll_assert(used >= 0, "pop on empty AddressStack [2]") + chunk.items[count] = self.chunk.items[used] + self.used_in_last_chunk = used + if used == 0 and self.chunk.next: + self.shrink() + chunk = nextchunk + count = chunk_size + filter._annspecialcase_ = 'specialize:arg(1)' + def stack2dict(self): result = AddressDict(self._length_estimate()) self.foreach(_add_in_dict, result) diff --git a/rpython/memory/test/test_support.py b/rpython/memory/test/test_support.py --- a/rpython/memory/test/test_support.py +++ b/rpython/memory/test/test_support.py @@ -78,6 +78,32 @@ ll.foreach(callback, 42) assert seen == addrs or seen[::-1] == addrs # order not guaranteed + def test_filter(self): + import random + AddressStack = get_address_stack() + addrs = [raw_malloc(llmemory.sizeof(lltype.Signed)) + for i in range(1500)] + addrset = set(addrs) + + for kept in [0.0, 0.5, 1.0]: + ll = AddressStack() + for i in range(1500): + ll.append(addrs[i]) + + keep = set([a for a in addrs if random.random() < kept]) + + def filter(addr, fortytwo): + assert fortytwo == 42 + assert addr in addrset + return addr in keep + + ll.filter(filter, 42) + + seen = set() + while ll.non_empty(): + seen.add(ll.pop()) + assert seen == keep + def test_remove(self): AddressStack = get_address_stack() addrs = [raw_malloc(llmemory.sizeof(lltype.Signed)) From noreply at buildbot.pypy.org Sun Aug 11 17:32:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Aug 2013 17:32:01 +0200 (CEST) Subject: [pypy-commit] pypy gc-del-2: A test that would pass if it did major collects instead of minor collects. Message-ID: <20130811153201.256391C3566@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del-2 Changeset: r66071:3ca3e70a4f17 Date: 2013-08-11 17:31 +0200 http://bitbucket.org/pypy/pypy/changeset/3ca3e70a4f17/ Log: A test that would pass if it did major collects instead of minor collects. The goal is to pass this test now with minor collects. 
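(A rough illustration only, not part of the changeset: the object graph that the test below builds can be sketched in plain Python rather than RPython; the names freed, A and make are chosen just for this sketch. It shows the shape of the finalizer chain and the expected finalization order, outermost link first, but not the minimark minor-collection behaviour that this branch is adding.)

    import gc

    freed = []

    class A(object):
        def __init__(self, n, next):
            self.n = n
            self.next = next      # each new link keeps the rest of the chain alive
        def __del__(self):
            freed.append(self.n)

    def make(n):
        a = None
        for i in range(n):
            a = A(i, a)
        # 'a' goes out of scope on return, so the whole chain becomes unreachable

    make(4)
    for _ in range(5):
        gc.collect()              # a chain may need several collections before every __del__ has run
    print(freed)                  # expected: [3, 2, 1, 0], outermost link first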
diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py --- a/rpython/memory/test/test_minimark_gc.py +++ b/rpython/memory/test/test_minimark_gc.py @@ -1,3 +1,5 @@ +from rpython.rlib import rgc +from rpython.rlib.debug import ll_assert from rpython.rlib.rarithmetic import LONG_BIT from rpython.memory.test import test_semispace_gc @@ -9,3 +11,37 @@ GC_CAN_SHRINK_BIG_ARRAY = False GC_CAN_MALLOC_NONMOVABLE = True BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD + + def test_finalizer_chain_minor_collect(self): + class A: + def __init__(self, n, next): + self.n = n + self.next = next + def __del__(self): + state.freed.append(self.n) + class State: + pass + state = State() + + def make(n): + a = None + i = 0 + while i < n: + a = A(i, a) + i += 1 + + def f(n): + state.freed = [] + make(n) + ll_assert(len(state.freed) == 0, "should be empty before collect") + i = 0 + while i < n: + rgc.collect(0) # minor collection only + i += 1 + ll_assert(len(state.freed) == i, "every collect should grow 1") + i = 0 + while i < n: + ll_assert(state.freed[i] == n - i - 1, "bogus ordering") + i += 1 + + self.interpret(f, [4]) From noreply at buildbot.pypy.org Sun Aug 11 17:44:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Aug 2013 17:44:42 +0200 (CEST) Subject: [pypy-commit] pypy gc-del-2: Second test: a full collection that discovers a chain of old unreachable Message-ID: <20130811154442.0A2FC1C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del-2 Changeset: r66072:a71ae5ca9f55 Date: 2013-08-11 17:44 +0200 http://bitbucket.org/pypy/pypy/changeset/a71ae5ca9f55/ Log: Second test: a full collection that discovers a chain of old unreachable objects should make them all young again, so that it goes away at the rhythm of one per minor collect from that point. diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py --- a/rpython/memory/test/test_minimark_gc.py +++ b/rpython/memory/test/test_minimark_gc.py @@ -1,5 +1,6 @@ from rpython.rlib import rgc from rpython.rlib.debug import ll_assert +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import LONG_BIT from rpython.memory.test import test_semispace_gc @@ -45,3 +46,41 @@ i += 1 self.interpret(f, [4]) + + def test_finalizer_chain_minor_collect_old(self): + class A: + def __init__(self, n, next): + self.n = n + self.next = next + def __del__(self): + state.freed.append(self.n) + class State: + pass + state = State() + + def make(n): + a = None + i = 0 + while i < n: + a = A(i, a) + i += 1 + rgc.collect() # make all objects old + keepalive_until_here(a) + + def f(n): + state.freed = [] + make(n) + ll_assert(len(state.freed) == 0, "should be empty before collect") + rgc.collect() # need a full collection to initiate deletion + ll_assert(len(state.freed) == 1, "should be 1 after first collect") + i = 1 + while i < n: + rgc.collect(0) # minor collection only + i += 1 + ll_assert(len(state.freed) == i, "every collect should grow 1") + i = 0 + while i < n: + ll_assert(state.freed[i] == n - i - 1, "bogus ordering") + i += 1 + + self.interpret(f, [4]) From noreply at buildbot.pypy.org Sun Aug 11 18:05:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Aug 2013 18:05:04 +0200 (CEST) Subject: [pypy-commit] pypy gc-del-2: Renamings and new flag.
Preparation work Message-ID: <20130811160504.E7B231C3582@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del-2 Changeset: r66073:a33d2133a535 Date: 2013-08-11 18:04 +0200 http://bitbucket.org/pypy/pypy/changeset/a33d2133a535/ Log: Renamings and new flag. Preparation work diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -124,7 +124,13 @@ # note that GCFLAG_CARDS_SET is the most significant bit of a byte: # this is required for the JIT (x86) -_GCFLAG_FIRST_UNUSED = first_gcflag << 8 # the first unused bit +# The following flag is used to distinguish the two categories of objects +# in 'young_objects_not_in_nursery'. Set on a new object that is young and +# raw-malloced. It may be set or not on old raw-malloced objects. It must +# never be set on objects in the minimarkpage arena. +GCFLAG_YOUNG_RAW_MALLOCED = first_gcflag << 8 + +_GCFLAG_FIRST_UNUSED = first_gcflag << 9 # the first unused bit FORWARDSTUB = lltype.GcStruct('forwarding_stub', @@ -304,11 +310,17 @@ # we implement differently anyway. So directly call GCBase.setup(). GCBase.setup(self) # - # Two lists of all raw_malloced objects (the objects too large) - self.young_rawmalloced_objects = self.null_address_dict() + # A list of the old raw_malloced objects (the objects too large) self.old_rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) # + # A dict that contains all young objects not in the nursery. These + # are often young big objects allocated directly with raw_malloc, + # but may also contain objects from the minimarkpage arena: if they + # are found to be only reachable from unreferenced objects with + # finalizers, they are made young again. + self.young_objects_not_in_nursery = self.null_address_dict() + # # A list of all objects with finalizers (these are never young). self.objects_with_finalizers = self.AddressDeque() self.young_objects_with_light_finalizers = self.AddressStack() @@ -788,9 +800,10 @@ # The object is young or old depending on the argument. 
self.rawmalloced_total_size += r_uint(allocsize) if can_make_young: - if not self.young_rawmalloced_objects: - self.young_rawmalloced_objects = self.AddressDict() - self.young_rawmalloced_objects.add(result + size_gc_header) + if not self.young_objects_not_in_nursery: + self.young_objects_not_in_nursery = self.AddressDict() + self.young_objects_not_in_nursery.add(result + size_gc_header) + extra_flags |= GCFLAG_YOUNG_RAW_MALLOCED else: self.old_rawmalloced_objects.append(result + size_gc_header) extra_flags |= GCFLAG_TRACK_YOUNG_PTRS @@ -923,9 +936,9 @@ if self.nursery <= addr < self.nursery_real_top: return True # addr is in the nursery # - # Else, it may be in the set 'young_rawmalloced_objects' - return (bool(self.young_rawmalloced_objects) and - self.young_rawmalloced_objects.contains(addr)) + # Else, it may be in the set 'young_objects_not_in_nursery' + return (bool(self.young_objects_not_in_nursery) and + self.young_objects_not_in_nursery.contains(addr)) appears_to_be_young._always_inline_ = True def debug_is_old_object(self, addr): @@ -981,8 +994,8 @@ def debug_check_consistency(self): if self.DEBUG: - ll_assert(not self.young_rawmalloced_objects, - "young raw-malloced objects in a major collection") + ll_assert(not self.young_objects_not_in_nursery, + "young objects not in nursery in a major collection") ll_assert(not self.young_objects_with_weakrefs.non_empty(), "young objects with weakrefs in a major collection") MovingGCBase.debug_check_consistency(self) @@ -1286,7 +1299,7 @@ # # Before everything else, remove from 'old_objects_pointing_to_young' # the young arrays. - if self.young_rawmalloced_objects: + if self.young_objects_not_in_nursery: self.remove_young_arrays_from_old_objects_pointing_to_young() # # First, find the roots that point to young objects. All nursery @@ -1331,8 +1344,8 @@ # # Walk the list of young raw-malloced objects, and either free # them or make them old. - if self.young_rawmalloced_objects: - self.free_young_rawmalloced_objects() + if self.young_objects_not_in_nursery: + self.free_young_objects_not_in_nursery() # # All live nursery objects are out, and the rest dies. Fill # the nursery up to the cleanup point with zeros @@ -1468,13 +1481,13 @@ # that we must set GCFLAG_VISITED on young raw-malloced objects. if not self.is_in_nursery(obj): # cache usage trade-off: I think that it is a better idea to - # check if 'obj' is in young_rawmalloced_objects with an access + # check if 'obj' is in young_objects_not_in_nursery with an access # to this (small) dictionary, rather than risk a lot of cache # misses by reading a flag in the header of all the 'objs' that # arrive here. - if (bool(self.young_rawmalloced_objects) - and self.young_rawmalloced_objects.contains(obj)): - self._visit_young_rawmalloced_object(obj) + if (bool(self.young_objects_not_in_nursery) + and self.young_objects_not_in_nursery.contains(obj)): + self._visit_young_object_not_in_nursery(obj) return # size_gc_header = self.gcheaderbuilder.size_gc_header @@ -1535,7 +1548,7 @@ self.old_objects_pointing_to_young.append(newobj) _trace_drag_out._always_inline_ = True - def _visit_young_rawmalloced_object(self, obj): + def _visit_young_object_not_in_nursery(self, obj): # 'obj' points to a young, raw-malloced object. 
# Any young rawmalloced object never seen by the code here # will end up without GCFLAG_VISITED, and be freed at the @@ -1590,23 +1603,26 @@ self.old_rawmalloced_objects.append(arena + size_gc_header) return arena - def free_young_rawmalloced_objects(self): - self.young_rawmalloced_objects.foreach( - self._free_young_rawmalloced_obj, None) - self.young_rawmalloced_objects.delete() - self.young_rawmalloced_objects = self.null_address_dict() + def free_young_objects_not_in_nursery(self): + self.young_objects_not_in_nursery.foreach( + self._free_young_object_not_in_nursery, None) + self.young_objects_not_in_nursery.delete() + self.young_objects_not_in_nursery = self.null_address_dict() - def _free_young_rawmalloced_obj(self, obj, ignored1, ignored2): + def _free_young_object_not_in_nursery(self, obj, ignored1, ignored2): # If 'obj' has GCFLAG_VISITED, it was seen by _trace_drag_out # and survives. Otherwise, it dies. - self.free_rawmalloced_object_if_unvisited(obj) + if self.header(obj).tid & GCFLAG_YOUNG_RAW_MALLOCED: + self.free_rawmalloced_object_if_unvisited(obj) + else: + xxxxx def remove_young_arrays_from_old_objects_pointing_to_young(self): self.old_objects_pointing_to_young.filter(self._filter_young_array, None) def _filter_young_array(self, obj, ignored): - return not self.young_rawmalloced_objects.contains(obj) + return not self.young_objects_not_in_nursery.contains(obj) # ---------- # Full collection @@ -2062,8 +2078,8 @@ (obj + offset).address[0] = llmemory.NULL continue # no need to remember this weakref any longer # - elif (bool(self.young_rawmalloced_objects) and - self.young_rawmalloced_objects.contains(pointing_to)): + elif (bool(self.young_objects_not_in_nursery) and + self.young_objects_not_in_nursery.contains(pointing_to)): # young weakref to a young raw-malloced object if self.header(pointing_to).tid & GCFLAG_VISITED: pass # survives, but does not move From noreply at buildbot.pypy.org Sun Aug 11 18:13:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Aug 2013 18:13:26 +0200 (CEST) Subject: [pypy-commit] pypy gc-del-2: The first test to pass would be this: record and detect young objects Message-ID: <20130811161326.87ECE1C1309@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del-2 Changeset: r66074:42875d86bea5 Date: 2013-08-11 18:12 +0200 http://bitbucket.org/pypy/pypy/changeset/42875d86bea5/ Log: The first test to pass would be this: record and detect young objects with (non-light) finalizers diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py --- a/rpython/memory/test/test_minimark_gc.py +++ b/rpython/memory/test/test_minimark_gc.py @@ -13,6 +13,22 @@ GC_CAN_MALLOC_NONMOVABLE = True BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD + def test_finalizer_young_obj(self): + class A: + def __del__(self): + state.seen += 1 + class State: + pass + state = State() + + def f(): + state.seen = 0 + A(); A() + rgc.collect(0) # minor collection only + return state.seen + + assert self.interpret(f, []) == 2 + def test_finalizer_chain_minor_collect(self): class A: def __init__(self, n, next): From noreply at buildbot.pypy.org Sun Aug 11 18:14:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Aug 2013 18:14:27 +0200 (CEST) Subject: [pypy-commit] pypy gc-del-2: Oups, needs to be written this way (checked with major collections) Message-ID: <20130811161427.518131C1309@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del-2 Changeset: r66075:d999fdfa85b3 Date: 2013-08-11 18:13 +0200 
http://bitbucket.org/pypy/pypy/changeset/d999fdfa85b3/ Log: Oups, needs to be written this way (checked with major collections) diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py --- a/rpython/memory/test/test_minimark_gc.py +++ b/rpython/memory/test/test_minimark_gc.py @@ -21,9 +21,13 @@ pass state = State() + def make(): + A() + A() + def f(): state.seen = 0 - A(); A() + make() rgc.collect(0) # minor collection only return state.seen From noreply at buildbot.pypy.org Sun Aug 11 21:08:29 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 11 Aug 2013 21:08:29 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130811190829.35C011C13FF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r66076:5d26cdb39446 Date: 2013-08-11 12:07 -0700 http://bitbucket.org/pypy/pypy/changeset/5d26cdb39446/ Log: merge default diff too long, truncating to 2000 out of 7083 lines diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 +165,8 @@ # All _delegate_methods must also be initialized here. 
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. The @@ -179,12 +181,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). + self._sock = _sock def send(self, data, flags=0): @@ -216,9 +227,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +290,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +333,11 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: + self._sock = None + if s is not None: s._drop() - if self._close: - self._sock.close() - self._sock = None + if self._close: + s.close() def __del__(self): try: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). + sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS @@ -352,11 +358,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. 
return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1066,6 +1066,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1321,7 +1324,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1193,6 +1193,8 @@ # out of socket._fileobject() and into a base class. r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.6 +Version: 0.7 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/readline.egg-info b/lib_pypy/readline.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/readline.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: readline +Version: 6.2.4.1 +Summary: Hack to make "pip install readline" happy and do nothing +Home-page: UNKNOWN +Author: UNKNOWN +Author-email: UNKNOWN +License: UNKNOWN +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. 
_`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. _Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. + +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. Frames +can escape and they can also be allocated already at the moment we enter the +JIT. In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). + +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. 
+ It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -62,3 +62,15 @@ No longer delegate numpy string_ methods to space.StringObject, in numpy this works by kind of by accident. Support for merging the refactor-str-types branch + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. branch: reflex-support +.. branch: numpypy-inplace-op diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -179,11 +179,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - # if it's a generator, we have to preserve the exception state - if not self.is_generator(): - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -265,7 +264,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -335,7 +334,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -345,7 +344,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -360,7 +359,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -394,7 +393,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -481,7 +480,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." 
if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -495,7 +494,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -527,7 +526,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -558,12 +557,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -614,10 +613,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -643,8 +642,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -654,7 +653,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -672,7 +671,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -329,10 +329,6 @@ instance=True) base_user_setup(self, space, w_subtype) - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - add(Proto) subcls = type(name, (supercls,), body) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -37,6 +37,7 @@ except BaseException as e: interrupted.append(e) finally: + print('subthread stops, interrupted=%r' % (interrupted,)) done.append(None) # This is normally called by app_main.py @@ -52,11 +53,13 @@ try: done = [] interrupted = [] + print('--- start ---') _thread.start_new_thread(subthread, ()) for j in range(10): if len(done): break print('.') time.sleep(0.1) + print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 assert 'KeyboardInterrupt' in interrupted[0].__class__.__name__ diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -59,7 +59,7 @@ def descr_typecode(space, self): return space.wrap(self.typecode) -arr_eq_driver = jit.JitDriver(greens = ['comp_func'], reds = 'auto') +arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto') EQ, NE, LT, LE, GT, GE = range(6) def compare_arrays(space, arr1, arr2, comp_op): diff --git a/pypy/module/binascii/interp_uu.py b/pypy/module/binascii/interp_uu.py --- a/pypy/module/binascii/interp_uu.py +++ 
b/pypy/module/binascii/interp_uu.py @@ -61,7 +61,7 @@ return ord(bin[i]) except IndexError: return 0 -_a2b_read._always_inline_ = True +_b2a_read._always_inline_ = True @unwrap_spec(bin='bufferstr') def b2a_uu(space, bin): diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -592,11 +592,15 @@ def get(self, w_cppinstance, w_pycppclass): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) self.converter.to_memory(self.space, w_cppinstance, w_value, offset) return self.space.w_None diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -426,6 +426,11 @@ # mostly for the benefit of the CINT backend, which treats std as special gbl.std = make_cppnamespace(None, "std", None, False) + # install a type for enums to refer to + # TODO: this is correct for C++98, not for C++11 and in general there will + # be the same issue for all typedef'd builtin types + setattr(gbl, 'unsigned int', int) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -105,6 +105,13 @@ raises(IndexError, c.m_float_array.__getitem__, self.N) raises(IndexError, c.m_double_array.__getitem__, self.N) + # can not access an instance member on the class + raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, getattr, cppyy_test_data, 'm_int') + + assert not hasattr(cppyy_test_data, 'm_bool') + assert not hasattr(cppyy_test_data, 'm_int') + c.destruct() def test03_instance_data_write_access(self): @@ -428,12 +435,17 @@ c = cppyy_test_data() assert isinstance(c, cppyy_test_data) - # TODO: test that the enum is accessible as a type + # test that the enum is accessible as a type + assert cppyy_test_data.what assert cppyy_test_data.kNothing == 6 assert cppyy_test_data.kSomething == 111 assert cppyy_test_data.kLots == 42 + assert cppyy_test_data.what(cppyy_test_data.kNothing) == cppyy_test_data.kNothing + assert cppyy_test_data.what(6) == cppyy_test_data.kNothing + # TODO: only allow instantiations with correct values (C++11) + assert c.get_enum() == cppyy_test_data.kNothing assert c.m_enum == cppyy_test_data.kNothing @@ -455,6 +467,7 @@ assert cppyy_test_data.s_enum == cppyy_test_data.kSomething # global enums + assert gbl.fruit # test type accessible assert gbl.kApple == 78 assert gbl.kBanana == 29 assert gbl.kCitrus == 34 diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -41,13 +41,13 @@ def PyNumber_Int(space, w_obj): """Returns the o converted to an integer object on success, or NULL on failure. 
This is the equivalent of the Python expression int(o).""" - return space.int(w_obj) + return space.call_function(space.w_int, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Long(space, w_obj): """Returns the o converted to a long integer object on success, or NULL on failure. This is the equivalent of the Python expression long(o).""" - return space.int(w_obj) + return space.call_function(space.w_int, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Index(space, w_obj): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,7 +3,6 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread -from pypy.module.thread import os_thread PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -17,6 +16,9 @@ ('dict', PyObject), ])) +class NoThreads(Exception): + pass + @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread @@ -45,10 +47,15 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): + if not space.config.translation.thread: + raise NoThreads + from pypy.module.thread import os_thread os_thread.setup_threads(space) @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): + if not space.config.translation.thread: + return 0 return 1 # XXX: might be generally useful @@ -236,6 +243,8 @@ """Create a new thread state object belonging to the given interpreter object. The global interpreter lock need not be held, but may be held if it is necessary to serialize calls to this function.""" + if not space.config.translation.thread: + raise NoThreads rthread.gc_thread_prepare() # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. @@ -250,6 +259,8 @@ def PyThreadState_Clear(space, tstate): """Reset all information in a thread state object. 
The global interpreter lock must be held.""" + if not space.config.translation.thread: + raise NoThreads Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) space.threadlocals.leave_thread(space) diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -19,6 +19,8 @@ def test_number_long(self, space, api): w_l = api.PyNumber_Long(space.wrap(123)) assert api.PyLong_CheckExact(w_l) + w_l = api.PyNumber_Long(space.wrap("123")) + assert api.PyLong_CheckExact(w_l) def test_number_int(self, space, api): w_l = api.PyNumber_Int(space.wraplong(123L)) @@ -27,6 +29,8 @@ assert api.PyLong_CheckExact(w_l) w_l = api.PyNumber_Int(space.wrap(42.3)) assert api.PyLong_CheckExact(w_l) + w_l = api.PyNumber_Int(space.wrap("42")) + assert api.PyLong_CheckExact(w_l) def test_number_index(self, space, api): w_l = api.PyNumber_Index(space.wraplong(123L)) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -737,6 +737,27 @@ descr_gt = _binop_comp_impl(_binop_impl("greater")) descr_ge = _binop_comp_impl(_binop_impl("greater_equal")) + def _binop_inplace_impl(ufunc_name): + def impl(self, space, w_other): + w_out = self + ufunc = getattr(interp_ufuncs.get(space), ufunc_name) + return ufunc.call(space, [self, w_other, w_out]) + return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) + + descr_iadd = _binop_inplace_impl("add") + descr_isub = _binop_inplace_impl("subtract") + descr_imul = _binop_inplace_impl("multiply") + descr_idiv = _binop_inplace_impl("divide") + descr_itruediv = _binop_inplace_impl("true_divide") + descr_ifloordiv = _binop_inplace_impl("floor_divide") + descr_imod = _binop_inplace_impl("mod") + descr_ipow = _binop_inplace_impl("power") + descr_ilshift = _binop_inplace_impl("left_shift") + descr_irshift = _binop_inplace_impl("right_shift") + descr_iand = _binop_inplace_impl("bitwise_and") + descr_ior = _binop_inplace_impl("bitwise_or") + descr_ixor = _binop_inplace_impl("bitwise_xor") + def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) @@ -1007,6 +1028,20 @@ __ror__ = interp2app(W_NDimArray.descr_ror), __rxor__ = interp2app(W_NDimArray.descr_rxor), + __iadd__ = interp2app(W_NDimArray.descr_iadd), + __isub__ = interp2app(W_NDimArray.descr_isub), + __imul__ = interp2app(W_NDimArray.descr_imul), + __idiv__ = interp2app(W_NDimArray.descr_idiv), + __itruediv__ = interp2app(W_NDimArray.descr_itruediv), + __ifloordiv__ = interp2app(W_NDimArray.descr_ifloordiv), + __imod__ = interp2app(W_NDimArray.descr_imod), + __ipow__ = interp2app(W_NDimArray.descr_ipow), + __ilshift__ = interp2app(W_NDimArray.descr_ilshift), + __irshift__ = interp2app(W_NDimArray.descr_irshift), + __iand__ = interp2app(W_NDimArray.descr_iand), + __ior__ = interp2app(W_NDimArray.descr_ior), + __ixor__ = interp2app(W_NDimArray.descr_ixor), + __eq__ = interp2app(W_NDimArray.descr_eq), __ne__ = interp2app(W_NDimArray.descr_ne), __lt__ = interp2app(W_NDimArray.descr_lt), diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -155,7 +155,8 @@ obj_iter.next() return cur_value -reduce_cum_driver = jit.JitDriver(greens = ['shapelen', 'func', 'dtype'], +reduce_cum_driver = 
jit.JitDriver(name='numpy_reduce_cum_driver', + greens = ['shapelen', 'func', 'dtype'], reds = 'auto') def compute_reduce_cumultative(obj, out, calc_dtype, func, identity): @@ -214,8 +215,7 @@ axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', greens=['shapelen', - 'func', 'dtype', - 'identity'], + 'func', 'dtype'], reds='auto') def do_axis_reduce(shape, func, arr, dtype, axis, out, identity, cumultative, @@ -231,8 +231,7 @@ shapelen = len(shape) while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=dtype, identity=identity, - ) + dtype=dtype) w_val = arr_iter.getitem().convert_to(dtype) if out_iter.first_line: if identity is not None: @@ -529,8 +528,9 @@ val_arr.descr_getitem(space, w_idx)) iter.next() -byteswap_driver = jit.JitDriver(greens = ['dtype'], - reds = 'auto') +byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', + greens = ['dtype'], + reds = 'auto') def byteswap(from_, to): dtype = from_.dtype @@ -542,8 +542,9 @@ to_iter.next() from_iter.next() -choose_driver = jit.JitDriver(greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') +choose_driver = jit.JitDriver(name='numpy_choose_driver', + greens = ['shapelen', 'mode', 'dtype'], + reds = 'auto') def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -572,8 +573,9 @@ out_iter.next() arr_iter.next() -clip_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +clip_driver = jit.JitDriver(name='numpy_clip_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def clip(space, arr, shape, min, max, out): arr_iter = arr.create_iter(shape) @@ -597,8 +599,9 @@ out_iter.next() min_iter.next() -round_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +round_driver = jit.JitDriver(name='numpy_round_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def round(space, arr, dtype, shape, decimals, out): arr_iter = arr.create_iter(shape) @@ -612,7 +615,8 @@ arr_iter.next() out_iter.next() -diagonal_simple_driver = jit.JitDriver(greens = ['axis1', 'axis2'], +diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', + greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -789,6 +789,49 @@ r = [1, 2] + array([1, 2]) assert (r == [2, 4]).all() + def test_inline_op_scalar(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(3)) + getattr(a, op).__call__(2) + assert id(a) == id(b) + + def test_inline_op_array(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(5)) + c = array(range(5)) + d = array(5 * [2]) + getattr(a, op).__call__(d) + assert id(a) == id(b) + reg_op = op.replace('__i', '__') + for i in range(5): + assert a[i] == getattr(c[i], reg_op).__call__(d[i]) + def test_add_list(self): from numpypy import array, ndarray a = array(range(5)) diff --git a/pypy/module/micronumpy/test/test_zjit.py 
b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -6,7 +6,7 @@ import py from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import reset_stats +from rpython.jit.metainterp.warmspot import reset_stats, get_stats from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray @@ -35,9 +35,10 @@ cls.code_mapping = d cls.codes = allcodes - def run(self, name): + def compile_graph(self): + if self.graph is not None: + return space = FakeSpace() - i = self.code_mapping[name] codes = self.codes def f(i): @@ -57,14 +58,18 @@ raise TypeError(w_res) if self.graph is None: - interp, graph = self.meta_interp(f, [i], + interp, graph = self.meta_interp(f, [0], listops=True, backendopt=True, graph_and_interp_only=True) self.__class__.interp = interp self.__class__.graph = graph + + def run(self, name): + self.compile_graph() reset_stats() pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) py.test.skip("don't run for now") return retval @@ -134,6 +139,29 @@ 'int_add': 3, }) + def test_reduce_compile_only_once(self): + self.compile_graph() + reset_stats() + pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping['sum'] + # run it twice + retval = self.interp.eval_graph(self.graph, [i]) + retval = self.interp.eval_graph(self.graph, [i]) + # check that we got only one loop + assert len(get_stats().loops) == 1 + + def test_reduce_axis_compile_only_once(self): + self.compile_graph() + reset_stats() + pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping['axissum'] + # run it twice + retval = self.interp.eval_graph(self.graph, [i]) + retval = self.interp.eval_graph(self.graph, [i]) + # check that we got only one loop + assert len(get_stats().loops) == 1 + + def define_prod(): return """ a = |30| diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,18 +12,18 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap -PyFrame._virtualizable2_ = ['last_instr', 'pycode', - 'valuestackdepth', 'locals_stack_w[*]', - 'cells[*]', - 'last_exception', - 'lastblock', - 'is_being_profiled', - 'w_globals', - 'w_f_trace', - ] +PyFrame._virtualizable_ = ['last_instr', 'pycode', + 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', + 'last_exception', + 'lastblock', + 'is_being_profiled', + 'w_globals', + 'w_f_trace', + ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] @@ -77,7 +77,13 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: + self.last_exception = None return self.popvalue() def jump_absolute(self, jumpto, ec): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py 
b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py --- a/pypy/module/test_lib_pypy/test_curses.py +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -1,6 +1,15 @@ +import pytest + +# Check that lib_pypy.cffi finds the correct version of _cffi_backend. +# Otherwise, the test is skipped. It should never be skipped when run +# with "pypy py.test -A". +try: + from lib_pypy import cffi; cffi.FFI() +except (ImportError, AssertionError), e: + pytest.skip("no cffi module or wrong version (%s)" % (e,)) + from lib_pypy import _curses -import pytest lib = _curses.lib diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -14,9 +14,9 @@ return lltype.nullptr(T) interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLtypeCPU + from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now - apply_jit(interp, graph, LLtypeCPU) + apply_jit(interp, graph, LLGraphCPU) def apply_jit(interp, graph, CPUClass): diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -391,6 +391,7 @@ instance_level = False all_enforced_attrs = None # or a set settled = False + _detect_invalid_attrs = None def __init__(self, bookkeeper, pyobj=None, name=None, basedesc=None, classdict=None, @@ -714,6 +715,10 @@ # by changing the result's annotation (but not, of course, doing an # actual copy in the rtyper). Tested in rpython.rtyper.test.test_rlist, # test_immutable_list_out_of_instance. 
+ if self._detect_invalid_attrs and attr in self._detect_invalid_attrs: + raise Exception("field %r was migrated to %r from a subclass in " + "which it was declared as _immutable_fields_" % + (attr, self.pyobj)) search1 = '%s[*]' % (attr,) search2 = '%s?[*]' % (attr,) cdesc = self @@ -724,6 +729,14 @@ s_result.listdef.never_resize() s_copy = s_result.listdef.offspring() s_copy.listdef.mark_as_immutable() + # + cdesc = cdesc.basedesc + while cdesc is not None: + if cdesc._detect_invalid_attrs is None: + cdesc._detect_invalid_attrs = set() + cdesc._detect_invalid_attrs.add(attr) + cdesc = cdesc.basedesc + # return s_copy cdesc = cdesc.basedesc return s_result # common case diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -379,4 +379,4 @@ def specialize_call_location(funcdesc, args_s, op): assert op is not None - return maybe_star_args(funcdesc, op, args_s) + return maybe_star_args(funcdesc, (op,), args_s) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3088,7 +3088,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class B(A): def meth(self): return self @@ -3128,7 +3128,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class I: pass @@ -3717,6 +3717,24 @@ a = self.RPythonAnnotator() a.build_types(f, [int]) + def test_immutable_field_subclass(self): + class Root: + pass + class A(Root): + _immutable_fields_ = '_my_lst[*]' + def __init__(self, lst): + self._my_lst = lst + def foo(x): + return len(x._my_lst) + + def f(n): + foo(A([2, n])) + foo(Root()) + + a = self.RPythonAnnotator() + e = py.test.raises(Exception, a.build_types, f, [int]) + assert "field '_my_lst' was migrated" in str(e.value) + def test_call_classes_with_noarg_init(self): class A: foo = 21 diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -959,16 +959,6 @@ pmc.B_offs(self.mc.currpos(), c.EQ) return pos - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - tmploc = self._regalloc.get_scratch_reg(INT) - self.mov_loc_loc(vloc, r.ip) - self.mc.MOV_ri(tmploc.value, 0) - self.mc.STR_ri(tmploc.value, r.ip.value, ofs) - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from (tmploc, 0) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -101,6 +101,9 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) + def get_vinfo(self): + return self.vinfo + def __repr__(self): return 'FieldDescr(%r, %r)' % (self.S, self.fieldname) @@ -170,7 +173,7 @@ translate_support_code = False is_llgraph = True - def __init__(self, rtyper, stats=None, *ignored_args, **ignored_kwds): + def __init__(self, rtyper, stats=None, *ignored_args, **kwds): model.AbstractCPU.__init__(self) self.rtyper = rtyper self.llinterp = LLInterpreter(rtyper) @@ -178,6 +181,7 @@ class MiniStats: pass self.stats = stats or MiniStats() + 
self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): clt = model.CompiledLoopToken(self, looptoken.number) @@ -316,6 +320,8 @@ except KeyError: descr = FieldDescr(S, fieldname) self.descrs[key] = descr + if self.vinfo_for_tests is not None: + descr.vinfo = self.vinfo_for_tests return descr def arraydescrof(self, A): @@ -496,6 +502,8 @@ def bh_raw_store_i(self, struct, offset, newvalue, descr): ll_p = rffi.cast(rffi.CCHARP, struct) ll_p = rffi.cast(lltype.Ptr(descr.A), rffi.ptradd(ll_p, offset)) + if descr.A.OF == lltype.SingleFloat: + newvalue = longlong.int2singlefloat(newvalue) ll_p[0] = rffi.cast(descr.A.OF, newvalue) def bh_raw_store_f(self, struct, offset, newvalue, descr): @@ -600,6 +608,7 @@ forced_deadframe = None overflow_flag = False last_exception = None + force_guard_op = None def __init__(self, cpu, argboxes, args): self.env = {} @@ -766,6 +775,8 @@ if self.forced_deadframe is not None: saved_data = self.forced_deadframe._saved_data self.fail_guard(descr, saved_data) + self.force_guard_op = self.current_op + execute_guard_not_forced_2 = execute_guard_not_forced def execute_guard_not_invalidated(self, descr): if self.lltrace.invalid: @@ -887,7 +898,6 @@ # res = CALL assembler_call_helper(pframe) # jmp @done # @fastpath: - # RESET_VABLE # res = GETFIELD(pframe, 'result') # @done: # @@ -907,25 +917,17 @@ vable = lltype.nullptr(llmemory.GCREF.TO) # # Emulate the fast path - def reset_vable(jd, vable): - if jd.index_of_virtualizable != -1: - fielddescr = jd.vable_token_descr - NULL = lltype.nullptr(llmemory.GCREF.TO) - self.cpu.bh_setfield_gc(vable, NULL, fielddescr) + # faildescr = self.cpu.get_latest_descr(pframe) if faildescr == self.cpu.done_with_this_frame_descr_int: - reset_vable(jd, vable) return self.cpu.get_int_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_ref: - reset_vable(jd, vable) return self.cpu.get_ref_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_float: - reset_vable(jd, vable) return self.cpu.get_float_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_void: - reset_vable(jd, vable) return None - # + assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: result = assembler_helper_ptr(pframe, vable) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt) + ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -24,6 +24,7 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): + assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr self.failargs = failargs @@ -232,11 +233,8 @@ jmp_location = self._call_assembler_patch_je(result_loc, je_location) - # Path B: fast path. Must load the return value, and reset the token + # Path B: fast path. 
Must load the return value - # Reset the vable token --- XXX really too much special logic here:-( - if jd.index_of_virtualizable >= 0: - self._call_assembler_reset_vtoken(jd, vloc) # self._call_assembler_load_result(op, result_loc) # diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -76,7 +76,13 @@ FLAG_STRUCT = 'X' FLAG_VOID = 'V' -class FieldDescr(AbstractDescr): +class ArrayOrFieldDescr(AbstractDescr): + vinfo = None + + def get_vinfo(self): + return self.vinfo + +class FieldDescr(ArrayOrFieldDescr): name = '' offset = 0 # help translation field_size = 0 @@ -150,12 +156,13 @@ # ____________________________________________________________ # ArrayDescrs -class ArrayDescr(AbstractDescr): +class ArrayDescr(ArrayOrFieldDescr): tid = 0 basesize = 0 # workaround for the annotator itemsize = 0 lendescr = None flag = '\x00' + vinfo = None def __init__(self, basesize, itemsize, lendescr, flag): self.basesize = basesize diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -721,12 +721,8 @@ def bh_raw_load_i(self, addr, offset, descr): ofs, size, sign = self.unpack_arraydescr_size(descr) - items = addr + offset - for TYPE, _, itemsize in unroll_basic_sizes: - if size == itemsize: - items = rffi.cast(rffi.CArrayPtr(TYPE), items) - return rffi.cast(lltype.Signed, items[0]) - assert False # unreachable code + assert ofs == 0 # otherwise, 'descr' is not a raw length-less array + return self.read_int_at_mem(addr, offset, size, sign) def bh_raw_load_f(self, addr, offset, descr): items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), addr + offset) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -23,7 +23,7 @@ # - floats neg and abs class Frame(object): - _virtualizable2_ = ['i'] + _virtualizable_ = ['i'] def __init__(self, i): self.i = i @@ -98,7 +98,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -6,7 +6,7 @@ total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 - total_freed_bridges = 0 + total_freed_bridges = 0 # for heaptracker # _all_size_descrs_with_vtable = None @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -294,7 +294,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3954,8 +3954,12 @@ p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - 
value = rffi.cast(T, 0x4243444546474849) + value = rffi.cast(T, -0x4243444546474849) rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, + arraydescr) + assert got == rffi.cast(lltype.Signed, value) + # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -3981,6 +3985,11 @@ p[i] = '\xDD' value = rffi.cast(T, 1.12e20) rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_f(rffi.cast(lltype.Signed, p), 16, + arraydescr) + got = longlong.getrealfloat(got) + assert got == rffi.cast(lltype.Float, value) + # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -3991,22 +4000,58 @@ assert result == rffi.cast(lltype.Float, value) rawstorage.free_raw_storage(p) + def test_raw_load_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + from rpython.rlib import rawstorage + for T in [rffi.FLOAT]: + ops = """ + [i0, i1] + i2 = raw_load(i0, i1, descr=arraydescr) + finish(i2) + """ + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + for i in range(31): + p[i] = '\xDD' + value = rffi.cast(T, 1.12e20) + rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, + arraydescr) + assert got == longlong.singlefloat2int(value) + # + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken, + rffi.cast(lltype.Signed, p), 16) + result = self.cpu.get_int_value(deadframe, 0) + assert result == longlong.singlefloat2int(value) + rawstorage.free_raw_storage(p) + def test_raw_store_int(self): from rpython.rlib import rawstorage for T in [rffi.UCHAR, rffi.SIGNEDCHAR, rffi.USHORT, rffi.SHORT, rffi.UINT, rffi.INT, rffi.ULONG, rffi.LONG]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = (-0x4243444546474849) & sys.maxint + self.cpu.bh_raw_store_i(rffi.cast(lltype.Signed, p), 16, value, + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert result == rffi.cast(T, value) + rawstorage.free_raw_storage(p) + # ops = """ [i0, i1, i2] raw_store(i0, i1, i2, descr=arraydescr) finish() """ - arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = 0x4243444546474849 & sys.maxint loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -4021,16 +4066,24 @@ py.test.skip("requires floats") from rpython.rlib import rawstorage for T in [rffi.DOUBLE]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = 1.23e20 + self.cpu.bh_raw_store_f(rffi.cast(lltype.Signed, p), 16, + longlong.getfloatstorage(value), + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert result == rffi.cast(T, value) + rawstorage.free_raw_storage(p) + # ops = """ [i0, i1, f2] raw_store(i0, i1, f2, descr=arraydescr) finish() """ - arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = 1.23e20 loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() 
self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -4041,6 +4094,41 @@ assert result == rffi.cast(T, value) rawstorage.free_raw_storage(p) + def test_raw_store_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + from rpython.rlib import rawstorage + for T in [rffi.FLOAT]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = rffi.cast(T, 1.23e20) + self.cpu.bh_raw_store_i(rffi.cast(lltype.Signed, p), 16, + longlong.singlefloat2int(value), + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert (rffi.cast(lltype.Float, result) == + rffi.cast(lltype.Float, value)) + rawstorage.free_raw_storage(p) + # + ops = """ + [i0, i1, i2] + raw_store(i0, i1, i2, descr=arraydescr) + finish() + """ + p = rawstorage.alloc_raw_storage(31) + for i in range(31): + p[i] = '\xDD' + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, + rffi.cast(lltype.Signed, p), 16, + longlong.singlefloat2int(value)) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert (rffi.cast(lltype.Float, result) == + rffi.cast(lltype.Float, value)) + rawstorage.free_raw_storage(p) + def test_forcing_op_with_fail_arg_in_reg(self): values = [] def maybe_force(token, flag): diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -59,7 +59,7 @@ t.buildannotator().build_types(function, [int] * len(args), main_entry_point=True) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() warmrunnerdesc = WarmRunnerDesc(t, translate_support_code=True, CPUClass=self.CPUClass, **kwds) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -79,6 +79,7 @@ allblocks) self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.pending_guard_tokens = None @@ -1846,7 +1847,11 @@ self.mov(fail_descr_loc, RawEbpLoc(ofs)) arglist = op.getarglist() if arglist and arglist[0].type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(1) # rax + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -1991,15 +1996,6 @@ # return jmp_location - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - vtoken_ofs = fielddescr.offset - self.mc.MOV(edx, vloc) # we know vloc is on the current frame - self.mc.MOV_mi((edx.value, vtoken_ofs), 0) - # in the line above, TOKEN_NONE = 0 - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from the dead frame's value index 0 @@ -2326,6 +2322,15 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_location - 1, chr(offset)) + def store_force_descr(self, op, fail_locs, frame_depth): + guard_token = self.implement_guard_recovery(op.opnum, + op.getdescr(), + op.getfailargs(), + fail_locs, frame_depth) + self._finish_gcmap = guard_token.gcmap + 
self._store_force_index(op) + self.store_info_on_descr(0, guard_token) + def force_token(self, reg): # XXX kill me assert isinstance(reg, RegLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -22,7 +22,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint @@ -1332,6 +1332,13 @@ #if jump_op is not None and jump_op.getdescr() is descr: # self._compute_hint_frame_locations_from_descr(descr) + def consider_guard_not_forced_2(self, op): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = [self.loc(v) for v in op.getfailargs()] + self.assembler.store_force_descr(op, fail_locs, + self.fm.get_frame_depth()) + self.possibly_free_vars(op.getfailargs()) + def consider_keepalive(self, op): pass diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -21,9 +21,9 @@ self.callcontrol = CallControl(cpu, jitdrivers_sd) self._seen_files = set() - def transform_func_to_jitcode(self, func, values, type_system='lltype'): + def transform_func_to_jitcode(self, func, values): """For testing.""" - rtyper = support.annotate(func, values, type_system=type_system) + rtyper = support.annotate(func, values) graph = rtyper.annotator.translator.graphs[0] jitcode = JitCode("test") self.transform_graph_to_jitcode(graph, jitcode, True) diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -83,6 +83,7 @@ OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 + OS_JIT_FORCE_VIRTUALIZABLE = 121 # for debugging: _OS_CANRAISE = set([ @@ -165,6 +166,9 @@ EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) +EffectInfo.LEAST_GENERAL = EffectInfo([], [], [], [], + EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + can_invalidate=False) def effectinfo_from_writeanalyze(effects, cpu, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -521,6 +521,8 @@ op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], op.result) return [SpaceOperation('-live-', [], None), op1, None] + if hints.get('force_virtualizable'): + return SpaceOperation('hint_force_virtualizable', [op.args[0]], None) else: log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -35,7 +35,7 @@ return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, - type_system="lltype", translationoptions={}): + translationoptions={}): # build the normal ll graphs for ll_function t = TranslationContext() for key, value in translationoptions.items(): @@ -44,7 +44,7 @@ a = t.buildannotator(policy=annpolicy) argtypes = 
getargtypes(a, values) a.build_types(func, argtypes, main_entry_point=True) - rtyper = t.buildrtyper(type_system = type_system) + rtyper = t.buildrtyper() rtyper.specialize() #if inline: # auto_inlining(t, threshold=inline) diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -125,8 +125,8 @@ class TestFlatten: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def encoding_test(self, func, args, expected, diff --git a/rpython/jit/codewriter/test/test_policy.py b/rpython/jit/codewriter/test/test_policy.py --- a/rpython/jit/codewriter/test/test_policy.py +++ b/rpython/jit/codewriter/test/test_policy.py @@ -131,7 +131,7 @@ def test_access_directly_but_not_seen(): class X: - _virtualizable2_ = ["a"] + _virtualizable_ = ["a"] def h(x, y): w = 0 for i in range(y): diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -13,8 +13,8 @@ class TestRegAlloc: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def check_assembler(self, graph, expected, transform=False, diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1320,6 +1320,10 @@ from rpython.jit.metainterp import quasiimmut quasiimmut.do_force_quasi_immutable(cpu, struct, mutatefielddescr) + @arguments("r") + def bhimpl_hint_force_virtualizable(r): + pass + @arguments("cpu", "d", returns="r") def bhimpl_new(cpu, descr): return cpu.bh_new(descr) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -708,6 +708,8 @@ rstack._stack_criticalcode_start() try: deadframe = cpu.force(token) + # this should set descr to ResumeGuardForceDescr, if it + # was not that already faildescr = cpu.get_latest_descr(deadframe) assert isinstance(faildescr, ResumeGuardForcedDescr) faildescr.handle_async_forcing(deadframe) @@ -715,12 +717,18 @@ rstack._stack_criticalcode_stop() def handle_async_forcing(self, deadframe): - from rpython.jit.metainterp.resume import force_from_resumedata + from rpython.jit.metainterp.resume import (force_from_resumedata, + AlreadyForced) metainterp_sd = self.metainterp_sd vinfo = self.jitdriver_sd.virtualizable_info ginfo = self.jitdriver_sd.greenfield_info - all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, - vinfo, ginfo) + # there is some chance that this is already forced. In this case + # the virtualizable would have a token = NULL + try: + all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe, + vinfo, ginfo) + except AlreadyForced: + return # The virtualizable data was stored on the real virtualizable above. 
# Handle all_virtuals: keep them for later blackholing from the # future failure of the GUARD_NOT_FORCED diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -151,6 +151,8 @@ descr_ptr = cpu.ts.cast_to_baseclass(descr_gcref) return cast_base_ptr_to_instance(AbstractDescr, descr_ptr) + def get_vinfo(self): + raise NotImplementedError class AbstractFailDescr(AbstractDescr): index = -1 diff --git a/rpython/jit/metainterp/jitexc.py b/rpython/jit/metainterp/jitexc.py --- a/rpython/jit/metainterp/jitexc.py +++ b/rpython/jit/metainterp/jitexc.py @@ -62,7 +62,7 @@ def _get_standard_error(rtyper, Class): - exdata = rtyper.getexceptiondata() + exdata = rtyper.exceptiondata clsdef = rtyper.annotator.bookkeeper.getuniqueclassdef(Class) evalue = exdata.get_standard_ll_exc_instance(rtyper, clsdef) return evalue diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5101,6 +5101,15 @@ } self.optimize_loop(ops, expected, call_pure_results) + def test_guard_not_forced_2_virtual(self): + ops = """ + [i0] + p0 = new_array(3, descr=arraydescr) + guard_not_forced_2() [p0] + finish(p0) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7086,6 +7086,19 @@ """ self.optimize_loop(ops, expected) + def test_force_virtualizable_virtual(self): + ops = """ + [i0] + p1 = new_with_vtable(ConstClass(node_vtable)) + cond_call(1, 123, p1, descr=clear_vable) + jump(i0) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + def test_setgetfield_counter(self): ops = """ [p1] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -254,12 +254,19 @@ asmdescr = LoopToken() # it can be whatever, it's not a descr though from rpython.jit.metainterp.virtualref import VirtualRefInfo + class FakeWarmRunnerDesc: pass FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced + FUNC = lltype.FuncType([], lltype.Void) + ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + can_invalidate=False, + oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) From noreply at buildbot.pypy.org Mon Aug 12 02:30:43 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Mon, 12 Aug 2013 02:30:43 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: alternate arena (probably will be removed when altenative created), more tests Message-ID: <20130812003043.961A01C146E@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66077:5e851020f0ec Date: 2013-08-12 12:29 +1200 http://bitbucket.org/pypy/pypy/changeset/5e851020f0ec/ Log: alternate arena (probably will be removed when altenative created), more tests diff --git 
a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -139,8 +139,9 @@ # marking of objects can be done over multiple STATE_MARKING = 1 STATE_SWEEPING_RAWMALLOC = 2 -STATE_SWEEPING_ARENA = 3 -STATE_FINALIZING = 4 +STATE_SWEEPING_ARENA_1 = 3 +STATE_SWEEPING_ARENA_2 = 4 +STATE_FINALIZING = 5 @@ -293,6 +294,8 @@ ArenaCollectionClass = minimarkpage.ArenaCollection self.ac = ArenaCollectionClass(arena_size, page_size, small_request_threshold) + self.ac_alternate = ArenaCollectionClass(arena_size, page_size, + small_request_threshold) # # Used by minor collection: a list of (mostly non-young) objects that # (may) contain a pointer to a young object. Populated by @@ -982,7 +985,9 @@ """Return the total memory used, not counting any object in the nursery: only objects in the ArenaCollection or raw-malloced. """ - return self.ac.total_memory_used + self.rawmalloced_total_size + return self.ac.total_memory_used + self.ac_alternate.total_memory_used \ + + self.rawmalloced_total_size + def card_marking_words_for_length(self, length): # --- Unoptimized version: @@ -1025,7 +1030,9 @@ already_checked = True elif self.gc_state == STATE_SWEEPING_RAWMALLOC: pass - elif self.gc_state == STATE_SWEEPING_ARENA: + elif self.gc_state == STATE_SWEEPING_ARENA_1: + pass + elif self.gc_state == STATE_SWEEPING_ARENA_2: pass elif self.gc_state == STATE_FINALIZING: pass @@ -1048,7 +1055,9 @@ self._debug_check_object_marking(obj) elif self.gc_state == STATE_SWEEPING_RAWMALLOC: self._debug_check_object_sweeping_rawmalloc(obj) - elif self.gc_state == STATE_SWEEPING_ARENA: + elif self.gc_state == STATE_SWEEPING_ARENA_1: + self._debug_check_object_sweeping_arena(obj) + elif self.gc_state == STATE_SWEEPING_ARENA_2: self._debug_check_object_sweeping_arena(obj) elif self.gc_state == STATE_FINALIZING: self._debug_check_object_finalizing(obj) @@ -1772,14 +1781,24 @@ # XXX heuristic here to decide nobjects. if self.free_unvisited_rawmalloc_objects_step(1): #malloc objects freed - self.gc_state = STATE_SWEEPING_ARENA - - elif self.gc_state == STATE_SWEEPING_ARENA: + self.gc_state = STATE_SWEEPING_ARENA_1 + + elif self.gc_state == STATE_SWEEPING_ARENA_1: # # Ask the ArenaCollection to visit all objects. Free the ones # that have not been visited above, and reset GCFLAG_VISITED on # the others. - self.ac.mass_free(self._free_if_unvisited) + self.ac_alternate.mass_free(self._free_if_unvisited) + self.gc_state = STATE_SWEEPING_ARENA_2 + #swap arenas and start clearing the other one + self.ac,self.ac_alternate = self.ac_alternate,self.ac + + elif self.gc_state == STATE_SWEEPING_ARENA_2: + + self.ac_alternate.mass_free(self._free_if_unvisited) + + self.num_major_collects += 1 + # # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) @@ -1810,12 +1829,13 @@ "Using too much memory, aborting") self.max_heap_size_already_raised = True raise MemoryError + self.gc_state = STATE_FINALIZING - # END SWEEPING # FINALIZING not yet incrementalised # but it seems safe to allow mutator to run after sweeping and # before finalizers are called. This is because run_finalizers # is a different list to objects_with_finalizers. + # END SWEEPING elif self.gc_state == STATE_FINALIZING: # XXX This is considered rare, # so should we make the calling incremental? 
or leave as is @@ -1825,7 +1845,6 @@ self.gc_state = STATE_SCANNING self.execute_finalizers() - self.num_major_collects += 1 #END FINALIZING else: pass #XXX which exception to raise here. Should be unreachable. diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -1468,6 +1468,74 @@ res = self.run("nongc_opaque_attached_to_gc") assert res == 0 +class TestIncrementalMiniMarkGC(TestSemiSpaceGC): + gcpolicy = "incminimark" + should_be_moving = True + GC_CAN_MALLOC_NONMOVABLE = True + GC_CAN_SHRINK_ARRAY = True + + def test_gc_heap_stats(self): + py.test.skip("not implemented") + + def define_nongc_attached_to_gc(cls): + from rpython.rtyper.lltypesystem import rffi + ARRAY = rffi.CArray(rffi.INT) + class A: + def __init__(self, n): + self.buf = lltype.malloc(ARRAY, n, flavor='raw', + add_memory_pressure=True) + def __del__(self): + lltype.free(self.buf, flavor='raw') + A(6) + def f(): + # allocate a total of ~77GB, but if the automatic gc'ing works, + # it should never need more than a few MBs at once + am1 = am2 = am3 = None + res = 0 + for i in range(1, 100001): + if am3 is not None: + res += rffi.cast(lltype.Signed, am3.buf[0]) + am3 = am2 + am2 = am1 + am1 = A(i * 4) + am1.buf[0] = rffi.cast(rffi.INT, i - 50000) + return res + return f + + def test_nongc_attached_to_gc(self): + res = self.run("nongc_attached_to_gc") + assert res == -99997 + + def define_nongc_opaque_attached_to_gc(cls): + from rpython.rlib import rgc, ropenssl + + class A: + def __init__(self): + self.ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, + flavor='raw') + digest = ropenssl.EVP_get_digestbyname('sha1') + ropenssl.EVP_DigestInit(self.ctx, digest) + rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + 64) + + def __del__(self): + ropenssl.EVP_MD_CTX_cleanup(self.ctx) + lltype.free(self.ctx, flavor='raw') + #A() --- can't call it here?? get glibc crashes on tannit64 + def f(): + am1 = am2 = am3 = None + for i in range(100000): + am3 = am2 + am2 = am1 + am1 = A() + # what can we use for the res? 
+ return 0 + return f + + def test_nongc_opaque_attached_to_gc(self): + res = self.run("nongc_opaque_attached_to_gc") + assert res == 0 + + # ____________________________________________________________________ class TaggedPointersTest(object): @@ -1560,3 +1628,6 @@ class TestMiniMarkGCMostCompact(TaggedPointersTest, TestMiniMarkGC): removetypeptr = True + +class TestIncrementalMiniMarkGCMostCompact(TaggedPointersTest, TestIncrementalMiniMarkGC): + removetypeptr = True From noreply at buildbot.pypy.org Mon Aug 12 08:23:25 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 12 Aug 2013 08:23:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add guard_value to stm_integration tests Message-ID: <20130812062325.0CADD1C011D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66078:9be958070a83 Date: 2013-08-12 08:22 +0200 http://bitbucket.org/pypy/pypy/changeset/9be958070a83/ Log: add guard_value to stm_integration tests diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -320,6 +320,7 @@ self.cpu.execute_token(looptoken, sgcref) self.assert_in(called_on, [sgcref]) + def test_ptr_eq_fastpath(self): cpu = self.cpu cpu.gc_ll_descr.init_nursery(100) @@ -339,20 +340,28 @@ ConstPtr(s1), ConstPtr(s2)] for p1, p2 in itertools.combinations(ps, 2): - for guard in [None, rop.GUARD_TRUE, rop.GUARD_FALSE]: + for guard in [None, rop.GUARD_TRUE, rop.GUARD_FALSE, + rop.GUARD_VALUE]: cpu.gc_ll_descr.clear_lists() # BUILD OPERATIONS: i = i0 guarddescr = BasicFailDescr() finaldescr = BasicFinalDescr() - operations = [ResOperation(rop.PTR_EQ, [p1, p2], i0)] - if guard is not None: - gop = ResOperation(guard, [i0], None, + if guard == rop.GUARD_VALUE: + gop = ResOperation(rop.GUARD_VALUE, [p1, p2], None, descr=guarddescr) gop.setfailargs([]) - operations.append(gop) + operations = [gop] i = i1 + else: + operations = [ResOperation(rop.PTR_EQ, [p1, p2], i0)] + if guard is not None: + gop = ResOperation(guard, [i0], None, + descr=guarddescr) + gop.setfailargs([]) + operations.append(gop) + i = i1 # finish must depend on result of ptr_eq if no guard # is inbetween (otherwise ptr_eq gets deleted) # if there is a guard, the result of ptr_eq must not @@ -393,7 +402,7 @@ if guard is not None: if s1 == s2: - if guard == rop.GUARD_TRUE: + if guard in (rop.GUARD_TRUE, rop.GUARD_VALUE): assert not guard_failed else: assert guard_failed From noreply at buildbot.pypy.org Mon Aug 12 09:47:39 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Mon, 12 Aug 2013 09:47:39 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4-pwd-fix: fixed support for pwd stdlib 2.7.4 Message-ID: <20130812074739.110341C011D@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: stdlib-2.7.4-pwd-fix Changeset: r66079:8886e7984343 Date: 2013-07-30 21:32 -0300 http://bitbucket.org/pypy/pypy/changeset/8886e7984343/ Log: fixed support for pwd stdlib 2.7.4 diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -2,7 +2,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rarithmetic import 
intmask eci = ExternalCompilationInfo( @@ -52,14 +52,19 @@ ]) return space.call_function(w_passwd_struct, w_tuple) - at unwrap_spec(uid=int) -def getpwuid(space, uid): +def getpwuid(space, w_uid): """ getpwuid(uid) -> (pw_name,pw_passwd,pw_uid, pw_gid,pw_gecos,pw_dir,pw_shell) Return the password database entry for the given numeric user ID. See pwd.__doc__ for more on password database entries. """ + import sys + if space.is_true(space.or_(space.gt(w_uid, space.wrap(sys.maxint)), + space.lt(w_uid, space.wrap(-sys.maxint - 1)))): + msg = "getpwuid(): uid not found" + raise OperationError(space.w_KeyError, space.wrap(msg)) + uid = space.int_w(w_uid) pw = c_getpwuid(uid) if not pw: raise operationerrfmt(space.w_KeyError, @@ -92,4 +97,3 @@ finally: c_endpwent() return space.newlist(users_w) - diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -20,9 +20,11 @@ else: assert pw.pw_dir.startswith('/') assert pw.pw_shell.startswith('/') - # assert type(pw.pw_uid) is int assert type(pw.pw_gid) is int + # should be out of uid_t range + raises(KeyError, pwd.getpwuid, 2**128) + raises(KeyError, pwd.getpwuid, -2**128) def test_getpwnam(self): import pwd From noreply at buildbot.pypy.org Mon Aug 12 09:47:41 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Mon, 12 Aug 2013 09:47:41 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4-pwd-fix: improved the uid range check Message-ID: <20130812074741.718031C011D@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: stdlib-2.7.4-pwd-fix Changeset: r66080:210278811677 Date: 2013-08-11 23:23 -0300 http://bitbucket.org/pypy/pypy/changeset/210278811677/ Log: improved the uid range check diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -5,6 +5,19 @@ from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rarithmetic import intmask +import sys + + +if sys.maxint == 2147483647: + def check_uid_range(space, num): + pass +else: + def check_uid_range(space, num): + if num < -(1<<31) or num >= (1<<32): + msg = "getpwuid(): uid not found" + raise OperationError(space.w_KeyError, space.wrap(msg)) + + eci = ExternalCompilationInfo( includes=['pwd.h'] ) @@ -52,6 +65,7 @@ ]) return space.call_function(w_passwd_struct, w_tuple) + def getpwuid(space, w_uid): """ getpwuid(uid) -> (pw_name,pw_passwd,pw_uid, @@ -59,12 +73,14 @@ Return the password database entry for the given numeric user ID. See pwd.__doc__ for more on password database entries. 
""" - import sys - if space.is_true(space.or_(space.gt(w_uid, space.wrap(sys.maxint)), - space.lt(w_uid, space.wrap(-sys.maxint - 1)))): - msg = "getpwuid(): uid not found" - raise OperationError(space.w_KeyError, space.wrap(msg)) - uid = space.int_w(w_uid) + try: + uid = space.int_w(w_uid) + except OperationError, e: + if e.match(space, space.w_OverflowError): + msg = "getpwuid(): uid not found" + raise OperationError(space.w_KeyError, space.wrap(msg)) + raise + check_uid_range(space, uid) pw = c_getpwuid(uid) if not pw: raise operationerrfmt(space.w_KeyError, diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -25,6 +25,8 @@ # should be out of uid_t range raises(KeyError, pwd.getpwuid, 2**128) raises(KeyError, pwd.getpwuid, -2**128) + raises(KeyError, pwd.getpwuid, (1<<32)) + raises(KeyError, pwd.getpwuid, -(1<<32)) def test_getpwnam(self): import pwd From noreply at buildbot.pypy.org Mon Aug 12 09:47:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 09:47:43 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Merged in andrewsmedina/numpypy/stdlib-2.7.4-pwd-fix (pull request #175) Message-ID: <20130812074743.24CBB1C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stdlib-2.7.4 Changeset: r66081:3725099b37be Date: 2013-08-12 09:47 +0200 http://bitbucket.org/pypy/pypy/changeset/3725099b37be/ Log: Merged in andrewsmedina/numpypy/stdlib-2.7.4-pwd-fix (pull request #175) fixed support for pwd stdlib 2.7.4 diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -2,9 +2,22 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rarithmetic import intmask +import sys + + +if sys.maxint == 2147483647: + def check_uid_range(space, num): + pass +else: + def check_uid_range(space, num): + if num < -(1<<31) or num >= (1<<32): + msg = "getpwuid(): uid not found" + raise OperationError(space.w_KeyError, space.wrap(msg)) + + eci = ExternalCompilationInfo( includes=['pwd.h'] ) @@ -52,14 +65,22 @@ ]) return space.call_function(w_passwd_struct, w_tuple) - at unwrap_spec(uid=int) -def getpwuid(space, uid): + +def getpwuid(space, w_uid): """ getpwuid(uid) -> (pw_name,pw_passwd,pw_uid, pw_gid,pw_gecos,pw_dir,pw_shell) Return the password database entry for the given numeric user ID. See pwd.__doc__ for more on password database entries. 
""" + try: + uid = space.int_w(w_uid) + except OperationError, e: + if e.match(space, space.w_OverflowError): + msg = "getpwuid(): uid not found" + raise OperationError(space.w_KeyError, space.wrap(msg)) + raise + check_uid_range(space, uid) pw = c_getpwuid(uid) if not pw: raise operationerrfmt(space.w_KeyError, @@ -92,4 +113,3 @@ finally: c_endpwent() return space.newlist(users_w) - diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -20,9 +20,13 @@ else: assert pw.pw_dir.startswith('/') assert pw.pw_shell.startswith('/') - # assert type(pw.pw_uid) is int assert type(pw.pw_gid) is int + # should be out of uid_t range + raises(KeyError, pwd.getpwuid, 2**128) + raises(KeyError, pwd.getpwuid, -2**128) + raises(KeyError, pwd.getpwuid, (1<<32)) + raises(KeyError, pwd.getpwuid, -(1<<32)) def test_getpwnam(self): import pwd From noreply at buildbot.pypy.org Mon Aug 12 10:07:04 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 12 Aug 2013 10:07:04 +0200 (CEST) Subject: [pypy-commit] pypy default: implemente changes to the ARM backend from kill-gen-store-back-in Message-ID: <20130812080704.C2DD01C02EE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r66082:b1f3ded4536f Date: 2013-08-12 02:49 -0500 http://bitbucket.org/pypy/pypy/changeset/b1f3ded4536f/ Log: implemente changes to the ARM backend from kill-gen-store-back-in diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -69,6 +69,7 @@ self.mc.datablockwrapper = self.datablockwrapper self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.current_clt = None @@ -889,7 +890,7 @@ relative_offset = tok.pos_recovery_stub - tok.offset guard_pos = block_start + tok.offset if not tok.is_guard_not_invalidated: - # patch the guard jumpt to the stub + # patch the guard jump to the stub # overwrite the generate NOP with a B_offs to the pos of the # stub mc = InstrBuilder(self.cpu.cpuinfo.arch_version) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -33,6 +33,7 @@ from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.arm import callbuilder +from rpython.rlib.rarithmetic import r_uint class ArmGuardToken(GuardToken): @@ -190,7 +191,7 @@ self.mc.RSB_ri(resloc.value, l0.value, imm=0) return fcond - def _emit_guard(self, op, arglocs, fcond, save_exc, + def build_guard_token(self, op, frame_depth, arglocs, offset, fcond, save_exc, is_guard_not_invalidated=False, is_guard_not_forced=False): assert isinstance(save_exc, bool) @@ -198,7 +199,27 @@ descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) + gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + token = ArmGuardToken(self.cpu, gcmap, + descr, + failargs=op.getfailargs(), + fail_locs=arglocs, + offset=offset, + exc=save_exc, + frame_depth=frame_depth, + is_guard_not_invalidated=is_guard_not_invalidated, + is_guard_not_forced=is_guard_not_forced, + fcond=fcond) + return token + + def _emit_guard(self, op, arglocs, fcond, save_exc, + is_guard_not_invalidated=False, + is_guard_not_forced=False): pos = 
self.mc.currpos() + token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], pos, fcond, save_exc, + is_guard_not_invalidated, + is_guard_not_forced) + self.pending_guards.append(token) # For all guards that are not GUARD_NOT_INVALIDATED we emit a # breakpoint to ensure the location is patched correctly. In the case # of GUARD_NOT_INVALIDATED we use just a NOP, because it is only @@ -207,17 +228,6 @@ self.mc.NOP() else: self.mc.BKPT() - gcmap = allocate_gcmap(self, arglocs[0].value, JITFRAME_FIXED_SIZE) - self.pending_guards.append(ArmGuardToken(self.cpu, gcmap, - descr, - failargs=op.getfailargs(), - fail_locs=arglocs[1:], - offset=pos, - exc=save_exc, - frame_depth=arglocs[0].value, - is_guard_not_invalidated=is_guard_not_invalidated, - is_guard_not_forced=is_guard_not_forced, - fcond=fcond)) return c.AL def _emit_guard_overflow(self, guard, failargs, fcond): @@ -351,7 +361,11 @@ # XXX self.mov(fail_descr_loc, RawStackLoc(ofs)) self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(0) # r0 + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -912,6 +926,14 @@ return fcond + def store_force_descr(self, op, fail_locs, frame_depth): + pos = self.mc.currpos() + guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL, True, False, True) + #self.pending_guards.append(guard_token) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(pos, guard_token) + def emit_op_force_token(self, op, arglocs, regalloc, fcond): # XXX kill me res_loc = arglocs[0] diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1194,6 +1194,13 @@ # self._compute_hint_frame_locations_from_descr(descr) return [] + def prepare_op_guard_not_forced_2(self, op, fcond): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = [self.loc(v) for v in op.getfailargs()] + self.assembler.store_force_descr(op, fail_locs, + self.fm.get_frame_depth()) + self.possibly_free_vars(op.getfailargs()) + def prepare_guard_call_may_force(self, op, guard_op, fcond): args = self._prepare_call(op, save_all_regs=True) return self._prepare_guard(guard_op, args) From noreply at buildbot.pypy.org Mon Aug 12 10:07:06 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 12 Aug 2013 10:07:06 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130812080706.2480E1C02EE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r66083:0ef546bb8d2c Date: 2013-08-12 02:50 -0500 http://bitbucket.org/pypy/pypy/changeset/0ef546bb8d2c/ Log: merge heads diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -2046,6 +2046,8 @@ # The code relies on the fact that no weakref can be an old object # weakly pointing to a young object. Indeed, weakrefs are immutable # so they cannot point to an object that was created after it. + # Thanks to this, during a minor collection, we don't have to fix + # or clear the address stored in old weakrefs. 
def invalidate_young_weakrefs(self): """Called during a nursery collection.""" # walk over the list of objects that contain weakrefs and are in the From noreply at buildbot.pypy.org Mon Aug 12 10:54:51 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 12 Aug 2013 10:54:51 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: make exception thread-local stm-aware Message-ID: <20130812085451.1D9611C138A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66084:a285d16c08c8 Date: 2013-08-12 10:54 +0200 http://bitbucket.org/pypy/pypy/changeset/a285d16c08c8/ Log: make exception thread-local stm-aware diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -172,10 +172,8 @@ break exc = guardtok.exc target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = rgc.cast_instance_to_gcref(guardtok.faildescr) - # already done by gc.py record_constptrs, just to be safe: - fail_descr = rgc._make_sure_does_not_move(fail_descr) - fail_descr = rgc.cast_gcref_to_int(fail_descr) + fail_descr = cast_instance_to_gcref(guardtok.faildescr) + fail_descr = rffi.cast(lltype.Signed, fail_descr) base_ofs = self.cpu.get_baseofs_of_frame_field() positions = [0] * len(guardtok.fail_locs) for i, loc in enumerate(guardtok.fail_locs): @@ -228,10 +226,10 @@ else: raise AssertionError(kind) - gcref = rgc.cast_instance_to_gcref(value) + import pdb;pdb.set_trace() + gcref = cast_instance_to_gcref(value) gcref = rgc._make_sure_does_not_move(gcref) - value = rgc.cast_gcref_to_int(gcref) - + value = rffi.cast(lltype.Signed, gcref) je_location = self._call_assembler_check_descr(value, tmploc) # # Path A: use assembler_helper_adr diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -260,6 +260,21 @@ self.propagate_exception_path = rawstart self.mc = None + def _get_stm_tl(self, adr): + """Makes 'adr' relative to threadlocal-base if we run in STM. 
+ Before using such a relative address, call + self._stm_tl_segment_prefix_if_necessary.""" + if self.cpu.gc_ll_descr.stm and we_are_translated(): + # also not during tests + result = adr - stmtlocal.threadlocal_base() + assert rx86.fits_in_32bits(result) + return result + return adr + + def _stm_tl_segment_prefix_if_necessary(self, mc): + if self.cpu.gc_ll_descr.stm and we_are_translated(): + stmtlocal.tl_segment_prefix(mc) + def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or not self.cpu.propagate_exception_descr: @@ -294,7 +309,9 @@ else: mc.ADD_ri(esp.value, WORD) # - mc.MOV(eax, heap(self.cpu.pos_exception())) + ea = self._get_stm_tl(self.cpu.pos_exception()) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(eax, heap(ea)) mc.TEST_rr(eax.value, eax.value) mc.J_il8(rx86.Conditions['NZ'], 0) jnz_location = mc.get_relative_pos() @@ -1740,7 +1757,9 @@ def genop_guard_guard_no_exception(self, ign_1, guard_op, guard_token, locs, ign_2): - self.mc.CMP(heap(self.cpu.pos_exception()), imm0) + ea = self._get_stm_tl(self.cpu.pos_exception()) + self._stm_tl_segment_prefix_if_necessary(self.mc) + self.mc.CMP(heap(ea), imm0) self.implement_guard(guard_token, 'NZ') def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, @@ -1753,7 +1772,9 @@ locs, resloc): loc = locs[0] loc1 = locs[1] - self.mc.MOV(loc1, heap(self.cpu.pos_exception())) + ea = self._get_stm_tl(self.cpu.pos_exception()) + self._stm_tl_segment_prefix_if_necessary(self.mc) + self.mc.MOV(loc1, heap(ea)) self.mc.CMP(loc1, loc) self.implement_guard(guard_token, 'NE') self._store_and_reset_exception(self.mc, resloc) @@ -1763,30 +1784,43 @@ """ Resest the exception. If excvalloc is None, then store it on the frame in jf_guard_exc """ + eva = self._get_stm_tl(self.cpu.pos_exc_value()) + ea = self._get_stm_tl(self.cpu.pos_exception()) + # + self._stm_tl_segment_prefix_if_necessary(mc) if excvalloc is not None: assert excvalloc.is_core_reg() - mc.MOV(excvalloc, heap(self.cpu.pos_exc_value())) + mc.MOV(excvalloc, heap(eva)) elif tmploc is not None: # if both are None, just ignore ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') - mc.MOV(tmploc, heap(self.cpu.pos_exc_value())) + mc.MOV(tmploc, heap(eva)) mc.MOV(RawEbpLoc(ofs), tmploc) + # if exctploc is not None: assert exctploc.is_core_reg() - mc.MOV(exctploc, heap(self.cpu.pos_exception())) - - mc.MOV(heap(self.cpu.pos_exception()), imm0) - mc.MOV(heap(self.cpu.pos_exc_value()), imm0) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(exctploc, heap(ea)) + # + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(ea), imm0) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(eva), imm0) def _restore_exception(self, mc, excvalloc, exctploc, tmploc=None): + eva = self._get_stm_tl(self.cpu.pos_exc_value()) + ea = self._get_stm_tl(self.cpu.pos_exception()) if excvalloc is not None: - mc.MOV(heap(self.cpu.pos_exc_value()), excvalloc) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(eva), excvalloc) else: assert tmploc is not None ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') mc.MOV(tmploc, RawEbpLoc(ofs)) mc.MOV_bi(ofs, 0) - mc.MOV(heap(self.cpu.pos_exc_value()), tmploc) - mc.MOV(heap(self.cpu.pos_exception()), exctploc) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(eva), tmploc) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(ea), exctploc) def _gen_guard_overflow(self, guard_op, guard_token): guard_opnum = guard_op.getopnum() @@ -1983,9 
+2017,14 @@ if exc: # We might have an exception pending. Load it into ebx... - mc.MOV(ebx, heap(self.cpu.pos_exc_value())) - mc.MOV(heap(self.cpu.pos_exception()), imm0) - mc.MOV(heap(self.cpu.pos_exc_value()), imm0) + eva = self._get_stm_tl(self.cpu.pos_exc_value()) + ea = self._get_stm_tl(self.cpu.pos_exception()) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(ebx, heap(eva)) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(ea), imm0) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(eva), imm0) # ...and save ebx into 'jf_guard_exc' offset = self.cpu.get_ofs_of_frame_field('jf_guard_exc') mc.MOV_br(offset, ebx.value) From noreply at buildbot.pypy.org Mon Aug 12 11:15:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 11:15:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Don't fall back to codespeak, which doesn't exist any more Message-ID: <20130812091531.286DB1C1309@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66085:f4d0526d45eb Date: 2013-08-12 11:15 +0200 http://bitbucket.org/pypy/pypy/changeset/f4d0526d45eb/ Log: Don't fall back to codespeak, which doesn't exist any more diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -152,7 +152,8 @@ try: plaincontent = dot2plain_graphviz(content, contenttype) except PlainParseError, e: - print e - # failed, retry via codespeak - plaincontent = dot2plain_codespeak(content, contenttype) + raise + ##print e + ### failed, retry via codespeak + ##plaincontent = dot2plain_codespeak(content, contenttype) return list(parse_plain(graph_id, plaincontent, links, fixedfont)) From noreply at buildbot.pypy.org Mon Aug 12 17:13:32 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 12 Aug 2013 17:13:32 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: hack to show full 64bit addresses in assembler dump of jitviewer. This doesn't work in cases where the truncated (by objdump) 32bit address already overflows Message-ID: <20130812151332.6478D1C011D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66086:f3193a9bf2f3 Date: 2013-08-12 17:12 +0200 http://bitbucket.org/pypy/pypy/changeset/f3193a9bf2f3/ Log: hack to show full 64bit addresses in assembler dump of jitviewer. 
This doesn't work in cases where the truncated (by objdump) 32bit address already overflows diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -90,7 +90,7 @@ v = " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) - ofs = int(adr.strip(":"), 16) - start + ofs = int(adr.strip(":"), 16) # add symbols to addresses: for addr in lineaddresses(v): sym = symbols.get(addr) @@ -100,6 +100,7 @@ if ofs >= 0: asm.append((ofs, v.strip("\n"))) # + prefix = hex(dump_start)[:-8] asm_index = 0 for i, op in enumerate(loop.operations): end = 0 @@ -113,12 +114,14 @@ else: end = loop.operations[j].offset if op.offset is not None: - while asm[asm_index][0] < op.offset: + while asm[asm_index][0] - start < op.offset: asm_index += 1 end_index = asm_index - while asm[end_index][0] < end and end_index < len(asm) - 1: + while asm[end_index][0] - start < end and end_index < len(asm) - 1: end_index += 1 - op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) + op.asm = '\n'.join([ + prefix+hex(asm[i][0])[2:] + ": " + asm[i][1] + for i in range(asm_index, end_index)]) return loop def _asm_disassemble(self, d, origin_addr, tp): diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -59,7 +59,7 @@ 'arm_32': 'arm', } cmd = find_objdump() - objdump = ('%(command)s -M %(backend)s -b binary -m %(machine)s ' + objdump = ('%(command)s -w -M %(backend)s -b binary -m %(machine)s ' '--disassembler-options=intel-mnemonics ' '--adjust-vma=%(origin)d -D %(file)s') # From noreply at buildbot.pypy.org Mon Aug 12 17:30:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 17:30:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix on 32-bit linux Message-ID: <20130812153011.B0B651C154D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66087:476cabac8fcb Date: 2013-08-12 17:29 +0200 http://bitbucket.org/pypy/pypy/changeset/476cabac8fcb/ Log: Fix on 32-bit linux diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -385,6 +385,8 @@ _, addr, _, data = re.split(" +", dump) backend_name = backend.split(" ")[1] addr = int(addr[1:], 16) + if addr < 0: + addr += (2 * sys.maxint + 2) if addr in addrs and addrs[addr]: name = addrs[addr].pop(0) # they should come in order dumps[name] = (backend_name, addr, data) From noreply at buildbot.pypy.org Mon Aug 12 17:36:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 17:36:06 +0200 (CEST) Subject: [pypy-commit] pypy default: Print assembler including the address of the current instruction Message-ID: <20130812153606.429341C154D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66088:2d0eaeda9275 Date: 2013-08-12 17:35 +0200 http://bitbucket.org/pypy/pypy/changeset/2d0eaeda9275/ Log: Print assembler including the address of the current instruction and the raw bytes; I think there is little point in hiding information here, and it lets us see that some jumps have a bogus destination address (unpatched) diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -85,7 +85,7 @@ continue e = elem.split("\t") adr = e[0] - v = " ".join(e[2:]) + v = elem # --- more 
compactly: " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start From noreply at buildbot.pypy.org Mon Aug 12 18:12:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 18:12:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Try to reuse the World class from viewcode. It will correctly handle Message-ID: <20130812161229.03F841C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66089:6688b05ff4aa Date: 2013-08-12 18:11 +0200 http://bitbucket.org/pypy/pypy/changeset/6688b05ff4aa/ Log: Try to reuse the World class from viewcode. It will correctly handle the patching, so unlike the previous version, we see correct jump targets, for example. It's still a mess because of various issues like objdump truncating addresses to 4 bytes. diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -379,17 +379,17 @@ name = entry[:entry.find('(') - 1].lower() addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) + from rpython.jit.backend.tool.viewcode import World + world = World() + for entry in extract_category(log, 'jit-backend-dump'): + world.parse(entry.splitlines(True), load_symbols=False, + truncate_addr=False) dumps = {} - for entry in extract_category(log, 'jit-backend-dump'): - backend, _, dump, _ = entry.split("\n") - _, addr, _, data = re.split(" +", dump) - backend_name = backend.split(" ")[1] - addr = int(addr[1:], 16) - if addr < 0: - addr += (2 * sys.maxint + 2) - if addr in addrs and addrs[addr]: - name = addrs[addr].pop(0) # they should come in order - dumps[name] = (backend_name, addr, data) + for r in world.ranges: + if r.addr in addrs and addrs[r.addr]: + name = addrs[r.addr].pop(0) # they should come in order + data = r.data.encode('hex') # backward compatibility + dumps[name] = (world.backend_name, r.addr, data) loops = [] for entry in extract_category(log, 'jit-log-opt'): parser = ParserCls(entry, None, {}, 'lltype', None, diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -240,7 +240,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True): + def parse(self, f, textonly=True, load_symbols=True, truncate_addr=True): for line in f: if line.startswith('BACKEND '): self.backend_name = line.split(' ')[1].strip() @@ -250,7 +250,11 @@ assert pieces[2].startswith('+') if len(pieces) == 3: continue # empty line - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset data = pieces[3].replace(':', '').decode('hex') @@ -268,11 +272,17 @@ pieces = line.split(None, 3) assert pieces[1].startswith('@') assert pieces[2].startswith('+') - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset self.logentries[addr] = pieces[3] elif line.startswith('SYS_EXECUTABLE '): + if not load_symbols: + continue filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': self.symbols.update(load_symbols(filename)) From noreply at 
buildbot.pypy.org Mon Aug 12 18:30:05 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 12 Aug 2013 18:30:05 +0200 (CEST) Subject: [pypy-commit] pypy default: use helper method that collects locs for fail args Message-ID: <20130812163005.1FE471C01E5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r66090:46192cfa897e Date: 2013-08-12 18:28 +0200 http://bitbucket.org/pypy/pypy/changeset/46192cfa897e/ Log: use helper method that collects locs for fail args diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -62,7 +62,8 @@ self.current_clt = looptoken.compiled_loop_token self.mc = InstrBuilder(self.cpu.cpuinfo.arch_version) self.pending_guards = [] - assert self.datablockwrapper is None + #assert self.datablockwrapper is None --- but obscure case + # possible, e.g. getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) @@ -76,7 +77,6 @@ self._regalloc = None self.mc = None self.pending_guards = None - assert self.datablockwrapper is None def setup_failure_recovery(self): self.failure_recovery_code = [0, 0, 0, 0] diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1196,9 +1196,8 @@ def prepare_op_guard_not_forced_2(self, op, fcond): self.rm.before_call(op.getfailargs(), save_all_regs=True) - fail_locs = [self.loc(v) for v in op.getfailargs()] - self.assembler.store_force_descr(op, fail_locs, - self.fm.get_frame_depth()) + fail_locs = self._prepare_guard(op) + self.assembler.store_force_descr(op, fail_locs[1:], fail_locs[0].value) self.possibly_free_vars(op.getfailargs()) def prepare_guard_call_may_force(self, op, guard_op, fcond): From noreply at buildbot.pypy.org Mon Aug 12 18:30:06 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 12 Aug 2013 18:30:06 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130812163006.57E2A1C01E5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r66091:eab7a5e0b341 Date: 2013-08-12 18:29 +0200 http://bitbucket.org/pypy/pypy/changeset/eab7a5e0b341/ Log: merge heads diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -85,7 +85,7 @@ continue e = elem.split("\t") adr = e[0] - v = " ".join(e[2:]) + v = elem # --- more compactly: " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start @@ -379,15 +379,17 @@ name = entry[:entry.find('(') - 1].lower() addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) + from rpython.jit.backend.tool.viewcode import World + world = World() + for entry in extract_category(log, 'jit-backend-dump'): + world.parse(entry.splitlines(True), load_symbols=False, + truncate_addr=False) dumps = {} - for entry in extract_category(log, 'jit-backend-dump'): - backend, _, dump, _ = entry.split("\n") - _, addr, _, data = re.split(" +", dump) - backend_name = backend.split(" ")[1] - addr = int(addr[1:], 16) - if addr in addrs and addrs[addr]: - name = addrs[addr].pop(0) # they should come in order - dumps[name] = (backend_name, addr, data) + for r in world.ranges: + if r.addr in addrs and addrs[r.addr]: + name = addrs[r.addr].pop(0) # they should come 
in order + data = r.data.encode('hex') # backward compatibility + dumps[name] = (world.backend_name, r.addr, data) loops = [] for entry in extract_category(log, 'jit-log-opt'): parser = ParserCls(entry, None, {}, 'lltype', None, diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -240,7 +240,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True): + def parse(self, f, textonly=True, load_symbols=True, truncate_addr=True): for line in f: if line.startswith('BACKEND '): self.backend_name = line.split(' ')[1].strip() @@ -250,7 +250,11 @@ assert pieces[2].startswith('+') if len(pieces) == 3: continue # empty line - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset data = pieces[3].replace(':', '').decode('hex') @@ -268,11 +272,17 @@ pieces = line.split(None, 3) assert pieces[1].startswith('@') assert pieces[2].startswith('+') - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset self.logentries[addr] = pieces[3] elif line.startswith('SYS_EXECUTABLE '): + if not load_symbols: + continue filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': self.symbols.update(load_symbols(filename)) From noreply at buildbot.pypy.org Mon Aug 12 19:25:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 19:25:03 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Fix on 32-bit linux Message-ID: <20130812172503.DB6CF1C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r66092:99a030212c04 Date: 2013-08-12 17:29 +0200 http://bitbucket.org/pypy/pypy/changeset/99a030212c04/ Log: Fix on 32-bit linux (transplanted from 476cabac8fcb9f22775630b8f998ca393697f81d) diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -411,6 +411,8 @@ _, addr, _, data = re.split(" +", dump) backend_name = backend.split(" ")[1] addr = int(addr[1:], 16) + if addr < 0: + addr += (2 * sys.maxint + 2) if addr in addrs and addrs[addr]: name = addrs[addr].pop(0) # they should come in order dumps[name] = (backend_name, addr, data) From noreply at buildbot.pypy.org Mon Aug 12 19:25:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 19:25:05 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Print assembler including the address of the current instruction Message-ID: <20130812172505.22F1C1C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r66093:c6e59f8abe95 Date: 2013-08-12 17:35 +0200 http://bitbucket.org/pypy/pypy/changeset/c6e59f8abe95/ Log: Print assembler including the address of the current instruction and the raw bytes; I think there is little point in hiding information here, and it lets us see that some jumps have a bogus destination address (unpatched) (transplanted from 2d0eaeda9275f908f52a75973b1b63c6fa9baa80) diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ 
b/pypy/tool/jitlogparser/parser.py @@ -87,7 +87,7 @@ continue e = elem.split("\t") adr = e[0] - v = " ".join(e[2:]) + v = elem # --- more compactly: " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start From noreply at buildbot.pypy.org Mon Aug 12 19:25:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 19:25:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Always load symbols from the executable, but just print an error and Message-ID: <20130812172510.221401C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66094:783e5f660b9c Date: 2013-08-12 19:19 +0200 http://bitbucket.org/pypy/pypy/changeset/783e5f660b9c/ Log: Always load symbols from the executable, but just print an error and continue if that fails. diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -382,8 +382,7 @@ from rpython.jit.backend.tool.viewcode import World world = World() for entry in extract_category(log, 'jit-backend-dump'): - world.parse(entry.splitlines(True), load_symbols=False, - truncate_addr=False) + world.parse(entry.splitlines(True), truncate_addr=False) dumps = {} for r in world.ranges: if r.addr in addrs and addrs[r.addr]: diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -240,7 +240,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True, load_symbols=True, truncate_addr=True): + def parse(self, f, textonly=True, truncate_addr=True): for line in f: if line.startswith('BACKEND '): self.backend_name = line.split(' ')[1].strip() @@ -281,11 +281,12 @@ addr = baseaddr + offset self.logentries[addr] = pieces[3] elif line.startswith('SYS_EXECUTABLE '): - if not load_symbols: - continue filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': - self.symbols.update(load_symbols(filename)) + try: + self.symbols.update(load_symbols(filename)) + except Exception as e: + print e self.executable_name = filename def find_cross_references(self): From noreply at buildbot.pypy.org Mon Aug 12 19:25:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 19:25:11 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Try to reuse the World class from viewcode. It will correctly handle Message-ID: <20130812172511.717411C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r66095:66dd859b8759 Date: 2013-08-12 18:11 +0200 http://bitbucket.org/pypy/pypy/changeset/66dd859b8759/ Log: Try to reuse the World class from viewcode. It will correctly handle the patching, so unlike the previous version, we see correct jump targets, for example. It's still a mess because of various issues like objdump truncating addresses to 4 bytes. 
(transplanted from 6688b05ff4aa2c733d5150b9a0a757bc95ea4bb3) diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -394,29 +394,19 @@ name = entry[:entry.find('(') - 1].lower() addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) - dumps = {} - executables = set(["??",]) - symbols = {} + from rpython.jit.backend.tool.viewcode import World + world = World() for entry in extract_category(log, 'jit-backend-dump'): entry = purge_thread_numbers(entry) - backend, executable, dump, _ = entry.split("\n") - if "(out of memory!)" not in executable: - _, executable = executable.split(" ") - if executable not in executables: - try: - symbols.update(load_symbols(executable)) - except Exception as e: - print e - executables.add(executable) - _, addr, _, data = re.split(" +", dump) - backend_name = backend.split(" ")[1] - addr = int(addr[1:], 16) - if addr < 0: - addr += (2 * sys.maxint + 2) - if addr in addrs and addrs[addr]: - name = addrs[addr].pop(0) # they should come in order - dumps[name] = (backend_name, addr, data) - + world.parse(entry.splitlines(True), load_symbols=False, + truncate_addr=False) + dumps = {} + symbols = world.symbols + for r in world.ranges: + if r.addr in addrs and addrs[r.addr]: + name = addrs[r.addr].pop(0) # they should come in order + data = r.data.encode('hex') # backward compatibility + dumps[name] = (world.backend_name, r.addr, data) loops = [] for entry in extract_category(log, 'jit-log-opt'): parser = ParserCls(entry, None, {}, 'lltype', None, diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -239,7 +239,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True): + def parse(self, f, textonly=True, load_symbols=True, truncate_addr=True): for line in f: line = line[line.find('#') + 1:].strip() if line.startswith('BACKEND '): @@ -250,7 +250,11 @@ assert pieces[2].startswith('+') if len(pieces) == 3: continue # empty line - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset data = pieces[3].replace(':', '').decode('hex') @@ -268,11 +272,17 @@ pieces = line.split(None, 3) assert pieces[1].startswith('@') assert pieces[2].startswith('+') - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset self.logentries[addr] = pieces[3] elif line.startswith('SYS_EXECUTABLE '): + if not load_symbols: + continue filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': try: From noreply at buildbot.pypy.org Mon Aug 12 19:25:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 19:25:12 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Always load symbols from the executable, but just print an error and Message-ID: <20130812172512.A67401C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r66096:d8014f0977e6 Date: 2013-08-12 19:19 +0200 http://bitbucket.org/pypy/pypy/changeset/d8014f0977e6/ Log: Always load symbols from the executable, but 
just print an error and continue if that fails. (transplanted from 783e5f660b9c7243739521564f436c0d56af0d99) diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -398,8 +398,7 @@ world = World() for entry in extract_category(log, 'jit-backend-dump'): entry = purge_thread_numbers(entry) - world.parse(entry.splitlines(True), load_symbols=False, - truncate_addr=False) + world.parse(entry.splitlines(True), truncate_addr=False) dumps = {} symbols = world.symbols for r in world.ranges: diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -239,7 +239,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True, load_symbols=True, truncate_addr=True): + def parse(self, f, textonly=True, truncate_addr=True): for line in f: line = line[line.find('#') + 1:].strip() if line.startswith('BACKEND '): @@ -281,8 +281,6 @@ addr = baseaddr + offset self.logentries[addr] = pieces[3] elif line.startswith('SYS_EXECUTABLE '): - if not load_symbols: - continue filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': try: From noreply at buildbot.pypy.org Mon Aug 12 19:25:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 19:25:13 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: merge heads Message-ID: <20130812172513.EDFB21C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r66097:977c929b6804 Date: 2013-08-12 19:24 +0200 http://bitbucket.org/pypy/pypy/changeset/977c929b6804/ Log: merge heads diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -90,7 +90,7 @@ v = elem # --- more compactly: " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) - ofs = int(adr.strip(":"), 16) - start + ofs = int(adr.strip(":"), 16) # add symbols to addresses: for addr in lineaddresses(v): sym = symbols.get(addr) @@ -100,6 +100,7 @@ if ofs >= 0: asm.append((ofs, v.strip("\n"))) # + prefix = hex(dump_start)[:-8] asm_index = 0 for i, op in enumerate(loop.operations): end = 0 @@ -113,12 +114,14 @@ else: end = loop.operations[j].offset if op.offset is not None: - while asm[asm_index][0] < op.offset: + while asm[asm_index][0] - start < op.offset: asm_index += 1 end_index = asm_index - while asm[end_index][0] < end and end_index < len(asm) - 1: + while asm[end_index][0] - start < end and end_index < len(asm) - 1: end_index += 1 - op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) + op.asm = '\n'.join([ + prefix+hex(asm[i][0])[2:] + ": " + asm[i][1] + for i in range(asm_index, end_index)]) return loop def _asm_disassemble(self, d, origin_addr, tp): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -172,10 +172,8 @@ break exc = guardtok.exc target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = rgc.cast_instance_to_gcref(guardtok.faildescr) - # already done by gc.py record_constptrs, just to be safe: - fail_descr = rgc._make_sure_does_not_move(fail_descr) - fail_descr = rgc.cast_gcref_to_int(fail_descr) + fail_descr = cast_instance_to_gcref(guardtok.faildescr) + fail_descr = 
rffi.cast(lltype.Signed, fail_descr) base_ofs = self.cpu.get_baseofs_of_frame_field() positions = [0] * len(guardtok.fail_locs) for i, loc in enumerate(guardtok.fail_locs): @@ -228,10 +226,10 @@ else: raise AssertionError(kind) - gcref = rgc.cast_instance_to_gcref(value) + import pdb;pdb.set_trace() + gcref = cast_instance_to_gcref(value) gcref = rgc._make_sure_does_not_move(gcref) - value = rgc.cast_gcref_to_int(gcref) - + value = rffi.cast(lltype.Signed, gcref) je_location = self._call_assembler_check_descr(value, tmploc) # # Path A: use assembler_helper_adr diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -59,7 +59,7 @@ 'arm_32': 'arm', } cmd = find_objdump() - objdump = ('%(command)s -M %(backend)s -b binary -m %(machine)s ' + objdump = ('%(command)s -w -M %(backend)s -b binary -m %(machine)s ' '--disassembler-options=intel-mnemonics ' '--adjust-vma=%(origin)d -D %(file)s') # diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -260,6 +260,21 @@ self.propagate_exception_path = rawstart self.mc = None + def _get_stm_tl(self, adr): + """Makes 'adr' relative to threadlocal-base if we run in STM. + Before using such a relative address, call + self._stm_tl_segment_prefix_if_necessary.""" + if self.cpu.gc_ll_descr.stm and we_are_translated(): + # also not during tests + result = adr - stmtlocal.threadlocal_base() + assert rx86.fits_in_32bits(result) + return result + return adr + + def _stm_tl_segment_prefix_if_necessary(self, mc): + if self.cpu.gc_ll_descr.stm and we_are_translated(): + stmtlocal.tl_segment_prefix(mc) + def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or not self.cpu.propagate_exception_descr: @@ -294,7 +309,9 @@ else: mc.ADD_ri(esp.value, WORD) # - mc.MOV(eax, heap(self.cpu.pos_exception())) + ea = self._get_stm_tl(self.cpu.pos_exception()) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(eax, heap(ea)) mc.TEST_rr(eax.value, eax.value) mc.J_il8(rx86.Conditions['NZ'], 0) jnz_location = mc.get_relative_pos() @@ -1740,7 +1757,9 @@ def genop_guard_guard_no_exception(self, ign_1, guard_op, guard_token, locs, ign_2): - self.mc.CMP(heap(self.cpu.pos_exception()), imm0) + ea = self._get_stm_tl(self.cpu.pos_exception()) + self._stm_tl_segment_prefix_if_necessary(self.mc) + self.mc.CMP(heap(ea), imm0) self.implement_guard(guard_token, 'NZ') def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, @@ -1753,7 +1772,9 @@ locs, resloc): loc = locs[0] loc1 = locs[1] - self.mc.MOV(loc1, heap(self.cpu.pos_exception())) + ea = self._get_stm_tl(self.cpu.pos_exception()) + self._stm_tl_segment_prefix_if_necessary(self.mc) + self.mc.MOV(loc1, heap(ea)) self.mc.CMP(loc1, loc) self.implement_guard(guard_token, 'NE') self._store_and_reset_exception(self.mc, resloc) @@ -1763,30 +1784,43 @@ """ Resest the exception. 
If excvalloc is None, then store it on the frame in jf_guard_exc """ + eva = self._get_stm_tl(self.cpu.pos_exc_value()) + ea = self._get_stm_tl(self.cpu.pos_exception()) + # + self._stm_tl_segment_prefix_if_necessary(mc) if excvalloc is not None: assert excvalloc.is_core_reg() - mc.MOV(excvalloc, heap(self.cpu.pos_exc_value())) + mc.MOV(excvalloc, heap(eva)) elif tmploc is not None: # if both are None, just ignore ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') - mc.MOV(tmploc, heap(self.cpu.pos_exc_value())) + mc.MOV(tmploc, heap(eva)) mc.MOV(RawEbpLoc(ofs), tmploc) + # if exctploc is not None: assert exctploc.is_core_reg() - mc.MOV(exctploc, heap(self.cpu.pos_exception())) - - mc.MOV(heap(self.cpu.pos_exception()), imm0) - mc.MOV(heap(self.cpu.pos_exc_value()), imm0) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(exctploc, heap(ea)) + # + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(ea), imm0) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(eva), imm0) def _restore_exception(self, mc, excvalloc, exctploc, tmploc=None): + eva = self._get_stm_tl(self.cpu.pos_exc_value()) + ea = self._get_stm_tl(self.cpu.pos_exception()) if excvalloc is not None: - mc.MOV(heap(self.cpu.pos_exc_value()), excvalloc) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(eva), excvalloc) else: assert tmploc is not None ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') mc.MOV(tmploc, RawEbpLoc(ofs)) mc.MOV_bi(ofs, 0) - mc.MOV(heap(self.cpu.pos_exc_value()), tmploc) - mc.MOV(heap(self.cpu.pos_exception()), exctploc) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(eva), tmploc) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(ea), exctploc) def _gen_guard_overflow(self, guard_op, guard_token): guard_opnum = guard_op.getopnum() @@ -1983,9 +2017,14 @@ if exc: # We might have an exception pending. Load it into ebx... 
- mc.MOV(ebx, heap(self.cpu.pos_exc_value())) - mc.MOV(heap(self.cpu.pos_exception()), imm0) - mc.MOV(heap(self.cpu.pos_exc_value()), imm0) + eva = self._get_stm_tl(self.cpu.pos_exc_value()) + ea = self._get_stm_tl(self.cpu.pos_exception()) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(ebx, heap(eva)) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(ea), imm0) + self._stm_tl_segment_prefix_if_necessary(mc) + mc.MOV(heap(eva), imm0) # ...and save ebx into 'jf_guard_exc' offset = self.cpu.get_ofs_of_frame_field('jf_guard_exc') mc.MOV_br(offset, ebx.value) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -320,6 +320,7 @@ self.cpu.execute_token(looptoken, sgcref) self.assert_in(called_on, [sgcref]) + def test_ptr_eq_fastpath(self): cpu = self.cpu cpu.gc_ll_descr.init_nursery(100) @@ -339,20 +340,28 @@ ConstPtr(s1), ConstPtr(s2)] for p1, p2 in itertools.combinations(ps, 2): - for guard in [None, rop.GUARD_TRUE, rop.GUARD_FALSE]: + for guard in [None, rop.GUARD_TRUE, rop.GUARD_FALSE, + rop.GUARD_VALUE]: cpu.gc_ll_descr.clear_lists() # BUILD OPERATIONS: i = i0 guarddescr = BasicFailDescr() finaldescr = BasicFinalDescr() - operations = [ResOperation(rop.PTR_EQ, [p1, p2], i0)] - if guard is not None: - gop = ResOperation(guard, [i0], None, + if guard == rop.GUARD_VALUE: + gop = ResOperation(rop.GUARD_VALUE, [p1, p2], None, descr=guarddescr) gop.setfailargs([]) - operations.append(gop) + operations = [gop] i = i1 + else: + operations = [ResOperation(rop.PTR_EQ, [p1, p2], i0)] + if guard is not None: + gop = ResOperation(guard, [i0], None, + descr=guarddescr) + gop.setfailargs([]) + operations.append(gop) + i = i1 # finish must depend on result of ptr_eq if no guard # is inbetween (otherwise ptr_eq gets deleted) # if there is a guard, the result of ptr_eq must not @@ -393,7 +402,7 @@ if guard is not None: if s1 == s2: - if guard == rop.GUARD_TRUE: + if guard in (rop.GUARD_TRUE, rop.GUARD_VALUE): assert not guard_failed else: assert guard_failed From noreply at buildbot.pypy.org Mon Aug 12 19:25:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Aug 2013 19:25:15 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130812172515.2E07A1C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66098:95401f5d05ba Date: 2013-08-12 19:24 +0200 http://bitbucket.org/pypy/pypy/changeset/95401f5d05ba/ Log: merge heads diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -62,7 +62,8 @@ self.current_clt = looptoken.compiled_loop_token self.mc = InstrBuilder(self.cpu.cpuinfo.arch_version) self.pending_guards = [] - assert self.datablockwrapper is None + #assert self.datablockwrapper is None --- but obscure case + # possible, e.g. 
getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) @@ -76,7 +77,6 @@ self._regalloc = None self.mc = None self.pending_guards = None - assert self.datablockwrapper is None def setup_failure_recovery(self): self.failure_recovery_code = [0, 0, 0, 0] diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1196,9 +1196,8 @@ def prepare_op_guard_not_forced_2(self, op, fcond): self.rm.before_call(op.getfailargs(), save_all_regs=True) - fail_locs = [self.loc(v) for v in op.getfailargs()] - self.assembler.store_force_descr(op, fail_locs, - self.fm.get_frame_depth()) + fail_locs = self._prepare_guard(op) + self.assembler.store_force_descr(op, fail_locs[1:], fail_locs[0].value) self.possibly_free_vars(op.getfailargs()) def prepare_guard_call_may_force(self, op, guard_op, fcond): From noreply at buildbot.pypy.org Tue Aug 13 00:42:19 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Aug 2013 00:42:19 +0200 (CEST) Subject: [pypy-commit] pypy default: Fixed #1582 -- Corrected the behavior of file.seek(X, os.SEEK_CUR) when it raises an IOError Message-ID: <20130812224219.D24BB1C011D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r66099:b134074e011c Date: 2013-08-12 18:41 -0400 http://bitbucket.org/pypy/pypy/changeset/b134074e011c/ Log: Fixed #1582 -- Corrected the behavior of file.seek(X, os.SEEK_CUR) when it raises an IOError diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -385,6 +385,24 @@ raise Exception("time out") print 'Passed.' 
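Reduced to its essentials, the streamio change that follows the new test below only reorders the bookkeeping: the buffered data is discarded after the underlying seek is known to have happened, so a relative seek that raises IOError leaves tell() untouched. A rough sketch of that ordering (a simplified stand-in, not the real streamio classes):

    class BufferedStreamSketch(object):
        # illustrative only: 'base' is anything with a do_seek(offset, whence) method
        def __init__(self, base):
            self.base = base
            self.buf = "buffered data"
            self.pos = 5

        def seek_cur(self, offset):
            self.base.do_seek(offset, 1)   # may raise IOError; nothing has been lost yet
            # only once the raw seek succeeded is it safe to forget the buffer
            self.buf = ""
            self.pos = 0
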
+ def test_seek_from_cur_backwards_off_end(self): + import os + + f = self.file(self.temppath, "w+b") + f.write('123456789x12345678><123456789\n') + + f.seek(0, os.SEEK_END) + f.seek(-25, os.SEEK_CUR) + f.read(25) + f.seek(-25, os.SEEK_CUR) + try: + f.seek(-25, os.SEEK_CUR) + except IOError: + pass + else: + raise AssertionError("Didn't raise IOError") + assert f.tell() == 5 + class AppTestFile25: spaceconfig = dict(usemodules=("_file",)) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -558,19 +558,22 @@ if -self.pos <= difpos <= currentsize: self.pos += difpos return - self.buf = "" - self.pos = 0 if whence == 1: offset -= currentsize try: self.do_seek(offset, whence) except MyNotImplementedError: + self.buf = "" + self.pos = 0 if difpos < 0: raise if whence == 0: offset = difpos - currentsize intoffset = offset2int(offset) self.read(intoffset) + else: + self.buf = "" + self.pos = 0 return if whence == 2: try: From noreply at buildbot.pypy.org Tue Aug 13 00:42:21 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 13 Aug 2013 00:42:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged upstream Message-ID: <20130812224221.692671C01E5@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r66100:5b2ef39d2fc9 Date: 2013-08-12 18:41 -0400 http://bitbucket.org/pypy/pypy/changeset/5b2ef39d2fc9/ Log: Merged upstream diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -85,7 +85,7 @@ continue e = elem.split("\t") adr = e[0] - v = " ".join(e[2:]) + v = elem # --- more compactly: " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start @@ -379,15 +379,16 @@ name = entry[:entry.find('(') - 1].lower() addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) + from rpython.jit.backend.tool.viewcode import World + world = World() + for entry in extract_category(log, 'jit-backend-dump'): + world.parse(entry.splitlines(True), truncate_addr=False) dumps = {} - for entry in extract_category(log, 'jit-backend-dump'): - backend, _, dump, _ = entry.split("\n") - _, addr, _, data = re.split(" +", dump) - backend_name = backend.split(" ")[1] - addr = int(addr[1:], 16) - if addr in addrs and addrs[addr]: - name = addrs[addr].pop(0) # they should come in order - dumps[name] = (backend_name, addr, data) + for r in world.ranges: + if r.addr in addrs and addrs[r.addr]: + name = addrs[r.addr].pop(0) # they should come in order + data = r.data.encode('hex') # backward compatibility + dumps[name] = (world.backend_name, r.addr, data) loops = [] for entry in extract_category(log, 'jit-log-opt'): parser = ParserCls(entry, None, {}, 'lltype', None, diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -62,7 +62,8 @@ self.current_clt = looptoken.compiled_loop_token self.mc = InstrBuilder(self.cpu.cpuinfo.arch_version) self.pending_guards = [] - assert self.datablockwrapper is None + #assert self.datablockwrapper is None --- but obscure case + # possible, e.g. 
getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) @@ -76,7 +77,6 @@ self._regalloc = None self.mc = None self.pending_guards = None - assert self.datablockwrapper is None def setup_failure_recovery(self): self.failure_recovery_code = [0, 0, 0, 0] diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1196,9 +1196,8 @@ def prepare_op_guard_not_forced_2(self, op, fcond): self.rm.before_call(op.getfailargs(), save_all_regs=True) - fail_locs = [self.loc(v) for v in op.getfailargs()] - self.assembler.store_force_descr(op, fail_locs, - self.fm.get_frame_depth()) + fail_locs = self._prepare_guard(op) + self.assembler.store_force_descr(op, fail_locs[1:], fail_locs[0].value) self.possibly_free_vars(op.getfailargs()) def prepare_guard_call_may_force(self, op, guard_op, fcond): diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -240,7 +240,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True): + def parse(self, f, textonly=True, truncate_addr=True): for line in f: if line.startswith('BACKEND '): self.backend_name = line.split(' ')[1].strip() @@ -250,7 +250,11 @@ assert pieces[2].startswith('+') if len(pieces) == 3: continue # empty line - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset data = pieces[3].replace(':', '').decode('hex') @@ -268,14 +272,21 @@ pieces = line.split(None, 3) assert pieces[1].startswith('@') assert pieces[2].startswith('+') - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset self.logentries[addr] = pieces[3] elif line.startswith('SYS_EXECUTABLE '): filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': - self.symbols.update(load_symbols(filename)) + try: + self.symbols.update(load_symbols(filename)) + except Exception as e: + print e self.executable_name = filename def find_cross_references(self): From noreply at buildbot.pypy.org Tue Aug 13 02:16:27 2013 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Tue, 13 Aug 2013 02:16:27 +0200 (CEST) Subject: [pypy-commit] pypy py3k-list-compr-or: two failing tests for list comprehensions with or Message-ID: <20130813001627.E88201C01E5@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: py3k-list-compr-or Changeset: r66101:bb9bab43e9dc Date: 2013-08-06 23:21 +0200 http://bitbucket.org/pypy/pypy/changeset/bb9bab43e9dc/ Log: two failing tests for list comprehensions with or diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -1236,6 +1236,12 @@ if1, if2 = comps[0].ifs assert isinstance(if1, ast.Name) assert isinstance(if2, ast.Name) + gen = self.get_first_expr(brack("x for x in y or z")) + comp = 
gen.generators[0] + assert isinstance(comp.iter, ast.BoolOp) + assert len(comp.iter.values, 2) + assert isinstance(comp.iter.values[0], ast.Name) + assert isinstance(comp.iter.values[1], ast.Name) def test_genexp(self): self.check_comprehension("(%s)", ast.GeneratorExp) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -891,6 +891,10 @@ py.test.raises(SyntaxError, self.simple_test, "for *a in x: pass", None, None) + def test_list_compr_or(self): + yield self.st, 'x = list(d for d in [1] or [])', 'x', [1] + yield self.st, 'y = [d for d in [1] or []]', 'y', [1] + class AppTestCompiler: From noreply at buildbot.pypy.org Tue Aug 13 02:16:29 2013 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Tue, 13 Aug 2013 02:16:29 +0200 (CEST) Subject: [pypy-commit] pypy py3k-list-compr-or: hmm, not sure why handle_testlist was here in the first place - this is the fix Message-ID: <20130813001629.4671F1C01E5@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: py3k-list-compr-or Changeset: r66102:d0489e1cc387 Date: 2013-08-06 23:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d0489e1cc387/ Log: hmm, not sure why handle_testlist was here in the first place - this is the fix diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -1289,7 +1289,7 @@ def handle_listcomp(self, listcomp_node): elt = self.handle_expr(listcomp_node.children[0]) comps = self.comprehension_helper(listcomp_node.children[1], - "handle_testlist", + "handle_expr", syms.comp_for, syms.comp_if, syms.comp_iter, comp_fix_unamed_tuple_location=True) From noreply at buildbot.pypy.org Tue Aug 13 02:16:30 2013 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Tue, 13 Aug 2013 02:16:30 +0200 (CEST) Subject: [pypy-commit] pypy py3k-list-compr-or: fix test Message-ID: <20130813001630.8D7B11C01E5@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: py3k-list-compr-or Changeset: r66103:1c07c858164e Date: 2013-08-06 23:39 +0200 http://bitbucket.org/pypy/pypy/changeset/1c07c858164e/ Log: fix test diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -1239,7 +1239,7 @@ gen = self.get_first_expr(brack("x for x in y or z")) comp = gen.generators[0] assert isinstance(comp.iter, ast.BoolOp) - assert len(comp.iter.values, 2) + assert len(comp.iter.values) == 2 assert isinstance(comp.iter.values[0], ast.Name) assert isinstance(comp.iter.values[1], ast.Name) From noreply at buildbot.pypy.org Tue Aug 13 02:16:31 2013 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Tue, 13 Aug 2013 02:16:31 +0200 (CEST) Subject: [pypy-commit] pypy py3k-list-compr-or: handle_source_expr_meth is not used any more, remove it Message-ID: <20130813001631.D20761C01E5@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: py3k-list-compr-or Changeset: r66104:169a83467a6d Date: 2013-08-07 21:15 +0200 http://bitbucket.org/pypy/pypy/changeset/169a83467a6d/ Log: handle_source_expr_meth is not used any more, remove it diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- 
a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -1236,17 +1236,15 @@ @specialize.arg(2) def comprehension_helper(self, comp_node, - handle_source_expr_meth="handle_expr", for_type=syms.comp_for, if_type=syms.comp_if, iter_type=syms.comp_iter, comp_fix_unamed_tuple_location=False): - handle_source_expression = getattr(self, handle_source_expr_meth) fors_count = self.count_comp_fors(comp_node, for_type, if_type) comps = [] for i in range(fors_count): for_node = comp_node.children[1] for_targets = self.handle_exprlist(for_node, ast.Store) - expr = handle_source_expression(comp_node.children[3]) + expr = self.handle_expr(comp_node.children[3]) assert isinstance(expr, ast.expr) if len(for_node.children) == 1: comp = ast.comprehension(for_targets[0], expr, None) @@ -1289,7 +1287,6 @@ def handle_listcomp(self, listcomp_node): elt = self.handle_expr(listcomp_node.children[0]) comps = self.comprehension_helper(listcomp_node.children[1], - "handle_expr", syms.comp_for, syms.comp_if, syms.comp_iter, comp_fix_unamed_tuple_location=True) From noreply at buildbot.pypy.org Tue Aug 13 02:24:05 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 13 Aug 2013 02:24:05 +0200 (CEST) Subject: [pypy-commit] pypy py3k: pypy doesn't support site-python, only site-packages. reapplied from default Message-ID: <20130813002405.87A4C1C01E5@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r66110:56f39e6adf81 Date: 2013-08-12 17:23 -0700 http://bitbucket.org/pypy/pypy/changeset/56f39e6adf81/ Log: pypy doesn't support site-python, only site-packages. reapplied from default diff --git a/lib-python/3/test/test_site.py b/lib-python/3/test/test_site.py --- a/lib-python/3/test/test_site.py +++ b/lib-python/3/test/test_site.py @@ -223,6 +223,10 @@ self.assertEqual(len(dirs), 1) wanted = os.path.join('xoxo', 'Lib', 'site-packages') self.assertEqual(dirs[0], wanted) + elif '__pypy__' in sys.builtin_module_names: + self.assertEquals(len(dirs), 1) + wanted = os.path.join('xoxo', 'site-packages') + self.assertEquals(dirs[0], wanted) elif (sys.platform == "darwin" and sysconfig.get_config_var("PYTHONFRAMEWORK")): # OS X framework builds From noreply at buildbot.pypy.org Tue Aug 13 08:38:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 08:38:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Make the crash clearer, e.g. if we compile old code which still uses Message-ID: <20130813063817.1B3971C13FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66111:1ab21f809581 Date: 2013-08-13 08:37 +0200 http://bitbucket.org/pypy/pypy/changeset/1ab21f809581/ Log: Make the crash clearer, e.g. 
if we compile old code which still uses '_virtualizable2_' diff --git a/rpython/jit/metainterp/typesystem.py b/rpython/jit/metainterp/typesystem.py --- a/rpython/jit/metainterp/typesystem.py +++ b/rpython/jit/metainterp/typesystem.py @@ -52,7 +52,10 @@ return FUNCTYPE, FUNCPTRTYPE def get_superclass(self, TYPE): - return lltype.Ptr(TYPE.TO._first_struct()[1]) + SUPER = TYPE.TO._first_struct()[1] + if SUPER is None: + return None + return lltype.Ptr(SUPER) def cast_to_instance_maybe(self, TYPE, instance): return lltype.cast_pointer(TYPE, instance) diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py --- a/rpython/jit/metainterp/virtualizable.py +++ b/rpython/jit/metainterp/virtualizable.py @@ -17,8 +17,14 @@ self.cpu = cpu self.BoxArray = cpu.ts.BoxRef # + VTYPEPTR1 = VTYPEPTR while 'virtualizable_accessor' not in deref(VTYPEPTR)._hints: VTYPEPTR = cpu.ts.get_superclass(VTYPEPTR) + assert VTYPEPTR is not None, ( + "%r is listed in the jit driver's 'virtualizables', " + "but that class doesn't have a '_virtualizable_' attribute " + "(if it has _virtualizable2_, rename it to _virtualizable_)" + % (VTYPEPTR1,)) self.VTYPEPTR = VTYPEPTR self.VTYPE = VTYPE = deref(VTYPEPTR) self.vable_token_descr = cpu.fielddescrof(VTYPE, 'vable_token') From noreply at buildbot.pypy.org Tue Aug 13 10:27:42 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Tue, 13 Aug 2013 10:27:42 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: starting to add tests for incremental gc operation Message-ID: <20130813082742.C62471C14AA@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66113:34a0002de3cd Date: 2013-08-13 20:26 +1200 http://bitbucket.org/pypy/pypy/changeset/34a0002de3cd/ Log: starting to add tests for incremental gc operation diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1183,7 +1183,7 @@ if self.gc_state == STATE_MARKING: if self.header(addr_struct).tid & GCFLAG_VISITED: - self.write_to_visited_object_forward(addr_struct,new_value) + self.write_to_visited_object_forward(addr_struct,newvalue) def write_barrier_from_array(self, newvalue, addr_array, index): @@ -1196,7 +1196,7 @@ if self.gc_state == STATE_MARKING: if self.header(addr_struct).tid & GCFLAG_VISITED: - self.write_to_visited_object_backward(addr_struct,new_value) + self.write_to_visited_object_backward(addr_struct,newvalue) def _init_writebarrier_logic(self): @@ -1775,7 +1775,7 @@ self.minor_collection() self.major_collection_step() - def debug_gc_step_n(self,n): + def debug_gc_step(self,n=1): while n > 0: self.minor_collection() self.major_collection_step() diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -587,11 +587,63 @@ class TestIncrementalMiniMarkGCSimple(TestMiniMarkGCSimple): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass - - - def test_write_barrier(self): - pass + def test_write_barrier_marking_simple(self): + from rpython.memory.gc import incminimark + for i in range(2): + curobj = self.malloc(S) + curobj.x = i + self.stackroots.append(curobj) + + + oldobj = self.stackroots[-1] + oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj)) + + assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0 + self.gc.debug_gc_step_until(incminimark.STATE_MARKING) + oldobj = 
self.stackroots[-1] + # object shifted by minor collect + oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj)) + assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0 + #process one object + self.gc.debug_gc_step() + + assert oldhdr.tid & incminimark.GCFLAG_VISITED + + #at this point the first object should have been processed + newobj = self.malloc(S) + self.write(oldobj,'next',newobj) + #the barrier should have made the object gray + newhdr = self.gc.header(llmemory.cast_ptr_to_adr(newobj)) + assert newhdr.tid & incminimark.GCFLAG_GRAY + #checks gray object is in objects_to_trace + self.gc.debug_check_consistency() + + def test_sweeping_simple(self): + from rpython.memory.gc import incminimark + + assert self.gc.gc_state == incminimark.STATE_SCANNING + + for i in range(2): + curobj = self.malloc(S) + curobj.x = i + self.stackroots.append(curobj) + + self.gc.debug_gc_step_until(incminimark.STATE_SWEEPING_RAWMALLOC) + oldobj = self.stackroots[-1] + oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj)) + assert oldhdr.tid & incminimark.GCFLAG_VISITED + + newobj1 = self.malloc(S) + newobj2 = self.malloc(S) + newobj1.x = 1337 + newobj2.x = 1338 + self.write(oldobj,'next',newobj) + newhdr = self.gc.header(llmemory.cast_ptr_to_adr(newobj)) + #checks gray object is in objects_to_trace + self.gc.debug_gc_step_until(incminimark.STATE_SCANNING) + #should not be cleared even though it was allocated while sweeping + assert newobj.x == 1337 class TestIncrementalMiniMarkGCFull(TestMiniMarkGCFull): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass From noreply at buildbot.pypy.org Tue Aug 13 10:27:41 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Tue, 13 Aug 2013 10:27:41 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: refactoring tests and initial write barrier code Message-ID: <20130813082741.5623F1C14A9@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66112:fdbbced1820f Date: 2013-08-13 16:54 +1200 http://bitbucket.org/pypy/pypy/changeset/fdbbced1820f/ Log: refactoring tests and initial write barrier code diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -133,15 +133,19 @@ # The scanning phase, next step call will scan the current roots # This state must complete in a single step -STATE_SCANNING = 0 +STATE_SCANNING = 1 << 0 #XXX describe # marking of objects can be done over multiple -STATE_MARKING = 1 -STATE_SWEEPING_RAWMALLOC = 2 -STATE_SWEEPING_ARENA_1 = 3 -STATE_SWEEPING_ARENA_2 = 4 -STATE_FINALIZING = 5 +STATE_MARKING = 1 << 1 +STATE_SWEEPING_RAWMALLOC = 1 << 2 +STATE_SWEEPING_ARENA_1 = 1 << 3 +STATE_SWEEPING_ARENA_2 = 1 << 4 +STATE_FINALIZING = 1 << 5 + +MASK_SWEEPING = (STATE_SWEEPING_RAWMALLOC | + STATE_SWEEPING_ARENA_1 | + STATE_SWEEPING_ARENA_2) @@ -333,7 +337,7 @@ self.rawmalloced_total_size = r_uint(0) self.gc_state = r_uint(0) #XXX Only really needs to be a byte - + self.gc_state = STATE_SCANNING # # A list of all objects with finalizers (these are never young). 
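The renumbering of the STATE_* constants above to one-hot bits is presumably what makes a grouped test such as MASK_SWEEPING work with a single bitwise AND. A self-contained illustration (the constants are copied from the hunk above; the helper and asserts are only for illustration):

    STATE_SCANNING           = 1 << 0
    STATE_MARKING            = 1 << 1
    STATE_SWEEPING_RAWMALLOC = 1 << 2
    STATE_SWEEPING_ARENA_1   = 1 << 3
    STATE_SWEEPING_ARENA_2   = 1 << 4
    STATE_FINALIZING         = 1 << 5
    MASK_SWEEPING = (STATE_SWEEPING_RAWMALLOC |
                     STATE_SWEEPING_ARENA_1 |
                     STATE_SWEEPING_ARENA_2)

    def in_sweeping_phase(gc_state):
        # one-hot states make phase-group membership a single AND
        return bool(gc_state & MASK_SWEEPING)

    assert in_sweeping_phase(STATE_SWEEPING_ARENA_2)
    assert not in_sweeping_phase(STATE_MARKING)
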
self.objects_with_finalizers = self.AddressDeque() @@ -1176,13 +1180,24 @@ def write_barrier(self, newvalue, addr_struct): if self.header(addr_struct).tid & GCFLAG_TRACK_YOUNG_PTRS: self.remember_young_pointer(addr_struct, newvalue) + + if self.gc_state == STATE_MARKING: + if self.header(addr_struct).tid & GCFLAG_VISITED: + self.write_to_visited_object_forward(addr_struct,new_value) + def write_barrier_from_array(self, newvalue, addr_array, index): + if self.header(addr_array).tid & GCFLAG_TRACK_YOUNG_PTRS: if self.card_page_indices > 0: # <- constant-folded self.remember_young_pointer_from_array2(addr_array, index) else: self.remember_young_pointer(addr_array, newvalue) + + if self.gc_state == STATE_MARKING: + if self.header(addr_struct).tid & GCFLAG_VISITED: + self.write_to_visited_object_backward(addr_struct,new_value) + def _init_writebarrier_logic(self): DEBUG = self.DEBUG @@ -1190,6 +1205,33 @@ # instead of keeping it as a regular method is to # make the code in write_barrier() marginally smaller # (which is important because it is inlined *everywhere*). + + # move marking process forward + def write_to_visited_object_forward(addr_struct, new_value): + ll_assert(self.gc_state == STATE_MARKING,"expected MARKING state") + if self.header(new_value).tid & (GCFLAG_GRAY | GCFLAG_VISITED) == 0: + # writing a white object into black, make new object gray and + # add to objects_to_trace + # + self.header(new_value).tid |= GCFLAG_GRAY + self.objects_to_trace.append(new_value) + write_to_visited_object_forward._dont_inline_ = True + self.write_to_visited_object_forward = write_to_visited_object_forward + + # move marking process backward + def write_to_visited_object_backward(addr_struct, new_value): + ll_assert(self.gc_state == STATE_MARKING,"expected MARKING state") + if self.header(new_value).tid & (GCFLAG_GRAY | GCFLAG_VISITED) == 0: + # writing a white object into black, make black gray and + # readd to objects_to_trace + # this is useful for arrays because it stops the writebarrier + # from being re-triggered on successive writes + self.header(addr_struct).tid &= ~GCFLAG_VISITED + self.header(addr_struct).tid |= GCFLAG_GRAY + self.objects_to_trace.append(addr_struct) + write_to_visited_object_backward._dont_inline_ = True + self.write_to_visited_object_backward = write_to_visited_object_backward + def remember_young_pointer(addr_struct, newvalue): # 'addr_struct' is the address of the object in which we write. # 'newvalue' is the address that we are going to write in there. 
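A minimal model of the tri-colour invariant the two helpers above maintain while the collector is in STATE_MARKING: an already-visited ('black', GCFLAG_VISITED) object must never end up pointing at an untraced ('white') one. The colours and the Obj class below are simplifications for illustration, not the real object headers:

    WHITE, GRAY, BLACK = range(3)      # BLACK ~ GCFLAG_VISITED, GRAY ~ GCFLAG_GRAY

    class Obj(object):
        def __init__(self):
            self.color = WHITE

    def write_forward(source, target, objects_to_trace):
        # writing a white object into a black one: shade the *target* gray
        # so the marker will still trace it (the non-array barrier)
        if source.color == BLACK and target.color == WHITE:
            target.color = GRAY
            objects_to_trace.append(target)

    def write_backward(source, target, objects_to_trace):
        # array variant: re-shade the *source* gray instead, so further
        # writes to the same array stop re-triggering the barrier
        if source.color == BLACK and target.color == WHITE:
            source.color = GRAY
            objects_to_trace.append(source)

    to_trace = []
    a, b = Obj(), Obj()
    a.color = BLACK
    write_forward(a, b, to_trace)      # b becomes gray and gets queued
    assert b.color == GRAY and to_trace == [b]
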
@@ -1728,9 +1770,20 @@ old.append(new.pop()) new.delete() + def debug_gc_step_until(self,state): + while self.gc_state != state: + self.minor_collection() + self.major_collection_step() + + def debug_gc_step_n(self,n): + while n > 0: + self.minor_collection() + self.major_collection_step() + n -= 1 + # Note - minor collections seem fast enough so that one # is done before every major collection step - def major_collection_step(self,reserving_size): + def major_collection_step(self,reserving_size=0): debug_start("gc-collect-step") debug_print("stating gc state: ",self.gc_state) # Debugging checks diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -584,109 +584,14 @@ class TestMiniMarkGCFull(DirectGCTest): from rpython.memory.gc.minimark import MiniMarkGC as GCClass +class TestIncrementalMiniMarkGCSimple(TestMiniMarkGCSimple): + from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass + + + + def test_write_barrier(self): + pass + -class TestIncrementalMiniMarkGCSimple(DirectGCTest): +class TestIncrementalMiniMarkGCFull(TestMiniMarkGCFull): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass - #simple arena doesnt change for incremental. - from rpython.memory.gc.minimarktest import SimpleArenaCollection - # test the GC itself, providing a simple class for ArenaCollection - GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection} - - def test_card_marker(self): - for arraylength in (range(4, 17) - + [69] # 3 bytes - + [300]): # 10 bytes - print 'array length:', arraylength - nums = {} - a = self.malloc(VAR, arraylength) - self.stackroots.append(a) - for i in range(50): - p = self.malloc(S) - p.x = -i - a = self.stackroots[-1] - index = (i*i) % arraylength - self.writearray(a, index, p) - nums[index] = p.x - # - for index, expected_x in nums.items(): - assert a[index].x == expected_x - self.stackroots.pop() - test_card_marker.GC_PARAMS = {"card_page_indices": 4} - - def test_writebarrier_before_copy(self): - from rpython.memory.gc import incminimark - largeobj_size = self.gc.nonlarge_max + 1 - self.gc.next_major_collection_threshold = 99999.0 - p_src = self.malloc(VAR, largeobj_size) - p_dst = self.malloc(VAR, largeobj_size) - # make them old - self.stackroots.append(p_src) - self.stackroots.append(p_dst) - self.gc.collect() - p_dst = self.stackroots.pop() - p_src = self.stackroots.pop() - # - addr_src = llmemory.cast_ptr_to_adr(p_src) - addr_dst = llmemory.cast_ptr_to_adr(p_dst) - hdr_src = self.gc.header(addr_src) - hdr_dst = self.gc.header(addr_dst) - # - assert hdr_src.tid & incminimark.GCFLAG_TRACK_YOUNG_PTRS - assert hdr_dst.tid & incminimark.GCFLAG_TRACK_YOUNG_PTRS - # - res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10) - assert res - assert hdr_dst.tid & incminimark.GCFLAG_TRACK_YOUNG_PTRS - # - hdr_src.tid &= ~incminimark.GCFLAG_TRACK_YOUNG_PTRS # pretend we have young ptrs - res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10) - assert res # we optimized it - assert hdr_dst.tid & incminimark.GCFLAG_TRACK_YOUNG_PTRS == 0 # and we copied the flag - # - hdr_src.tid |= incminimark.GCFLAG_TRACK_YOUNG_PTRS - hdr_dst.tid |= incminimark.GCFLAG_TRACK_YOUNG_PTRS - hdr_src.tid |= incminimark.GCFLAG_HAS_CARDS - hdr_src.tid |= incminimark.GCFLAG_CARDS_SET - # hdr_dst.tid does not have minimark.GCFLAG_HAS_CARDS - res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10) - assert 
not res # there might be young ptrs, let ll_arraycopy to find them - - def test_writebarrier_before_copy_preserving_cards(self): - from rpython.rtyper.lltypesystem import llarena - from rpython.memory.gc import incminimark - tid = self.get_type_id(VAR) - largeobj_size = self.gc.nonlarge_max + 1 - self.gc.next_major_collection_threshold = 99999.0 - addr_src = self.gc.external_malloc(tid, largeobj_size) - addr_dst = self.gc.external_malloc(tid, largeobj_size) - hdr_src = self.gc.header(addr_src) - hdr_dst = self.gc.header(addr_dst) - # - assert hdr_src.tid & incminimark.GCFLAG_HAS_CARDS - assert hdr_dst.tid & incminimark.GCFLAG_HAS_CARDS - # - young_p = self.malloc(S) - self.gc.write_barrier_from_array(young_p, addr_src, 0) - index_in_third_page = int(2.5 * self.gc.card_page_indices) - assert index_in_third_page < largeobj_size - self.gc.write_barrier_from_array(young_p, addr_src, - index_in_third_page) - # - assert hdr_src.tid & incminimark.GCFLAG_CARDS_SET - addr_byte = self.gc.get_card(addr_src, 0) - assert ord(addr_byte.char[0]) == 0x01 | 0x04 # bits 0 and 2 - # - res = self.gc.writebarrier_before_copy(addr_src, addr_dst, - 0, 0, 2*self.gc.card_page_indices) - assert res - # - assert hdr_dst.tid & incminimark.GCFLAG_CARDS_SET - addr_byte = self.gc.get_card(addr_dst, 0) - assert ord(addr_byte.char[0]) == 0x01 | 0x04 # bits 0 and 2 - - test_writebarrier_before_copy_preserving_cards.GC_PARAMS = { - "card_page_indices": 4} - - -class TestIncrementalMiniMarkGCFull(DirectGCTest): - from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass diff --git a/rpython/memory/test/test_incminimark_gc.py b/rpython/memory/test/test_incminimark_gc.py --- a/rpython/memory/test/test_incminimark_gc.py +++ b/rpython/memory/test/test_incminimark_gc.py @@ -1,11 +1,6 @@ from rpython.rlib.rarithmetic import LONG_BIT -from rpython.memory.test import test_semispace_gc +from rpython.memory.test import test_minimark_gc -WORD = LONG_BIT // 8 - -class TestIncrementalMiniMarkGC(test_semispace_gc.TestSemiSpaceGC): +class TestIncrementalMiniMarkGC(test_minimark_gc.TestMiniMarkGC): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass - GC_CAN_SHRINK_BIG_ARRAY = False - GC_CAN_MALLOC_NONMOVABLE = True - BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -1259,48 +1259,8 @@ res = run([]) assert res == 123 -class TestIncrementalMiniMarkGC(TestHybridGC): +class TestIncrementalMiniMarkGC(TestMiniMarkGC): gcname = "incminimark" - GC_CAN_TEST_ID = True - - class gcpolicy(gc.BasicFrameworkGcPolicy): - class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer): - from rpython.memory.gc.incminimark \ - import IncrementalMiniMarkGC as GCClass - GC_PARAMS = {'nursery_size': 32*WORD, - 'page_size': 16*WORD, - 'arena_size': 64*WORD, - 'small_request_threshold': 5*WORD, - 'large_object': 8*WORD, - 'card_page_indices': 4, - 'translated_to_c': False, - } - root_stack_depth = 200 - - def define_no_clean_setarrayitems(cls): - # The optimization find_clean_setarrayitems() in - # gctransformer/framework.py does not work with card marking. - # Check that it is turned off. 
- S = lltype.GcStruct('S', ('x', lltype.Signed)) - A = lltype.GcArray(lltype.Ptr(S)) - def sub(lst): - lst[15] = lltype.malloc(S) # 'lst' is set the single mark "12-15" - lst[15].x = 123 - lst[0] = lst[15] # that would be a "clean_setarrayitem" - def f(): - lst = lltype.malloc(A, 16) # 16 > 10 - rgc.collect() - sub(lst) - null = lltype.nullptr(S) - lst[15] = null # clear, so that A() is only visible via lst[0] - rgc.collect() # -> crash - return lst[0].x - return f - - def test_no_clean_setarrayitems(self): - run = self.runner("no_clean_setarrayitems") - res = run([]) - assert res == 123 # ________________________________________________________________ From noreply at buildbot.pypy.org Tue Aug 13 10:51:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 10:51:39 +0200 (CEST) Subject: [pypy-commit] pypy default: Accept truncated log files Message-ID: <20130813085139.8EC231C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66114:9ce3b5947b41 Date: 2013-08-13 10:51 +0200 http://bitbucket.org/pypy/pypy/changeset/9ce3b5947b41/ Log: Accept truncated log files diff --git a/rpython/tool/logparser.py b/rpython/tool/logparser.py --- a/rpython/tool/logparser.py +++ b/rpython/tool/logparser.py @@ -133,6 +133,8 @@ def rectime(category1, timestart1, timestop1, subcats): substartstop = [] for entry in getsubcategories(subcats): + if len(entry) != 4: + continue rectime(*entry) substartstop.append(entry[1:3]) # (start, stop) # compute the total time for category1 as the part of the @@ -238,7 +240,11 @@ # def recdraw(sublist, subheight): firstx1 = None - for category1, timestart1, timestop1, subcats in sublist: + for entry in sublist: + try: + category1, timestart1, timestop1, subcats = entry + except ValueError: + continue x1 = int((timestart1 - timestart0) * timefactor) x2 = int((timestop1 - timestart0) * timefactor) y1 = (height - subheight) / 2 From noreply at buildbot.pypy.org Tue Aug 13 15:28:20 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 13 Aug 2013 15:28:20 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130813132820.9AABC1C32CB@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66115:93412b6e96bc Date: 2013-08-13 15:25 +0200 http://bitbucket.org/pypy/pypy/changeset/93412b6e96bc/ Log: hg merge default diff too long, truncating to 2000 out of 7450 lines diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy 
in edge.arrowhead()] diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -152,7 +152,8 @@ try: plaincontent = dot2plain_graphviz(content, contenttype) except PlainParseError, e: - print e - # failed, retry via codespeak - plaincontent = dot2plain_codespeak(content, contenttype) + raise + ##print e + ### failed, retry via codespeak + ##plaincontent = dot2plain_codespeak(content, contenttype) return list(parse_plain(graph_id, plaincontent, links, fixedfont)) diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 +165,8 @@ # All _delegate_methods must also be initialized here. send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. The @@ -179,12 +181,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). 
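The new comment above describes the _reuse()/_drop() protocol informally. As a standalone illustration (hypothetical code, not taken from any library), an object that a third-party library hands to socket._socketobject() would keep its own explicit counter along these lines; the ssl.SSLSocket hunk further down in this patch implements the same idea with its _makefile_refs field:

    class FakeSocketForLibraryUse(object):
        def __init__(self):
            self._refcount = 0          # explicit reference counter, starting at 0
        def _reuse(self):
            self._refcount += 1
        def _drop(self):
            self._refcount -= 1
            if self._refcount == 0:
                self.close()            # called when the count drops back to zero
        def close(self):
            pass                        # release whatever resource is wrapped
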
+ self._sock = _sock def send(self, data, flags=0): @@ -216,9 +227,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +290,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +333,11 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: + self._sock = None + if s is not None: s._drop() - if self._close: - self._sock.close() - self._sock = None + if self._close: + s.close() def __del__(self): try: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). + sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS @@ -352,11 +358,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1066,6 +1066,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1321,7 +1324,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1193,6 +1193,8 @@ # out of socket._fileobject() and into a base class. 
r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1305,7 +1305,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.6 +Version: 0.7 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/readline.egg-info b/lib_pypy/readline.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/readline.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: readline +Version: 6.2.4.1 +Summary: Hack to make "pip install readline" happy and do nothing +Home-page: UNKNOWN +Author: UNKNOWN +Author-email: UNKNOWN +License: UNKNOWN +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. _Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. + +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. 
Frames +can escape and they can also be allocated already at the moment we enter the +JIT. In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). + +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. + It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -62,3 +62,15 @@ No longer delegate numpy string_ methods to space.StringObject, in numpy this works by kind of by accident. Support for merging the refactor-str-types branch + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. branch: reflex-support +.. 
branch: numpypy-inplace-op diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -176,9 +176,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -260,7 +261,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -330,7 +331,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -340,7 +341,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -355,7 +356,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -389,7 +390,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -476,7 +477,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -490,7 +491,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -522,7 +523,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -553,12 +554,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -609,10 +610,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." 
return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -638,8 +639,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -649,7 +650,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -667,7 +668,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -329,10 +329,6 @@ instance=True) base_user_setup(self, space, w_subtype) - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - add(Proto) subcls = type(name, (supercls,), body) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -37,6 +37,7 @@ except BaseException, e: interrupted.append(e) finally: + print 'subthread stops, interrupted=%r' % (interrupted,) done.append(None) # This is normally called by app_main.py @@ -52,11 +53,13 @@ try: done = [] interrupted = [] + print '--- start ---' thread.start_new_thread(subthread, ()) for j in range(10): if len(done): break print '.' time.sleep(0.1) + print 'main thread loop done' assert len(done) == 1 assert len(interrupted) == 1 assert 'KeyboardInterrupt' in interrupted[0].__class__.__name__ diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -385,6 +385,24 @@ raise Exception("time out") print 'Passed.' 
+ def test_seek_from_cur_backwards_off_end(self): + import os + + f = self.file(self.temppath, "w+b") + f.write('123456789x12345678><123456789\n') + + f.seek(0, os.SEEK_END) + f.seek(-25, os.SEEK_CUR) + f.read(25) + f.seek(-25, os.SEEK_CUR) + try: + f.seek(-25, os.SEEK_CUR) + except IOError: + pass + else: + raise AssertionError("Didn't raise IOError") + assert f.tell() == 5 + class AppTestFile25: spaceconfig = dict(usemodules=("_file",)) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -56,7 +56,7 @@ def descr_typecode(space, self): return space.wrap(self.typecode) -arr_eq_driver = jit.JitDriver(greens = ['comp_func'], reds = 'auto') +arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto') EQ, NE, LT, LE, GT, GE = range(6) def compare_arrays(space, arr1, arr2, comp_op): diff --git a/pypy/module/binascii/interp_uu.py b/pypy/module/binascii/interp_uu.py --- a/pypy/module/binascii/interp_uu.py +++ b/pypy/module/binascii/interp_uu.py @@ -61,7 +61,7 @@ return ord(bin[i]) except IndexError: return 0 -_a2b_read._always_inline_ = True +_b2a_read._always_inline_ = True @unwrap_spec(bin='bufferstr') def b2a_uu(space, bin): diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -592,11 +592,15 @@ def get(self, w_cppinstance, w_pycppclass): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) self.converter.to_memory(self.space, w_cppinstance, w_value, offset) return self.space.w_None diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -426,6 +426,11 @@ # mostly for the benefit of the CINT backend, which treats std as special gbl.std = make_cppnamespace(None, "std", None, False) + # install a type for enums to refer to + # TODO: this is correct for C++98, not for C++11 and in general there will + # be the same issue for all typedef'd builtin types + setattr(gbl, 'unsigned int', int) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -105,6 +105,13 @@ raises(IndexError, c.m_float_array.__getitem__, self.N) raises(IndexError, c.m_double_array.__getitem__, self.N) + # can not access an instance member on the class + raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, getattr, cppyy_test_data, 'm_int') + + assert not hasattr(cppyy_test_data, 'm_bool') + assert not hasattr(cppyy_test_data, 'm_int') + c.destruct() def test03_instance_data_write_access(self): @@ -428,12 +435,17 @@ c = cppyy_test_data() assert isinstance(c, cppyy_test_data) - # TODO: test that the enum 
is accessible as a type + # test that the enum is accessible as a type + assert cppyy_test_data.what assert cppyy_test_data.kNothing == 6 assert cppyy_test_data.kSomething == 111 assert cppyy_test_data.kLots == 42 + assert cppyy_test_data.what(cppyy_test_data.kNothing) == cppyy_test_data.kNothing + assert cppyy_test_data.what(6) == cppyy_test_data.kNothing + # TODO: only allow instantiations with correct values (C++11) + assert c.get_enum() == cppyy_test_data.kNothing assert c.m_enum == cppyy_test_data.kNothing @@ -455,6 +467,7 @@ assert cppyy_test_data.s_enum == cppyy_test_data.kSomething # global enums + assert gbl.fruit # test type accessible assert gbl.kApple == 78 assert gbl.kBanana == 29 assert gbl.kCitrus == 34 diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -41,13 +41,13 @@ def PyNumber_Int(space, w_obj): """Returns the o converted to an integer object on success, or NULL on failure. This is the equivalent of the Python expression int(o).""" - return space.int(w_obj) + return space.call_function(space.w_int, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Long(space, w_obj): """Returns the o converted to a long integer object on success, or NULL on failure. This is the equivalent of the Python expression long(o).""" - return space.long(w_obj) + return space.call_function(space.w_long, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Index(space, w_obj): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,7 +3,6 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread -from pypy.module.thread import os_thread PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -17,6 +16,9 @@ ('dict', PyObject), ])) +class NoThreads(Exception): + pass + @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread @@ -45,10 +47,15 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): + if not space.config.translation.thread: + raise NoThreads + from pypy.module.thread import os_thread os_thread.setup_threads(space) @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): + if not space.config.translation.thread: + return 0 return 1 # XXX: might be generally useful @@ -232,6 +239,8 @@ """Create a new thread state object belonging to the given interpreter object. The global interpreter lock need not be held, but may be held if it is necessary to serialize calls to this function.""" + if not space.config.translation.thread: + raise NoThreads rthread.gc_thread_prepare() # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. @@ -246,6 +255,8 @@ def PyThreadState_Clear(space, tstate): """Reset all information in a thread state object. 
The global interpreter lock must be held.""" + if not space.config.translation.thread: + raise NoThreads Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) space.threadlocals.leave_thread(space) diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -19,6 +19,8 @@ def test_number_long(self, space, api): w_l = api.PyNumber_Long(space.wrap(123)) assert api.PyLong_CheckExact(w_l) + w_l = api.PyNumber_Long(space.wrap("123")) + assert api.PyLong_CheckExact(w_l) def test_number_int(self, space, api): w_l = api.PyNumber_Int(space.wraplong(123L)) @@ -27,6 +29,8 @@ assert api.PyLong_CheckExact(w_l) w_l = api.PyNumber_Int(space.wrap(42.3)) assert api.PyInt_CheckExact(w_l) + w_l = api.PyNumber_Int(space.wrap("42")) + assert api.PyInt_CheckExact(w_l) def test_number_index(self, space, api): w_l = api.PyNumber_Index(space.wraplong(123L)) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -737,6 +737,27 @@ descr_gt = _binop_comp_impl(_binop_impl("greater")) descr_ge = _binop_comp_impl(_binop_impl("greater_equal")) + def _binop_inplace_impl(ufunc_name): + def impl(self, space, w_other): + w_out = self + ufunc = getattr(interp_ufuncs.get(space), ufunc_name) + return ufunc.call(space, [self, w_other, w_out]) + return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) + + descr_iadd = _binop_inplace_impl("add") + descr_isub = _binop_inplace_impl("subtract") + descr_imul = _binop_inplace_impl("multiply") + descr_idiv = _binop_inplace_impl("divide") + descr_itruediv = _binop_inplace_impl("true_divide") + descr_ifloordiv = _binop_inplace_impl("floor_divide") + descr_imod = _binop_inplace_impl("mod") + descr_ipow = _binop_inplace_impl("power") + descr_ilshift = _binop_inplace_impl("left_shift") + descr_irshift = _binop_inplace_impl("right_shift") + descr_iand = _binop_inplace_impl("bitwise_and") + descr_ior = _binop_inplace_impl("bitwise_or") + descr_ixor = _binop_inplace_impl("bitwise_xor") + def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) @@ -1007,6 +1028,20 @@ __ror__ = interp2app(W_NDimArray.descr_ror), __rxor__ = interp2app(W_NDimArray.descr_rxor), + __iadd__ = interp2app(W_NDimArray.descr_iadd), + __isub__ = interp2app(W_NDimArray.descr_isub), + __imul__ = interp2app(W_NDimArray.descr_imul), + __idiv__ = interp2app(W_NDimArray.descr_idiv), + __itruediv__ = interp2app(W_NDimArray.descr_itruediv), + __ifloordiv__ = interp2app(W_NDimArray.descr_ifloordiv), + __imod__ = interp2app(W_NDimArray.descr_imod), + __ipow__ = interp2app(W_NDimArray.descr_ipow), + __ilshift__ = interp2app(W_NDimArray.descr_ilshift), + __irshift__ = interp2app(W_NDimArray.descr_irshift), + __iand__ = interp2app(W_NDimArray.descr_iand), + __ior__ = interp2app(W_NDimArray.descr_ior), + __ixor__ = interp2app(W_NDimArray.descr_ixor), + __eq__ = interp2app(W_NDimArray.descr_eq), __ne__ = interp2app(W_NDimArray.descr_ne), __lt__ = interp2app(W_NDimArray.descr_lt), diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -155,7 +155,8 @@ obj_iter.next() return cur_value -reduce_cum_driver = jit.JitDriver(greens = ['shapelen', 'func', 'dtype'], +reduce_cum_driver = 
jit.JitDriver(name='numpy_reduce_cum_driver', + greens = ['shapelen', 'func', 'dtype'], reds = 'auto') def compute_reduce_cumultative(obj, out, calc_dtype, func, identity): @@ -214,8 +215,7 @@ axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', greens=['shapelen', - 'func', 'dtype', - 'identity'], + 'func', 'dtype'], reds='auto') def do_axis_reduce(shape, func, arr, dtype, axis, out, identity, cumultative, @@ -231,8 +231,7 @@ shapelen = len(shape) while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=dtype, identity=identity, - ) + dtype=dtype) w_val = arr_iter.getitem().convert_to(dtype) if out_iter.first_line: if identity is not None: @@ -529,8 +528,9 @@ val_arr.descr_getitem(space, w_idx)) iter.next() -byteswap_driver = jit.JitDriver(greens = ['dtype'], - reds = 'auto') +byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', + greens = ['dtype'], + reds = 'auto') def byteswap(from_, to): dtype = from_.dtype @@ -542,8 +542,9 @@ to_iter.next() from_iter.next() -choose_driver = jit.JitDriver(greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') +choose_driver = jit.JitDriver(name='numpy_choose_driver', + greens = ['shapelen', 'mode', 'dtype'], + reds = 'auto') def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -572,8 +573,9 @@ out_iter.next() arr_iter.next() -clip_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +clip_driver = jit.JitDriver(name='numpy_clip_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def clip(space, arr, shape, min, max, out): arr_iter = arr.create_iter(shape) @@ -597,8 +599,9 @@ out_iter.next() min_iter.next() -round_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +round_driver = jit.JitDriver(name='numpy_round_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def round(space, arr, dtype, shape, decimals, out): arr_iter = arr.create_iter(shape) @@ -612,7 +615,8 @@ arr_iter.next() out_iter.next() -diagonal_simple_driver = jit.JitDriver(greens = ['axis1', 'axis2'], +diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', + greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -789,6 +789,49 @@ r = [1, 2] + array([1, 2]) assert (r == [2, 4]).all() + def test_inline_op_scalar(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(3)) + getattr(a, op).__call__(2) + assert id(a) == id(b) + + def test_inline_op_array(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(5)) + c = array(range(5)) + d = array(5 * [2]) + getattr(a, op).__call__(d) + assert id(a) == id(b) + reg_op = op.replace('__i', '__') + for i in range(5): + assert a[i] == getattr(c[i], reg_op).__call__(d[i]) + def test_add_list(self): from numpypy import array, ndarray a = array(range(5)) diff --git a/pypy/module/micronumpy/test/test_zjit.py 
b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -6,7 +6,7 @@ import py from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import reset_stats +from rpython.jit.metainterp.warmspot import reset_stats, get_stats from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray @@ -35,9 +35,10 @@ cls.code_mapping = d cls.codes = allcodes - def run(self, name): + def compile_graph(self): + if self.graph is not None: + return space = FakeSpace() - i = self.code_mapping[name] codes = self.codes def f(i): @@ -57,14 +58,18 @@ raise TypeError(w_res) if self.graph is None: - interp, graph = self.meta_interp(f, [i], + interp, graph = self.meta_interp(f, [0], listops=True, backendopt=True, graph_and_interp_only=True) self.__class__.interp = interp self.__class__.graph = graph + + def run(self, name): + self.compile_graph() reset_stats() pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) py.test.skip("don't run for now") return retval @@ -134,6 +139,29 @@ 'int_add': 3, }) + def test_reduce_compile_only_once(self): + self.compile_graph() + reset_stats() + pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping['sum'] + # run it twice + retval = self.interp.eval_graph(self.graph, [i]) + retval = self.interp.eval_graph(self.graph, [i]) + # check that we got only one loop + assert len(get_stats().loops) == 1 + + def test_reduce_axis_compile_only_once(self): + self.compile_graph() + reset_stats() + pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping['axissum'] + # run it twice + retval = self.interp.eval_graph(self.graph, [i]) + retval = self.interp.eval_graph(self.graph, [i]) + # check that we got only one loop + assert len(get_stats().loops) == 1 + + def define_prod(): return """ a = |30| diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,18 +12,18 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap -PyFrame._virtualizable2_ = ['last_instr', 'pycode', - 'valuestackdepth', 'locals_stack_w[*]', - 'cells[*]', - 'last_exception', - 'lastblock', - 'is_being_profiled', - 'w_globals', - 'w_f_trace', - ] +PyFrame._virtualizable_ = ['last_instr', 'pycode', + 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', + 'last_exception', + 'lastblock', + 'is_being_profiled', + 'w_globals', + 'w_f_trace', + ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] @@ -73,7 +73,13 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: + self.last_exception = None return self.popvalue() def jump_absolute(self, jumpto, ec): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py 
b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py --- a/pypy/module/test_lib_pypy/test_curses.py +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -1,6 +1,15 @@ +import pytest + +# Check that lib_pypy.cffi finds the correct version of _cffi_backend. +# Otherwise, the test is skipped. It should never be skipped when run +# with "pypy py.test -A". +try: + from lib_pypy import cffi; cffi.FFI() +except (ImportError, AssertionError), e: + pytest.skip("no cffi module or wrong version (%s)" % (e,)) + from lib_pypy import _curses -import pytest lib = _curses.lib diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Tests for _sqlite3.py""" import pytest, sys @@ -222,3 +223,8 @@ cur.execute("create table test(a)") cur.executemany("insert into test values (?)", [[1], [2], [3]]) assert cur.lastrowid is None + +def test_issue1573(con): + cur = con.cursor() + cur.execute(u'SELECT 1 as méil') + assert cur.description[0][0] == u"méil".encode('utf-8') diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -85,7 +85,7 @@ continue e = elem.split("\t") adr = e[0] - v = " ".join(e[2:]) + v = elem # --- more compactly: " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start @@ -379,15 +379,16 @@ name = entry[:entry.find('(') - 1].lower() addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) + from rpython.jit.backend.tool.viewcode import World + world = World() + for entry in extract_category(log, 'jit-backend-dump'): + world.parse(entry.splitlines(True), truncate_addr=False) dumps = {} - for entry in extract_category(log, 'jit-backend-dump'): - backend, _, dump, _ = entry.split("\n") - _, addr, _, data = re.split(" +", dump) - backend_name = backend.split(" ")[1] - addr = int(addr[1:], 16) - if addr in addrs and addrs[addr]: - name = addrs[addr].pop(0) # they should come in order - dumps[name] = (backend_name, addr, data) + for r in world.ranges: + if r.addr in addrs and addrs[r.addr]: + name = addrs[r.addr].pop(0) # they should come in order + data = r.data.encode('hex') # backward compatibility + dumps[name] = (world.backend_name, r.addr, data) loops = [] for entry in extract_category(log, 'jit-log-opt'): parser = ParserCls(entry, None, {}, 'lltype', None, diff 
--git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -14,9 +14,9 @@ return lltype.nullptr(T) interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLtypeCPU + from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now - apply_jit(interp, graph, LLtypeCPU) + apply_jit(interp, graph, LLGraphCPU) def apply_jit(interp, graph, CPUClass): diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -391,6 +391,7 @@ instance_level = False all_enforced_attrs = None # or a set settled = False + _detect_invalid_attrs = None def __init__(self, bookkeeper, pyobj=None, name=None, basedesc=None, classdict=None, @@ -714,6 +715,10 @@ # by changing the result's annotation (but not, of course, doing an # actual copy in the rtyper). Tested in rpython.rtyper.test.test_rlist, # test_immutable_list_out_of_instance. + if self._detect_invalid_attrs and attr in self._detect_invalid_attrs: + raise Exception("field %r was migrated to %r from a subclass in " + "which it was declared as _immutable_fields_" % + (attr, self.pyobj)) search1 = '%s[*]' % (attr,) search2 = '%s?[*]' % (attr,) cdesc = self @@ -724,6 +729,14 @@ s_result.listdef.never_resize() s_copy = s_result.listdef.offspring() s_copy.listdef.mark_as_immutable() + # + cdesc = cdesc.basedesc + while cdesc is not None: + if cdesc._detect_invalid_attrs is None: + cdesc._detect_invalid_attrs = set() + cdesc._detect_invalid_attrs.add(attr) + cdesc = cdesc.basedesc + # return s_copy cdesc = cdesc.basedesc return s_result # common case diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -379,4 +379,4 @@ def specialize_call_location(funcdesc, args_s, op): assert op is not None - return maybe_star_args(funcdesc, op, args_s) + return maybe_star_args(funcdesc, (op,), args_s) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3088,7 +3088,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class B(A): def meth(self): return self @@ -3128,7 +3128,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class I: pass @@ -3717,6 +3717,24 @@ a = self.RPythonAnnotator() a.build_types(f, [int]) + def test_immutable_field_subclass(self): + class Root: + pass + class A(Root): + _immutable_fields_ = '_my_lst[*]' + def __init__(self, lst): + self._my_lst = lst + def foo(x): + return len(x._my_lst) + + def f(n): + foo(A([2, n])) + foo(Root()) + + a = self.RPythonAnnotator() + 
e = py.test.raises(Exception, a.build_types, f, [int]) + assert "field '_my_lst' was migrated" in str(e.value) + def test_call_classes_with_noarg_init(self): class A: foo = 21 diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -62,20 +62,21 @@ self.current_clt = looptoken.compiled_loop_token self.mc = InstrBuilder(self.cpu.cpuinfo.arch_version) self.pending_guards = [] - assert self.datablockwrapper is None + #assert self.datablockwrapper is None --- but obscure case + # possible, e.g. getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) self.mc.datablockwrapper = self.datablockwrapper self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.current_clt = None self._regalloc = None self.mc = None self.pending_guards = None - assert self.datablockwrapper is None def setup_failure_recovery(self): self.failure_recovery_code = [0, 0, 0, 0] @@ -889,7 +890,7 @@ relative_offset = tok.pos_recovery_stub - tok.offset guard_pos = block_start + tok.offset if not tok.is_guard_not_invalidated: - # patch the guard jumpt to the stub + # patch the guard jump to the stub # overwrite the generate NOP with a B_offs to the pos of the # stub mc = InstrBuilder(self.cpu.cpuinfo.arch_version) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -33,6 +33,7 @@ from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.arm import callbuilder +from rpython.rlib.rarithmetic import r_uint class ArmGuardToken(GuardToken): @@ -190,7 +191,7 @@ self.mc.RSB_ri(resloc.value, l0.value, imm=0) return fcond - def _emit_guard(self, op, arglocs, fcond, save_exc, + def build_guard_token(self, op, frame_depth, arglocs, offset, fcond, save_exc, is_guard_not_invalidated=False, is_guard_not_forced=False): assert isinstance(save_exc, bool) @@ -198,7 +199,27 @@ descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) + gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + token = ArmGuardToken(self.cpu, gcmap, + descr, + failargs=op.getfailargs(), + fail_locs=arglocs, + offset=offset, + exc=save_exc, + frame_depth=frame_depth, + is_guard_not_invalidated=is_guard_not_invalidated, + is_guard_not_forced=is_guard_not_forced, + fcond=fcond) + return token + + def _emit_guard(self, op, arglocs, fcond, save_exc, + is_guard_not_invalidated=False, + is_guard_not_forced=False): pos = self.mc.currpos() + token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], pos, fcond, save_exc, + is_guard_not_invalidated, + is_guard_not_forced) + self.pending_guards.append(token) # For all guards that are not GUARD_NOT_INVALIDATED we emit a # breakpoint to ensure the location is patched correctly. 
In the case # of GUARD_NOT_INVALIDATED we use just a NOP, because it is only @@ -207,17 +228,6 @@ self.mc.NOP() else: self.mc.BKPT() - gcmap = allocate_gcmap(self, arglocs[0].value, JITFRAME_FIXED_SIZE) - self.pending_guards.append(ArmGuardToken(self.cpu, gcmap, - descr, - failargs=op.getfailargs(), - fail_locs=arglocs[1:], - offset=pos, - exc=save_exc, - frame_depth=arglocs[0].value, - is_guard_not_invalidated=is_guard_not_invalidated, - is_guard_not_forced=is_guard_not_forced, - fcond=fcond)) return c.AL def _emit_guard_overflow(self, guard, failargs, fcond): @@ -351,7 +361,11 @@ # XXX self.mov(fail_descr_loc, RawStackLoc(ofs)) self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(0) # r0 + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -912,6 +926,14 @@ return fcond + def store_force_descr(self, op, fail_locs, frame_depth): + pos = self.mc.currpos() + guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL, True, False, True) + #self.pending_guards.append(guard_token) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(pos, guard_token) + def emit_op_force_token(self, op, arglocs, regalloc, fcond): # XXX kill me res_loc = arglocs[0] @@ -959,16 +981,6 @@ pmc.B_offs(self.mc.currpos(), c.EQ) return pos - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - tmploc = self._regalloc.get_scratch_reg(INT) - self.mov_loc_loc(vloc, r.ip) - self.mc.MOV_ri(tmploc.value, 0) - self.mc.STR_ri(tmploc.value, r.ip.value, ofs) - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from (tmploc, 0) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1194,6 +1194,12 @@ # self._compute_hint_frame_locations_from_descr(descr) return [] + def prepare_op_guard_not_forced_2(self, op, fcond): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = self._prepare_guard(op) + self.assembler.store_force_descr(op, fail_locs[1:], fail_locs[0].value) + self.possibly_free_vars(op.getfailargs()) + def prepare_guard_call_may_force(self, op, guard_op, fcond): args = self._prepare_call(op, save_all_regs=True) return self._prepare_guard(guard_op, args) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -101,6 +101,9 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) + def get_vinfo(self): + return self.vinfo + def __repr__(self): return 'FieldDescr(%r, %r)' % (self.S, self.fieldname) @@ -170,7 +173,7 @@ translate_support_code = False is_llgraph = True - def __init__(self, rtyper, stats=None, *ignored_args, **ignored_kwds): + def __init__(self, rtyper, stats=None, *ignored_args, **kwds): model.AbstractCPU.__init__(self) self.rtyper = rtyper self.llinterp = LLInterpreter(rtyper) @@ -178,6 +181,7 @@ class MiniStats: pass self.stats = stats or MiniStats() + self.vinfo_for_tests = kwds.get('vinfo_for_tests', 
None) def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): clt = model.CompiledLoopToken(self, looptoken.number) @@ -316,6 +320,8 @@ except KeyError: descr = FieldDescr(S, fieldname) self.descrs[key] = descr + if self.vinfo_for_tests is not None: + descr.vinfo = self.vinfo_for_tests return descr def arraydescrof(self, A): @@ -496,6 +502,8 @@ def bh_raw_store_i(self, struct, offset, newvalue, descr): ll_p = rffi.cast(rffi.CCHARP, struct) ll_p = rffi.cast(lltype.Ptr(descr.A), rffi.ptradd(ll_p, offset)) + if descr.A.OF == lltype.SingleFloat: + newvalue = longlong.int2singlefloat(newvalue) ll_p[0] = rffi.cast(descr.A.OF, newvalue) def bh_raw_store_f(self, struct, offset, newvalue, descr): @@ -600,6 +608,7 @@ forced_deadframe = None overflow_flag = False last_exception = None + force_guard_op = None def __init__(self, cpu, argboxes, args): self.env = {} @@ -766,6 +775,8 @@ if self.forced_deadframe is not None: saved_data = self.forced_deadframe._saved_data self.fail_guard(descr, saved_data) + self.force_guard_op = self.current_op + execute_guard_not_forced_2 = execute_guard_not_forced def execute_guard_not_invalidated(self, descr): if self.lltrace.invalid: @@ -887,7 +898,6 @@ # res = CALL assembler_call_helper(pframe) # jmp @done # @fastpath: - # RESET_VABLE # res = GETFIELD(pframe, 'result') # @done: # @@ -907,25 +917,17 @@ vable = lltype.nullptr(llmemory.GCREF.TO) # # Emulate the fast path - def reset_vable(jd, vable): - if jd.index_of_virtualizable != -1: - fielddescr = jd.vable_token_descr - NULL = lltype.nullptr(llmemory.GCREF.TO) - self.cpu.bh_setfield_gc(vable, NULL, fielddescr) + # faildescr = self.cpu.get_latest_descr(pframe) if faildescr == self.cpu.done_with_this_frame_descr_int: - reset_vable(jd, vable) return self.cpu.get_int_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_ref: - reset_vable(jd, vable) return self.cpu.get_ref_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_float: - reset_vable(jd, vable) return self.cpu.get_float_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_void: - reset_vable(jd, vable) return None - # + assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: result = assembler_helper_ptr(pframe, vable) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt) + ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -24,6 +24,7 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): + assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr self.failargs = failargs @@ -232,11 +233,8 @@ jmp_location = self._call_assembler_patch_je(result_loc, je_location) - # Path B: fast path. Must load the return value, and reset the token + # Path B: fast path. 
Must load the return value - # Reset the vable token --- XXX really too much special logic here:-( - if jd.index_of_virtualizable >= 0: - self._call_assembler_reset_vtoken(jd, vloc) # self._call_assembler_load_result(op, result_loc) # diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -76,7 +76,13 @@ FLAG_STRUCT = 'X' FLAG_VOID = 'V' -class FieldDescr(AbstractDescr): +class ArrayOrFieldDescr(AbstractDescr): + vinfo = None + + def get_vinfo(self): + return self.vinfo + +class FieldDescr(ArrayOrFieldDescr): name = '' offset = 0 # help translation field_size = 0 @@ -150,12 +156,13 @@ # ____________________________________________________________ # ArrayDescrs -class ArrayDescr(AbstractDescr): +class ArrayDescr(ArrayOrFieldDescr): tid = 0 basesize = 0 # workaround for the annotator itemsize = 0 lendescr = None flag = '\x00' + vinfo = None def __init__(self, basesize, itemsize, lendescr, flag): self.basesize = basesize diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -721,12 +721,8 @@ def bh_raw_load_i(self, addr, offset, descr): ofs, size, sign = self.unpack_arraydescr_size(descr) - items = addr + offset - for TYPE, _, itemsize in unroll_basic_sizes: - if size == itemsize: - items = rffi.cast(rffi.CArrayPtr(TYPE), items) - return rffi.cast(lltype.Signed, items[0]) - assert False # unreachable code + assert ofs == 0 # otherwise, 'descr' is not a raw length-less array + return self.read_int_at_mem(addr, offset, size, sign) def bh_raw_load_f(self, addr, offset, descr): items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), addr + offset) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -23,7 +23,7 @@ # - floats neg and abs class Frame(object): - _virtualizable2_ = ['i'] + _virtualizable_ = ['i'] def __init__(self, i): self.i = i @@ -98,7 +98,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -6,7 +6,7 @@ total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 - total_freed_bridges = 0 + total_freed_bridges = 0 # for heaptracker # _all_size_descrs_with_vtable = None @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -294,7 +294,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3954,8 +3954,12 @@ p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - 
value = rffi.cast(T, 0x4243444546474849) + value = rffi.cast(T, -0x4243444546474849) rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, + arraydescr) + assert got == rffi.cast(lltype.Signed, value) + # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -3981,6 +3985,11 @@ p[i] = '\xDD' value = rffi.cast(T, 1.12e20) rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_f(rffi.cast(lltype.Signed, p), 16, + arraydescr) + got = longlong.getrealfloat(got) + assert got == rffi.cast(lltype.Float, value) + # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -3991,22 +4000,58 @@ assert result == rffi.cast(lltype.Float, value) rawstorage.free_raw_storage(p) + def test_raw_load_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + from rpython.rlib import rawstorage + for T in [rffi.FLOAT]: + ops = """ + [i0, i1] + i2 = raw_load(i0, i1, descr=arraydescr) + finish(i2) + """ + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + for i in range(31): + p[i] = '\xDD' + value = rffi.cast(T, 1.12e20) + rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, + arraydescr) + assert got == longlong.singlefloat2int(value) + # + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken, + rffi.cast(lltype.Signed, p), 16) + result = self.cpu.get_int_value(deadframe, 0) + assert result == longlong.singlefloat2int(value) + rawstorage.free_raw_storage(p) + def test_raw_store_int(self): from rpython.rlib import rawstorage for T in [rffi.UCHAR, rffi.SIGNEDCHAR, rffi.USHORT, rffi.SHORT, rffi.UINT, rffi.INT, rffi.ULONG, rffi.LONG]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = (-0x4243444546474849) & sys.maxint + self.cpu.bh_raw_store_i(rffi.cast(lltype.Signed, p), 16, value, + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert result == rffi.cast(T, value) + rawstorage.free_raw_storage(p) + # ops = """ [i0, i1, i2] raw_store(i0, i1, i2, descr=arraydescr) finish() """ - arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = 0x4243444546474849 & sys.maxint loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -4021,16 +4066,24 @@ py.test.skip("requires floats") from rpython.rlib import rawstorage for T in [rffi.DOUBLE]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = 1.23e20 + self.cpu.bh_raw_store_f(rffi.cast(lltype.Signed, p), 16, + longlong.getfloatstorage(value), + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert result == rffi.cast(T, value) + rawstorage.free_raw_storage(p) + # ops = """ [i0, i1, f2] raw_store(i0, i1, f2, descr=arraydescr) finish() """ - arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = 1.23e20 loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() 
self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -4041,6 +4094,41 @@ assert result == rffi.cast(T, value) rawstorage.free_raw_storage(p) + def test_raw_store_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + from rpython.rlib import rawstorage + for T in [rffi.FLOAT]: + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + value = rffi.cast(T, 1.23e20) + self.cpu.bh_raw_store_i(rffi.cast(lltype.Signed, p), 16, + longlong.singlefloat2int(value), + arraydescr) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert (rffi.cast(lltype.Float, result) == + rffi.cast(lltype.Float, value)) + rawstorage.free_raw_storage(p) + # + ops = """ + [i0, i1, i2] + raw_store(i0, i1, i2, descr=arraydescr) + finish() + """ + p = rawstorage.alloc_raw_storage(31) + for i in range(31): + p[i] = '\xDD' + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.execute_token(looptoken, + rffi.cast(lltype.Signed, p), 16, + longlong.singlefloat2int(value)) + result = rawstorage.raw_storage_getitem(T, p, 16) + assert (rffi.cast(lltype.Float, result) == + rffi.cast(lltype.Float, value)) + rawstorage.free_raw_storage(p) + def test_forcing_op_with_fail_arg_in_reg(self): values = [] def maybe_force(token, flag): diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -59,7 +59,7 @@ t.buildannotator().build_types(function, [int] * len(args), main_entry_point=True) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() warmrunnerdesc = WarmRunnerDesc(t, translate_support_code=True, CPUClass=self.CPUClass, **kwds) diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -240,7 +240,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True): + def parse(self, f, textonly=True, truncate_addr=True): for line in f: if line.startswith('BACKEND '): self.backend_name = line.split(' ')[1].strip() @@ -250,7 +250,11 @@ assert pieces[2].startswith('+') if len(pieces) == 3: continue # empty line - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset data = pieces[3].replace(':', '').decode('hex') @@ -268,14 +272,21 @@ pieces = line.split(None, 3) assert pieces[1].startswith('@') assert pieces[2].startswith('+') - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if truncate_addr: + baseaddr &= 0xFFFFFFFFL + elif baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset self.logentries[addr] = pieces[3] elif line.startswith('SYS_EXECUTABLE '): filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': - self.symbols.update(load_symbols(filename)) + try: + self.symbols.update(load_symbols(filename)) + except Exception as e: + print e self.executable_name = filename def find_cross_references(self): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- 
a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -79,6 +79,7 @@ allblocks) self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.pending_guard_tokens = None @@ -1846,7 +1847,11 @@ self.mov(fail_descr_loc, RawEbpLoc(ofs)) arglist = op.getarglist() if arglist and arglist[0].type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(1) # rax + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather From noreply at buildbot.pypy.org Tue Aug 13 15:32:38 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 Aug 2013 15:32:38 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix forgotten jitframe allocation before call_assembler Message-ID: <20130813133238.1A7E21C32CB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66116:49e406509329 Date: 2013-08-13 15:31 +0200 http://bitbucket.org/pypy/pypy/changeset/49e406509329/ Log: fix forgotten jitframe allocation before call_assembler diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -100,7 +100,7 @@ if ofs >= 0: asm.append((ofs, v.strip("\n"))) # - prefix = hex(dump_start)[:-8] + prefix = hex(dump_start)[:-9] asm_index = 0 for i, op in enumerate(loop.operations): end = 0 diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -226,7 +226,6 @@ else: raise AssertionError(kind) - import pdb;pdb.set_trace() gcref = cast_instance_to_gcref(value) gcref = rgc._make_sure_does_not_move(gcref) value = rffi.cast(lltype.Signed, gcref) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -172,8 +172,8 @@ size_box, descr=descrs.jfi_frame_size) self.newops.append(op0) - self.gen_malloc_nursery_varsize_frame(size_box, frame) - self.gen_initialize_tid(frame, descrs.arraydescr.tid) + self.gen_malloc_nursery_varsize_frame(size_box, frame, + descrs.arraydescr.tid) length_box = history.BoxInt() op1 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], length_box, @@ -321,7 +321,7 @@ self.recent_mallocs[v_result] = None return True - def gen_malloc_nursery_varsize_frame(self, sizebox, v_result): + def gen_malloc_nursery_varsize_frame(self, sizebox, v_result, tid): """ Generate CALL_MALLOC_NURSERY_VARSIZE_FRAME """ self.emitting_an_operation_that_can_collect() @@ -332,6 +332,8 @@ self.newops.append(op) self.recent_mallocs[v_result] = None + self.gen_initialize_tid(v_result, tid) + def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. If that fails, generate a plain CALL_MALLOC_GC instead. 
diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -3,6 +3,7 @@ from rpython.jit.metainterp.history import BoxPtr, ConstPtr, ConstInt from rpython.rlib.objectmodel import specialize from rpython.rlib.objectmodel import we_are_translated +from rpython.jit.metainterp import history # # STM Support @@ -92,11 +93,13 @@ continue # ---------- calls ---------- if op.is_call(): - self.known_category.clear() if op.getopnum() == rop.CALL_RELEASE_GIL: self.fallback_inevitable(op) + elif op.getopnum() == rop.CALL_ASSEMBLER: + self.handle_call_assembler(op) else: self.newops.append(op) + self.known_category.clear() continue # ---------- copystrcontent ---------- if op.getopnum() in (rop.COPYSTRCONTENT, @@ -138,8 +141,15 @@ for v, c in self.known_category.items(): if c == 'R': self.known_category[v] = 'P' - - + + def gen_malloc_nursery_varsize_frame(self, sizebox, v_result, tid): + """ Generate CALL_MALLOC_NURSERY_VARSIZE_FRAME + """ + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + args = [ConstInt(addr), sizebox, ConstInt(tid)] + descr = self.gc_ll_descr.malloc_big_fixedsize_descr + self._gen_call_malloc_gc(args, v_result, descr) + def gen_write_barrier(self, v): raise NotImplementedError diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -876,6 +876,19 @@ return rst def _call_header_shadowstack(self, gcrootmap): + # do a write-barrier on ebp / frame for stm + # XXX: may not be necessary if we are sure that we only get + # freshly allocated frames or already write-ready frames + # from the caller... 
+ gc_ll_descr = self.cpu.gc_ll_descr + gcrootmap = gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_stm: + if not hasattr(gc_ll_descr, 'P2Wdescr'): + raise Exception("unreachable code") + wbdescr = gc_ll_descr.P2Wdescr + self._stm_barrier_fastpath(self.mc, wbdescr, [ebp], is_frame=True) + + # put the frame in ebp on the shadowstack for the GC to find rst = self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) self.mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp self.mc.ADD_ri(ebx.value, WORD) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -20,6 +20,7 @@ from rpython.jit.backend.llsupport import jitframe from rpython.memory.gc.stmgc import StmGC from rpython.jit.metainterp import history +from rpython.jit.codewriter.effectinfo import EffectInfo import itertools, sys import ctypes @@ -101,6 +102,11 @@ class FakeGCHeaderBuilder: size_gc_header = WORD +GCPTR = lltype.Ptr(lltype.GcStruct( + 'GCPTR', ('h_tid', lltype.Unsigned), + ('h_revision', lltype.Signed), + ('h_original', lltype.Unsigned))) +HDRSIZE = 3 * WORD class GCDescrStm(GCDescrShadowstackDirect): def __init__(self): @@ -147,6 +153,21 @@ self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, RESULT=lltype.Bool) + def malloc_big_fixedsize(size, tid): + entries = size + HDRSIZE + TP = rffi.CArray(lltype.Char) + obj = lltype.malloc(TP, n=entries, flavor='raw', + track_allocation=False, zero=True) + objptr = rffi.cast(GCPTR, obj) + objptr.h_tid = rffi.cast(lltype.Unsigned, + StmGC.GCFLAG_OLD|StmGC.GCFLAG_WRITE_BARRIER + | tid) + objptr.h_revision = rffi.cast(lltype.Signed, -sys.maxint) + return rffi.cast(llmemory.GCREF, objptr) + self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize, + [lltype.Signed] * 2) + + def malloc_jitframe(self, frame_info): """ Allocate a new frame, overwritten by tests """ @@ -180,6 +201,7 @@ def setup_method(self, meth): cpu = CPU(None, None) cpu.gc_ll_descr = GCDescrStm() + self.p2wd = cpu.gc_ll_descr.P2Wdescr self.p2rd = cpu.gc_ll_descr.P2Rdescr @@ -205,7 +227,7 @@ def assert_in(self, called_on, args): for i, ref in enumerate(args): - assert rffi.cast_ptr_to_adr(ref) == called_on[i] + assert rffi.cast_ptr_to_adr(ref) in called_on def assert_not_in(self, called_on, args): for ref in args: @@ -267,7 +289,7 @@ # check if rev-fastpath worked if rev == PRIV_REV: # fastpath - assert not called_on + self.assert_not_in(called_on, [sgcref]) else: self.assert_in(called_on, [sgcref]) @@ -310,7 +332,7 @@ # check if rev-fastpath worked if rev == PRIV_REV: # fastpath and WRITE_BARRIER not set - assert not called_on + self.assert_not_in(called_on, [sgcref]) else: self.assert_in(called_on, [sgcref]) @@ -412,6 +434,70 @@ assert guard_failed + + + def test_assembler_call(self): + cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + + called = [] + def assembler_helper(deadframe, virtualizable): + frame = rffi.cast(JITFRAMEPTR, deadframe) + frame_adr = rffi.cast(lltype.Signed, frame.jf_descr) + called.append(frame_adr) + return 4 + 9 + + FUNCPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF, + llmemory.GCREF], + lltype.Signed)) + class FakeJitDriverSD: + index_of_virtualizable = -1 + _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) + assembler_helper_adr = llmemory.cast_ptr_to_adr( + _assembler_helper_ptr) + + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + i10 = 
int_add(i0, i1) + i11 = int_add(i10, i2) + i12 = int_add(i11, i3) + i13 = int_add(i12, i4) + i14 = int_add(i13, i5) + i15 = int_add(i14, i6) + i16 = int_add(i15, i7) + i17 = int_add(i16, i8) + i18 = int_add(i17, i9) + finish(i18)''' + loop = parse(ops) + looptoken = JitCellToken() + looptoken.outermost_jitdriver_sd = FakeJitDriverSD() + finish_descr = loop.operations[-1].getdescr() + self.cpu.done_with_this_frame_descr_int = BasicFinalDescr() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + ARGS = [lltype.Signed] * 10 + RES = lltype.Signed + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) + args = [i+1 for i in range(10)] + deadframe = self.cpu.execute_token(looptoken, *args) + + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + i10 = int_add(i0, 42) + i11 = call_assembler(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, descr=looptoken) + guard_not_forced()[] + finish(i11) + ''' + loop = parse(ops, namespace=locals()) + othertoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + args = [i+1 for i in range(10)] + deadframe = self.cpu.execute_token(othertoken, *args) + assert called == [id(finish_descr)] + + From noreply at buildbot.pypy.org Tue Aug 13 17:42:23 2013 From: noreply at buildbot.pypy.org (taavi_burns) Date: Tue, 13 Aug 2013 17:42:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Removes duplicate code (rint is effectively round, for complex types). Message-ID: <20130813154223.46BE01C011D@cobra.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: Changeset: r66118:99a209086e0a Date: 2013-08-13 11:05 -0400 http://bitbucket.org/pypy/pypy/changeset/99a209086e0a/ Log: Removes duplicate code (rint is effectively round, for complex types). diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1341,15 +1341,6 @@ # return (rfloat.copysign(v1[0], v2[0]), # rfloat.copysign(v1[1], v2[1])) - @specialize.argtype(1) - def rint(self, v): - ans = list(self.for_computation(self.unbox(v))) - if isfinite(ans[0]): - ans[0] = rfloat.round_double(ans[0], 0, half_even=True) - if isfinite(ans[1]): - ans[1] = rfloat.round_double(ans[1], 0, half_even=True) - return self.box_complex(ans[0], ans[1]) - @complex_unary_op def sign(self, v): ''' @@ -1408,11 +1399,14 @@ def round(self, v, decimals=0): ans = list(self.for_computation(self.unbox(v))) if isfinite(ans[0]): - ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) + ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) if isfinite(ans[1]): - ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) + ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) return self.box_complex(ans[0], ans[1]) + def rint(self, v): + return self.round(v) + # No floor, ceil, trunc in numpy for complex #@simple_unary_op #def floor(self, v): From noreply at buildbot.pypy.org Tue Aug 13 17:42:22 2013 From: noreply at buildbot.pypy.org (taavi_burns) Date: Tue, 13 Aug 2013 17:42:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Adds rint ufunc. Message-ID: <20130813154222.09B061C009F@cobra.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: Changeset: r66117:8354267ccd65 Date: 2013-08-13 10:30 -0400 http://bitbucket.org/pypy/pypy/changeset/8354267ccd65/ Log: Adds rint ufunc. 
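(Illustration, not part of the changeset: a minimal usage sketch of the new ufunc, assuming the numpypy module built from this change is importable. Halfway cases round to the nearest even integer, matching rfloat.round_double(..., half_even=True) and the expectations in test_rint below.)

    from numpypy import array, rint

    b = rint(array([-1.5, -0.5, 0.5, 1.5, 2.5]))
    # expected elements: -2.0, -0.0, 0.0, 2.0, 2.0  (ties go to the even integer)
    assert b[0] == -2.0 and b[1] == 0.0 and b[2] == 0.0
    assert b[3] == 2.0 and b[4] == 2.0
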
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -140,6 +140,7 @@ ("deg2rad", "radians"), ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), + ("rint", "rint"), ("sign", "sign"), ("signbit", "signbit"), ("sin", "sin"), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -620,6 +620,7 @@ ("positive", "pos", 1), ("negative", "neg", 1), ("absolute", "abs", 1, {"complex_to_float": True}), + ("rint", "rint", 1), ("sign", "sign", 1, {"promote_bools": True}), ("signbit", "signbit", 1, {"bool_result": True, "allow_complex": False}), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -255,6 +255,22 @@ for i in range(3): assert c[i] == a[i] * b[i] + def test_rint(self): + from numpypy import array, complex, rint, isnan + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + reference = array([ninf, -2., -1., -0., 0., 0., 0., 1., 2., inf]) + a = array([ninf, -1.5, -1., -0.5, -0., 0., 0.5, 1., 1.5, inf]) + b = rint(a) + for i in range(len(a)): + assert b[i] == reference[i] + assert isnan(rint(nan)) + assert isnan(rint(nnan)) + + assert rint(complex(inf, 1.5)) == complex(inf, 2.) + assert rint(complex(0.5, inf)) == complex(0., inf) + def test_sign(self): from numpypy import array, sign, dtype diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -307,6 +307,13 @@ def min(self, v1, v2): return min(v1, v2) + @simple_unary_op + def rint(self, v): + if isfinite(v): + return rfloat.round_double(v, 0, half_even=True) + else: + return v + class NonNativePrimitive(Primitive): _mixin_ = True @@ -1334,6 +1341,15 @@ # return (rfloat.copysign(v1[0], v2[0]), # rfloat.copysign(v1[1], v2[1])) + @specialize.argtype(1) + def rint(self, v): + ans = list(self.for_computation(self.unbox(v))) + if isfinite(ans[0]): + ans[0] = rfloat.round_double(ans[0], 0, half_even=True) + if isfinite(ans[1]): + ans[1] = rfloat.round_double(ans[1], 0, half_even=True) + return self.box_complex(ans[0], ans[1]) + @complex_unary_op def sign(self, v): ''' From noreply at buildbot.pypy.org Tue Aug 13 17:42:24 2013 From: noreply at buildbot.pypy.org (taavi_burns) Date: Tue, 13 Aug 2013 17:42:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge default Message-ID: <20130813154224.8B4801C32CB@cobra.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: Changeset: r66119:4e153d9db4c5 Date: 2013-08-13 11:41 -0400 http://bitbucket.org/pypy/pypy/changeset/4e153d9db4c5/ Log: Merge default diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -140,6 +140,7 @@ ("deg2rad", "radians"), ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), + ("rint", "rint"), ("sign", "sign"), ("signbit", "signbit"), ("sin", "sin"), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -620,6 +620,7 @@ ("positive", "pos", 1), ("negative", "neg", 1), ("absolute", "abs", 1, 
{"complex_to_float": True}), + ("rint", "rint", 1), ("sign", "sign", 1, {"promote_bools": True}), ("signbit", "signbit", 1, {"bool_result": True, "allow_complex": False}), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -255,6 +255,22 @@ for i in range(3): assert c[i] == a[i] * b[i] + def test_rint(self): + from numpypy import array, complex, rint, isnan + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + reference = array([ninf, -2., -1., -0., 0., 0., 0., 1., 2., inf]) + a = array([ninf, -1.5, -1., -0.5, -0., 0., 0.5, 1., 1.5, inf]) + b = rint(a) + for i in range(len(a)): + assert b[i] == reference[i] + assert isnan(rint(nan)) + assert isnan(rint(nnan)) + + assert rint(complex(inf, 1.5)) == complex(inf, 2.) + assert rint(complex(0.5, inf)) == complex(0., inf) + def test_sign(self): from numpypy import array, sign, dtype diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -307,6 +307,13 @@ def min(self, v1, v2): return min(v1, v2) + @simple_unary_op + def rint(self, v): + if isfinite(v): + return rfloat.round_double(v, 0, half_even=True) + else: + return v + class NonNativePrimitive(Primitive): _mixin_ = True @@ -1392,11 +1399,14 @@ def round(self, v, decimals=0): ans = list(self.for_computation(self.unbox(v))) if isfinite(ans[0]): - ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) + ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) if isfinite(ans[1]): - ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) + ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) return self.box_complex(ans[0], ans[1]) + def rint(self, v): + return self.round(v) + # No floor, ceil, trunc in numpy for complex #@simple_unary_op #def floor(self, v): From noreply at buildbot.pypy.org Tue Aug 13 18:01:47 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 Aug 2013 18:01:47 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: implement fastpath in read and write barriers of gc.py, but fail at testing them Message-ID: <20130813160148.006841C32CB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66120:547adb48566a Date: 2013-08-13 18:00 +0200 http://bitbucket.org/pypy/pypy/changeset/547adb48566a/ Log: implement fastpath in read and write barriers of gc.py, but fail at testing them diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -442,16 +442,52 @@ def __init__(self, gc_ll_descr, stmcat): assert stmcat == 'P2R' STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_read_barrier') + 'stm_DirectReadBarrier') # XXX: implement fastpath then change to stm_DirectReadBarrier + @specialize.arg(2) + def _do_barrier(self, gcref_struct, returns_modified_object): + assert returns_modified_object + from rpython.memory.gc.stmgc import get_hdr_revision + objadr = llmemory.cast_ptr_to_adr(gcref_struct) + + # if h_revision == privat_rev of transaction + rev = get_hdr_revision(objadr) + priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) + if rev[0] == priv_rev[0]: + return gcref_struct + + # XXX: readcache! 
+ funcptr = self.get_barrier_funcptr(returns_modified_object) + res = funcptr(objadr) + return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) + class STMWriteBarrierDescr(STMBarrierDescr): def __init__(self, gc_ll_descr, stmcat): assert stmcat in ['P2W'] STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_write_barrier') - # XXX: implement fastpath, then change to stm_WriteBarrier + 'stm_WriteBarrier') + + @specialize.arg(2) + def _do_barrier(self, gcref_struct, returns_modified_object): + assert returns_modified_object + from rpython.memory.gc.stmgc import (StmGC, get_hdr_revision, + get_hdr_tid) + objadr = llmemory.cast_ptr_to_adr(gcref_struct) + + # if h_revision == privat_rev of transaction + rev = get_hdr_revision(objadr) + priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) + if rev[0] == priv_rev[0]: + # also WRITE_BARRIER not set? + tid = get_hdr_tid(objadr)[0] + if not (tid & StmGC.GCFLAG_WRITE_BARRIER): + return gcref_struct + + funcptr = self.get_barrier_funcptr(returns_modified_object) + res = funcptr(objadr) + return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) class GcLLDescr_framework(GcLLDescription): diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -143,7 +143,7 @@ self.known_category[v] = 'P' def gen_malloc_nursery_varsize_frame(self, sizebox, v_result, tid): - """ Generate CALL_MALLOC_NURSERY_VARSIZE_FRAME + """ For now don't generate CALL_MALLOC_NURSERY_VARSIZE_FRAME """ addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') args = [ConstInt(addr), sizebox, ConstInt(tid)] diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -258,6 +258,57 @@ s.h_tid = rffi.cast(lltype.Unsigned, StmGC.PREBUILT_FLAGS | tid) s.h_revision = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) return s + + def test_gc_read_barrier_fastpath(self): + from rpython.jit.backend.llsupport.gc import STMReadBarrierDescr + descr = STMReadBarrierDescr(self.cpu.gc_ll_descr, 'P2R') + + called = [] + def read(obj): + called.append(obj) + return obj + + PRIV_REV = 66 + class fakellop: + def stm_get_adr_of_private_rev_num(self, _): + TP = rffi.SIGNEDP + p = lltype.malloc(TP, n=1, flavor='raw', + track_allocation=False, zero=True) + p[0] = PRIV_REV + return rffi.cast(llmemory.Address, p) + + functype = lltype.Ptr(lltype.FuncType( + [llmemory.Address], llmemory.Address)) + funcptr = llhelper(functype, read) + descr.b_failing_case_ptr = funcptr + descr.llop1 = fakellop() + + # -------- TEST -------- + for rev in [PRIV_REV+4, PRIV_REV]: + called[:] = [] + + s = self.allocate_prebuilt_s() + sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) + s.h_revision = rev + + descr._do_barrier(llmemory.AddressAsInt(sgcref), + returns_modified_object=True) + + # check if rev-fastpath worked + if rev == PRIV_REV: + # fastpath + assert sgcref not in called + else: + assert sgcref in called + + # XXX: read_cache test! 
+ # # now add it to the read-cache and check + # # that it will never call the read_barrier + # assert not called_on + + + + def test_read_barrier_fastpath(self): cpu = self.cpu diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -15,6 +15,18 @@ first_gcflag = 1 << (LONG_BIT//2) + + +def get_hdr_tid(addr): + return llmemory.cast_adr_to_ptr(addr + StmGC.H_TID, rffi.SIGNEDP) + +def get_hdr_revision(addr): + return llmemory.cast_adr_to_ptr(addr + StmGC.H_REVISION, rffi.SIGNEDP) + +def get_hdr_original(addr): + return llmemory.cast_adr_to_ptr(addr + StmGC.H_ORIGINAL, rffi.SIGNEDP) + + class StmGC(MovingGCBase): _alloc_flavor_ = "raw" inline_simple_malloc = True @@ -58,6 +70,8 @@ FX_MASK = 65535 + def get_type_id(self, obj): + return llop.stm_get_tid(llgroup.HALFWORD, obj) def setup(self): # Hack: MovingGCBase.setup() sets up stuff related to id(), which @@ -67,24 +81,12 @@ llop.stm_initialize(lltype.Void) - def get_type_id(self, obj): - return llop.stm_get_tid(llgroup.HALFWORD, obj) - - def get_hdr_tid(self, addr): - return llmemory.cast_adr_to_ptr(addr + self.H_TID, rffi.SIGNEDP) - - def get_hdr_revision(self, addr): - return llmemory.cast_adr_to_ptr(addr + self.H_REVISION, rffi.SIGNEDP) - - def get_hdr_original(self, addr): - return llmemory.cast_adr_to_ptr(addr + self.H_ORIGINAL, rffi.SIGNEDP) - def get_original_copy(self, obj): addr = llmemory.cast_ptr_to_adr(obj) - if bool(self.get_hdr_tid(addr)[0] & self.GCFLAG_PREBUILT_ORIGINAL): + if bool(get_hdr_tid(addr)[0] & StmGC.GCFLAG_PREBUILT_ORIGINAL): return obj # - orig = self.get_hdr_original(addr)[0] + orig = get_hdr_original(addr)[0] if orig == 0: return obj # @@ -127,11 +129,12 @@ return llop.stm_weakref_allocate(llmemory.GCREF, size, typeid16, obj) + def can_move(self, obj): """Means the reference will stay valid, except if not seen by the GC, then it can get collected.""" - tid = self.get_hdr_tid(obj)[0] - if bool(tid & self.GCFLAG_OLD): + tid = get_hdr_tid(obj)[0] + if bool(tid & StmGC.GCFLAG_OLD): return False # XXX wrong so far. We should add a flag to the # object that means "don't ever kill this copy" return True @@ -157,7 +160,7 @@ source_start, dest_start, length): ll_assert(False, 'XXX') return False - + def id(self, gcobj): return llop.stm_id(lltype.Signed, gcobj) diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -167,23 +167,6 @@ #define UNLIKELY(test) __builtin_expect(test, 0) - -static inline gcptr stm_read_barrier(gcptr obj) { - /* XXX optimize to get the smallest code */ - if (UNLIKELY((obj->h_revision != stm_private_rev_num) && - (FXCACHE_AT(obj) != obj))) - obj = stm_DirectReadBarrier(obj); - return obj; -} - -static inline gcptr stm_write_barrier(gcptr obj) { - if (UNLIKELY((obj->h_revision != stm_private_rev_num) | - ((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0))) - obj = stm_WriteBarrier(obj); - return obj; -} - -#if 0 #define stm_read_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) && \ (FXCACHE_AT(obj) != (obj))) ? \ @@ -195,6 +178,6 @@ (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? 
\ stm_WriteBarrier(obj) \ : (obj)) -#endif + #endif From noreply at buildbot.pypy.org Tue Aug 13 18:04:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 18:04:56 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: A branch to improve the static placement of barriers Message-ID: <20130813160456.A6F781C32CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66121:dfc54ae67a9e Date: 2013-08-13 16:25 +0200 http://bitbucket.org/pypy/pypy/changeset/dfc54ae67a9e/ Log: A branch to improve the static placement of barriers From noreply at buildbot.pypy.org Tue Aug 13 18:04:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 18:04:57 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/9dc18268f0da Message-ID: <20130813160457.EFDB41C32CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66122:82d9f20cc182 Date: 2013-08-13 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/82d9f20cc182/ Log: import stmgc/9dc18268f0da diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -7,6 +7,7 @@ */ #include "stmimpl.h" +#ifdef _GC_DEBUG char tmp_buf[128]; char* stm_dbg_get_hdr_str(gcptr obj) { @@ -26,6 +27,7 @@ cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); return tmp_buf; } +#endif @@ -275,29 +277,81 @@ /* Version of stm_DirectReadBarrier() that doesn't abort and assumes * that 'P' was already an up-to-date result of a previous * stm_DirectReadBarrier(). We only have to check if we did in the - * meantime a stm_write_barrier(). + * meantime a stm_write_barrier(). Should only be called if we + * have the flag PUBLIC_TO_PRIVATE or on MOVED objects. This version + * should never abort (it is used in stm_decode_abort_info()). */ - if (P->h_tid & GCFLAG_PUBLIC) + assert(P->h_tid & GCFLAG_PUBLIC); + assert(!(P->h_tid & GCFLAG_STUB)); + + if (P->h_tid & GCFLAG_MOVED) { - if (P->h_tid & GCFLAG_MOVED) + dprintf(("repeat_read_barrier: %p -> %p moved\n", P, + (gcptr)P->h_revision)); + P = (gcptr)P->h_revision; + assert(P->h_tid & GCFLAG_PUBLIC); + assert(!(P->h_tid & GCFLAG_STUB)); + assert(!(P->h_tid & GCFLAG_MOVED)); + if (!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) + return P; + } + assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); + + struct tx_descriptor *d = thread_descriptor; + wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + /* We have a key in 'public_to_private'. The value is the + corresponding private object. */ + dprintf(("repeat_read_barrier: %p -> %p public_to_private\n", P, item->val)); + P = item->val; + assert(!(P->h_tid & GCFLAG_PUBLIC)); + assert(!(P->h_tid & GCFLAG_STUB)); + assert(is_private(P)); + return P; + + no_private_obj: + /* Key not found. It should not be waiting in 'stolen_objects', + because this case from steal.c applies to objects to were originally + backup objects. 'P' cannot be a backup object if it was obtained + earlier as a result of stm_read_barrier(). 
+ */ + return P; +} + +gcptr stm_ImmutReadBarrier(gcptr P) +{ + assert(P->h_tid & GCFLAG_STUB); + assert(P->h_tid & GCFLAG_PUBLIC); + + revision_t v = ACCESS_ONCE(P->h_revision); + assert(IS_POINTER(v)); /* "is a pointer", "has a more recent revision" */ + + if (!(v & 2)) + { + P = (gcptr)v; + } + else + { + /* follow a stub reference */ + struct tx_descriptor *d = thread_descriptor; + struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); + if (foreign_pd == d->public_descriptor) { - P = (gcptr)P->h_revision; - assert(P->h_tid & GCFLAG_PUBLIC); + /* Same thread: dereference the pointer directly. */ + dprintf(("immut_read_barrier: %p -> %p via stub\n ", P, + (gcptr)(v - 2))); + P = (gcptr)(v - 2); } - if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) + else { - struct tx_descriptor *d = thread_descriptor; - wlog_t *item; - G2L_FIND(d->public_to_private, P, item, goto no_private_obj); - - P = item->val; - assert(!(P->h_tid & GCFLAG_PUBLIC)); - no_private_obj: - ; + /* stealing: needed because accessing v - 2 from this thread + is forbidden (the target might disappear under our feet) */ + dprintf(("immut_read_barrier: %p -> stealing...\n ", P)); + stm_steal_stub(P); } } - assert(!(P->h_tid & GCFLAG_STUB)); - return P; + return stm_immut_read_barrier(P); /* retry */ } static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj, @@ -565,6 +619,16 @@ } } +gcptr stm_RepeatWriteBarrier(gcptr P) +{ + assert(!(P->h_tid & GCFLAG_IMMUTABLE)); + assert(is_private(P)); + assert(P->h_tid & GCFLAG_WRITE_BARRIER); + P->h_tid &= ~GCFLAG_WRITE_BARRIER; + gcptrlist_insert(&thread_descriptor->old_objects_to_trace, P); + return P; +} + gcptr stm_WriteBarrier(gcptr P) { assert(!(P->h_tid & GCFLAG_IMMUTABLE)); diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -70,11 +70,11 @@ static const revision_t GCFLAG_VISITED = STM_FIRST_GCFLAG << 1; static const revision_t GCFLAG_PUBLIC = STM_FIRST_GCFLAG << 2; static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; -static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; +// in stmgc.h: GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; -static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; +// in stmgc.h: GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; -static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; +// in stmgc.h: GCFLAG_STUB = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; @@ -196,8 +196,10 @@ void SpinLoop(int); gcptr stm_DirectReadBarrier(gcptr); +gcptr stm_WriteBarrier(gcptr); gcptr stm_RepeatReadBarrier(gcptr); -gcptr stm_WriteBarrier(gcptr); +gcptr stm_ImmutReadBarrier(gcptr); +gcptr stm_RepeatWriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr); /* debugging: read barrier, but not recording anything */ int _stm_is_private(gcptr); /* debugging */ diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -154,6 +154,19 @@ return (p1 == p2); } +_Bool stm_pointer_equal_prebuilt(gcptr p1, gcptr p2) +{ + assert(p2 != NULL); + 
assert(p2->h_tid & GCFLAG_PREBUILT_ORIGINAL); + + if (p1 == p2) + return 1; + + /* the only possible case to still get True is if p2 == p1->h_original */ + return (p1 != NULL) && (p1->h_original == p2) && + !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL); +} + /************************************************************/ void stm_abort_info_push(gcptr obj, long fieldoffsets[]) @@ -205,7 +218,7 @@ WRITE_BUF(buffer, res_size); WRITE('e'); for (i=0; iabortinfo.size; i+=2) { - char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]); + char *object = (char*)stm_repeat_read_barrier(d->abortinfo.items[i+0]); long *fieldoffsets = (long*)d->abortinfo.items[i+1]; long kind, offset; size_t rps_size; diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -12cf412eb2d7+ +9dc18268f0da diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -42,6 +42,7 @@ /* returns nonzero if the two object-copy pointers belong to the same original object */ _Bool stm_pointer_equal(gcptr, gcptr); +_Bool stm_pointer_equal_prebuilt(gcptr, gcptr); /* 2nd arg is known prebuilt */ /* to push/pop objects into the local shadowstack */ #if 0 // (optimized version below) @@ -59,7 +60,7 @@ int stm_enter_callback_call(void); void stm_leave_callback_call(int); -/* read/write barriers (the most general versions only for now). +/* read/write barriers. - the read barrier must be applied before reading from an object. the result is valid as long as we're in the same transaction, @@ -69,10 +70,28 @@ the result is valid for a shorter period of time: we have to do stm_write_barrier() again if we ended the transaction, or if we did a potential collection (e.g. stm_allocate()). + + - as an optimization, stm_repeat_read_barrier() can be used + instead of stm_read_barrier() if the object was already + obtained by a stm_read_barrier() in the same transaction. + The only thing that may have occurred is that a + stm_write_barrier() on the same object could have made it + invalid. + + - a different optimization is to read immutable fields: in order + to do that, use stm_immut_read_barrier(), which only activates + on stubs. + + - stm_repeat_write_barrier() can be used on an object on which + we already did stm_write_barrier(), but a potential collection + can have occurred. 
*/ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); gcptr stm_write_barrier(gcptr); +gcptr stm_repeat_read_barrier(gcptr); +gcptr stm_immut_read_barrier(gcptr); +gcptr stm_repeat_write_barrier(gcptr); /* <= always returns its argument */ #endif /* start a new transaction, calls callback(), and when it returns @@ -159,7 +178,10 @@ extern __thread revision_t stm_private_rev_num; gcptr stm_DirectReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); +static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; static const revision_t GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; +static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_STUB = STM_FIRST_GCFLAG << 8; extern __thread char *stm_read_barrier_cache; #define FX_MASK 65535 #define FXCACHE_AT(obj) \ @@ -167,23 +189,6 @@ #define UNLIKELY(test) __builtin_expect(test, 0) - -static inline gcptr stm_read_barrier(gcptr obj) { - /* XXX optimize to get the smallest code */ - if (UNLIKELY((obj->h_revision != stm_private_rev_num) && - (FXCACHE_AT(obj) != obj))) - obj = stm_DirectReadBarrier(obj); - return obj; -} - -static inline gcptr stm_write_barrier(gcptr obj) { - if (UNLIKELY((obj->h_revision != stm_private_rev_num) | - ((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0))) - obj = stm_WriteBarrier(obj); - return obj; -} - -#if 0 #define stm_read_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) && \ (FXCACHE_AT(obj) != (obj))) ? \ @@ -195,6 +200,21 @@ (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? \ stm_WriteBarrier(obj) \ : (obj)) -#endif + +#define stm_repeat_read_barrier(obj) \ + (UNLIKELY((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED)) ? \ + stm_RepeatReadBarrier(obj) \ + : (obj)) + +#define stm_immut_read_barrier(obj) \ + (UNLIKELY((obj)->h_tid & GCFLAG_STUB) ? \ + stm_ImmutReadBarrier(obj) \ + : (obj)) + +#define stm_repeat_write_barrier(obj) \ + (UNLIKELY((obj)->h_tid & GCFLAG_WRITE_BARRIER) ? \ + stm_RepeatWriteBarrier(obj) \ + : (obj)) + #endif From noreply at buildbot.pypy.org Tue Aug 13 18:04:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 18:04:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Clean up imports Message-ID: <20130813160459.381511C32CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66123:dc733a049cd3 Date: 2013-08-13 16:38 +0200 http://bitbucket.org/pypy/pypy/changeset/dc733a049cd3/ Log: Clean up imports diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -1,8 +1,6 @@ -import py from rpython.rlib import rstm, rgc, objectmodel -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr from rpython.translator.stm.test.support import CompiledSTMTests from rpython.translator.stm.test import targetdemo2 From noreply at buildbot.pypy.org Tue Aug 13 18:05:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 18:05:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Use stm_pointer_equal_prebuilt; test. 
Message-ID: <20130813160500.75E031C32CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66124:9dad0a4b286c Date: 2013-08-13 16:46 +0200 http://bitbucket.org/pypy/pypy/changeset/9dad0a4b286c/ Log: Use stm_pointer_equal_prebuilt; test. diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -69,11 +69,20 @@ funcname, arg) def stm_ptr_eq(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) + args = [funcgen.expr(v) for v in op.args] result = funcgen.expr(op.result) + # check for prebuilt arguments + for i, j in [(0, 1), (1, 0)]: + if isinstance(op.args[j], Constant): + if op.args[j].value: # non-NULL + return ('%s = stm_pointer_equal_prebuilt((gcptr)%s, (gcptr)%s);' + % (result, args[i], args[j])) + else: + # this case might be unreachable, but better safe than sorry + return '%s = (%s == NULL);' % (result, args[i]) + # return '%s = stm_pointer_equal((gcptr)%s, (gcptr)%s);' % ( - result, arg0, arg1) + result, args[0], args[1]) def stm_become_inevitable(funcgen, op): try: diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -301,3 +301,31 @@ t, cbuilder = self.compile(main) data = cbuilder.cmdexec('a b') assert 'test ok\n' in data + + def test_stm_pointer_equal(self): + class Foo: + pass + prebuilt_foo = Foo() + def make(n): + foo1 = Foo() + foo2 = Foo() + if n < 100: + return foo1, foo2, foo1, None + return None, None, None, foo1 # to annotate as "can be none" + def main(argv): + foo1, foo2, foo3, foo4 = make(len(argv)) + assert foo1 is not prebuilt_foo + assert foo1 is not foo2 + assert foo1 is foo3 + assert foo4 is None + assert foo1 is not None + assert prebuilt_foo is not foo1 + assert None is not foo1 + assert None is foo4 + print 'test ok' + return 0 + + main([]) + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert 'test ok\n' in data From noreply at buildbot.pypy.org Tue Aug 13 19:03:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 19:03:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Yay, finally found out the objdump option "-m i386:x86-64". Message-ID: <20130813170355.BBD171C011D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66125:37b092c3f176 Date: 2013-08-13 19:02 +0200 http://bitbucket.org/pypy/pypy/changeset/37b092c3f176/ Log: Yay, finally found out the objdump option "-m i386:x86-64". Solves the truncated addresses. 
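(Illustration, not part of the changeset: for an x86-64 trace, the command template visible in the diff below now expands to roughly

    objdump -M x86-64 -b binary -m i386:x86-64 --disassembler-options=intel-mnemonics --adjust-vma=<origin> -D <tmpfile>

where <origin> and <tmpfile> are filled in by machine_code_dump(). Asking for the machine "i386:x86-64" rather than plain "i386" lets objdump decode 64-bit code at its real virtual address, which is why the same diff can drop the 32-bit masking of base addresses in parse().)
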
diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -61,6 +61,9 @@ 'arm': 'arm', 'arm_32': 'arm', } + backend_to_machine = { + 'x86-64': 'i386:x86-64', + } cmd = find_objdump() objdump = ('%(command)s -M %(backend)s -b binary -m %(machine)s ' '--disassembler-options=intel-mnemonics ' @@ -69,12 +72,13 @@ f = open(tmpfile, 'wb') f.write(data) f.close() + backend = objdump_backend_option[backend_name] p = subprocess.Popen(objdump % { 'command': cmd, 'file': tmpfile, 'origin': originaddr, - 'backend': objdump_backend_option[backend_name], - 'machine': 'i386' if not backend_name.startswith('arm') else 'arm', + 'backend': backend, + 'machine': backend_to_machine.get(backend, backend), }, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() assert not p.returncode, ('Encountered an error running objdump: %s' % @@ -240,7 +244,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True, truncate_addr=True): + def parse(self, f, textonly=True): for line in f: if line.startswith('BACKEND '): self.backend_name = line.split(' ')[1].strip() @@ -251,9 +255,7 @@ if len(pieces) == 3: continue # empty line baseaddr = long(pieces[1][1:], 16) - if truncate_addr: - baseaddr &= 0xFFFFFFFFL - elif baseaddr < 0: + if baseaddr < 0: baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset @@ -273,9 +275,7 @@ assert pieces[1].startswith('@') assert pieces[2].startswith('+') baseaddr = long(pieces[1][1:], 16) - if truncate_addr: - baseaddr &= 0xFFFFFFFFL - elif baseaddr < 0: + if baseaddr < 0: baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset From noreply at buildbot.pypy.org Tue Aug 13 19:03:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 19:03:57 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Yay, finally found out the objdump option "-m i386:x86-64". Message-ID: <20130813170357.0EC611C32CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66126:c37feae39276 Date: 2013-08-13 19:02 +0200 http://bitbucket.org/pypy/pypy/changeset/c37feae39276/ Log: Yay, finally found out the objdump option "-m i386:x86-64". Solves the truncated addresses. 
(transplanted from 37b092c3f176633f1d5cc053d351c5f2b0684482) diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -58,6 +58,9 @@ 'arm': 'arm', 'arm_32': 'arm', } + backend_to_machine = { + 'x86-64': 'i386:x86-64', + } cmd = find_objdump() objdump = ('%(command)s -w -M %(backend)s -b binary -m %(machine)s ' '--disassembler-options=intel-mnemonics ' @@ -66,12 +69,13 @@ f = open(tmpfile, 'wb') f.write(data) f.close() + backend = objdump_backend_option[backend_name] p = subprocess.Popen(objdump % { 'command': cmd, 'file': tmpfile, 'origin': originaddr, - 'backend': objdump_backend_option[backend_name], - 'machine': 'i386' if not backend_name.startswith('arm') else 'arm', + 'backend': backend, + 'machine': backend_to_machine.get(backend, backend), }, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() assert not p.returncode, ('Encountered an error running objdump: %s' % @@ -239,7 +243,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True, truncate_addr=True): + def parse(self, f, textonly=True): for line in f: line = line[line.find('#') + 1:].strip() if line.startswith('BACKEND '): @@ -251,9 +255,7 @@ if len(pieces) == 3: continue # empty line baseaddr = long(pieces[1][1:], 16) - if truncate_addr: - baseaddr &= 0xFFFFFFFFL - elif baseaddr < 0: + if baseaddr < 0: baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset @@ -273,9 +275,7 @@ assert pieces[1].startswith('@') assert pieces[2].startswith('+') baseaddr = long(pieces[1][1:], 16) - if truncate_addr: - baseaddr &= 0xFFFFFFFFL - elif baseaddr < 0: + if baseaddr < 0: baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset From noreply at buildbot.pypy.org Tue Aug 13 19:21:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 19:21:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Simplification: we don't actually need the -M option (backend) Message-ID: <20130813172109.D48D01C0294@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66127:619e39826584 Date: 2013-08-13 19:18 +0200 http://bitbucket.org/pypy/pypy/changeset/619e39826584/ Log: Simplification: we don't actually need the -M option (backend) if we specify the correct -m option (machine). 
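(Illustration, not part of the changeset: with this simplification the expanded command for an x86-64 trace becomes roughly

    objdump -b binary -m i386:x86-64 --disassembler-options=intel-mnemonics --adjust-vma=<origin> -D <tmpfile>

i.e. the separate -M disassembler option disappears, since -m i386:x86-64 already selects 64-bit decoding; the old objdump_backend_option table collapses into the objdump_machine_option mapping shown in the diff below.)
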
diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -51,34 +51,29 @@ raise ObjdumpNotFound('(g)objdump was not found in PATH') def machine_code_dump(data, originaddr, backend_name, label_list=None): - objdump_backend_option = { + objdump_machine_option = { 'x86': 'i386', 'x86-without-sse2': 'i386', 'x86_32': 'i386', - 'x86_64': 'x86-64', - 'x86-64': 'x86-64', + 'x86_64': 'i386:x86-64', + 'x86-64': 'i386:x86-64', 'i386': 'i386', 'arm': 'arm', 'arm_32': 'arm', } - backend_to_machine = { - 'x86-64': 'i386:x86-64', - } cmd = find_objdump() - objdump = ('%(command)s -M %(backend)s -b binary -m %(machine)s ' + objdump = ('%(command)s -b binary -m %(machine)s ' '--disassembler-options=intel-mnemonics ' '--adjust-vma=%(origin)d -D %(file)s') # f = open(tmpfile, 'wb') f.write(data) f.close() - backend = objdump_backend_option[backend_name] p = subprocess.Popen(objdump % { 'command': cmd, 'file': tmpfile, 'origin': originaddr, - 'backend': backend, - 'machine': backend_to_machine.get(backend, backend), + 'machine': objdump_machine_option[backend_name], }, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() assert not p.returncode, ('Encountered an error running objdump: %s' % From noreply at buildbot.pypy.org Tue Aug 13 20:38:28 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 13 Aug 2013 20:38:28 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Move some app-level methods of unicode and bytearray from top-level to under the scope of the app-level classes. Message-ID: <20130813183828.11CCF1C0294@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66128:7b0bb92b8531 Date: 2013-08-13 20:29 +0200 http://bitbucket.org/pypy/pypy/changeset/7b0bb92b8531/ Log: Move some app-level methods of unicode and bytearray from top-level to under the scope of the app-level classes. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -108,6 +108,57 @@ raise operationerrfmt(space.w_TypeError, msg, len(self.data)) return space.wrap(ord(self.data[0])) + @staticmethod + def descr_new(space, w_bytearraytype, __args__): + return new_bytearray(space, w_bytearraytype, []) + + def descr_reduce(self, space): + assert isinstance(self, W_BytearrayObject) + w_dict = self.getdict(space) + if w_dict is None: + w_dict = space.w_None + return space.newtuple([ + space.type(self), space.newtuple([ + space.wrap(''.join(self.data).decode('latin-1')), + space.wrap('latin-1')]), + w_dict]) + + @staticmethod + def descr_fromhex(space, w_bytearraytype, w_hexstring): + "bytearray.fromhex(string) -> bytearray\n" + "\n" + "Create a bytearray object from a string of hexadecimal numbers.\n" + "Spaces between two numbers are accepted.\n" + "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." 
+ hexstring = space.str_w(w_hexstring) + hexstring = hexstring.lower() + data = [] + length = len(hexstring) + i = -2 + while True: + i += 2 + while i < length and hexstring[i] == ' ': + i += 1 + if i >= length: + break + if i+1 == length: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + + top = _hex_digit_to_int(hexstring[i]) + if top == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + bot = _hex_digit_to_int(hexstring[i+1]) + if bot == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + data.append(chr(top*16 + bot)) + + # in CPython bytearray.fromhex is a staticmethod, so + # we ignore w_type and always return a bytearray + return new_bytearray(space, space.w_bytearray, data) + def descr_init(self, space, __args__): # this is on the silly side w_source, w_encoding, w_errors = __args__.parse_obj( @@ -294,10 +345,6 @@ return w_obj -def descr__new__(space, w_bytearraytype, __args__): - return new_bytearray(space,w_bytearraytype, []) - - def makebytearraydata_w(space, w_source): # String-like argument try: @@ -327,17 +374,6 @@ resizelist_hint(data, extended) return data -def descr_bytearray__reduce__(space, w_self): - assert isinstance(w_self, W_BytearrayObject) - w_dict = w_self.getdict(space) - if w_dict is None: - w_dict = space.w_None - return space.newtuple([ - space.type(w_self), space.newtuple([ - space.wrap(''.join(w_self.data).decode('latin-1')), - space.wrap('latin-1')]), - w_dict]) - def _hex_digit_to_int(d): val = ord(d) if 47 < val < 58: @@ -346,42 +382,6 @@ return val - 87 return -1 -def descr_fromhex(space, w_type, w_hexstring): - "bytearray.fromhex(string) -> bytearray\n" - "\n" - "Create a bytearray object from a string of hexadecimal numbers.\n" - "Spaces between two numbers are accepted.\n" - "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." 
- hexstring = space.str_w(w_hexstring) - hexstring = hexstring.lower() - data = [] - length = len(hexstring) - i = -2 - while True: - i += 2 - while i < length and hexstring[i] == ' ': - i += 1 - if i >= length: - break - if i+1 == length: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) - - top = _hex_digit_to_int(hexstring[i]) - if top == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) - bot = _hex_digit_to_int(hexstring[i+1]) - if bot == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) - data.append(chr(top*16 + bot)) - - # in CPython bytearray.fromhex is a staticmethod, so - # we ignore w_type and always return a bytearray - return new_bytearray(space, space.w_bytearray, data) - -# ____________________________________________________________ W_BytearrayObject.typedef = StdTypeDef( "bytearray", @@ -389,10 +389,10 @@ bytearray(sequence) -> bytearray initialized from sequence\'s items If the argument is a bytearray, the return value is the same object.''', - __new__ = interp2app(descr__new__), + __new__ = interp2app(W_BytearrayObject.descr_new), __hash__ = None, - __reduce__ = interp2app(descr_bytearray__reduce__), - fromhex = interp2app(descr_fromhex, as_classmethod=True), + __reduce__ = interp2app(W_BytearrayObject.descr_reduce), + fromhex = interp2app(W_BytearrayObject.descr_fromhex, as_classmethod=True), __repr__ = interp2app(W_BytearrayObject.descr_repr), __str__ = interp2app(W_BytearrayObject.descr_str), diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -140,6 +140,42 @@ def _newlist_unwrapped(self, space, lst): return space.newlist_unicode(lst) + @staticmethod + @unwrap_spec(w_string = WrappedDefault("")) + def descr_new(space, w_unicodetype, w_string, w_encoding=None, + w_errors=None): + # NB. 
the default value of w_obj is really a *wrapped* empty string: + # there is gateway magic at work + w_obj = w_string + + encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + # convoluted logic for the case when unicode subclass has a __unicode__ + # method, we need to call this method + is_precisely_unicode = space.is_w(space.type(w_obj), space.w_unicode) + if (is_precisely_unicode or + (space.isinstance_w(w_obj, space.w_unicode) and + space.findattr(w_obj, space.wrap('__unicode__')) is None)): + if encoding is not None or errors is not None: + raise OperationError(space.w_TypeError, space.wrap( + 'decoding Unicode is not supported')) + if (is_precisely_unicode and + space.is_w(w_unicodetype, space.w_unicode)): + return w_obj + w_value = w_obj + else: + if encoding is None and errors is None: + w_value = unicode_from_object(space, w_obj) + else: + w_value = unicode_from_encoded_object(space, w_obj, + encoding, errors) + if space.is_w(w_unicodetype, space.w_unicode): + return w_value + + assert isinstance(w_value, W_UnicodeObject) + w_newobj = space.allocate_instance(W_UnicodeObject, w_unicodetype) + W_UnicodeObject.__init__(w_newobj, w_value._value) + return w_newobj + def descr_repr(self, space): chars = self._value size = len(chars) @@ -375,44 +411,11 @@ return unicode_from_encoded_object(space, w_str, "ascii", "strict") - at unwrap_spec(w_string = WrappedDefault("")) -def descr_new_(space, w_unicodetype, w_string, w_encoding=None, w_errors=None): - # NB. the default value of w_obj is really a *wrapped* empty string: - # there is gateway magic at work - w_obj = w_string - - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - # convoluted logic for the case when unicode subclass has a __unicode__ - # method, we need to call this method - is_precisely_unicode = space.is_w(space.type(w_obj), space.w_unicode) - if (is_precisely_unicode or - (space.isinstance_w(w_obj, space.w_unicode) and - space.findattr(w_obj, space.wrap('__unicode__')) is None)): - if encoding is not None or errors is not None: - raise OperationError(space.w_TypeError, - space.wrap('decoding Unicode is not supported')) - if is_precisely_unicode and space.is_w(w_unicodetype, space.w_unicode): - return w_obj - w_value = w_obj - else: - if encoding is None and errors is None: - w_value = unicode_from_object(space, w_obj) - else: - w_value = unicode_from_encoded_object(space, w_obj, - encoding, errors) - if space.is_w(w_unicodetype, space.w_unicode): - return w_value - - assert isinstance(w_value, W_UnicodeObject) - w_newobj = space.allocate_instance(W_UnicodeObject, w_unicodetype) - W_UnicodeObject.__init__(w_newobj, w_value._value) - return w_newobj - # ____________________________________________________________ W_UnicodeObject.typedef = StdTypeDef( "unicode", basestring_typedef, - __new__ = interp2app(descr_new_), + __new__ = interp2app(W_UnicodeObject.descr_new), __doc__ = '''unicode(string [, encoding[, errors]]) -> object Create a new Unicode object from the given encoded string. From noreply at buildbot.pypy.org Tue Aug 13 20:59:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 20:59:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Hack at forkpty() to give it 2-level hooks, like a regular fork(). 
Message-ID: <20130813185902.6AD791C009F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66129:39bf27304f43 Date: 2013-08-13 19:54 +0200 http://bitbucket.org/pypy/pypy/changeset/39bf27304f43/ Log: Hack at forkpty() to give it 2-level hooks, like a regular fork(). No tests for the translation of forkpty()... diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -723,11 +723,16 @@ for hook in get_fork_hooks(where): hook(space) -def fork(space): +def _run_forking_function(space, kind): run_fork_hooks('before', space) - try: - pid = os.fork() + if kind == "F": + pid = os.fork() + master_fd = -1 + elif kind == "P": + pid, master_fd = os.forkpty() + else: + raise AssertionError except OSError, e: try: run_fork_hooks('parent', space) @@ -735,12 +740,14 @@ # Don't clobber the OSError if the fork failed pass raise wrap_oserror(space, e) - if pid == 0: run_fork_hooks('child', space) else: run_fork_hooks('parent', space) + return pid, master_fd +def fork(space): + pid, irrelevant = _run_forking_function(space, "F") return space.wrap(pid) def openpty(space): @@ -752,10 +759,7 @@ return space.newtuple([space.wrap(master_fd), space.wrap(slave_fd)]) def forkpty(space): - try: - pid, master_fd = os.forkpty() - except OSError, e: - raise wrap_oserror(space, e) + pid, master_fd = _run_forking_function(space, "P") return space.newtuple([space.wrap(pid), space.wrap(master_fd)]) diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1574,6 +1574,7 @@ _nowrapper = True) def fork_llimpl(): + # NB. keep forkpty() up-to-date, too opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) rthread.gc_thread_after_fork(childpid, opaqueaddr) @@ -1609,6 +1610,7 @@ @registering_if(os, 'forkpty') def register_os_forkpty(self): + from rpython.rlib import rthread os_forkpty = self.llexternal( 'forkpty', [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], @@ -1616,7 +1618,10 @@ compilation_info=ExternalCompilationInfo(libraries=['util'])) def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - childpid = os_forkpty(master_p, None, None, None) + opaqueaddr = rthread.gc_thread_before_fork() + childpid = rffi.cast(lltype.Signed, + os_forkpty(master_p, None, None, None)) + rthread.gc_thread_after_fork(childpid, opaqueaddr) master_fd = master_p[0] lltype.free(master_p, flavor='raw') if childpid == -1: From noreply at buildbot.pypy.org Tue Aug 13 20:59:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 20:59:04 +0200 (CEST) Subject: [pypy-commit] pypy default: Initialize master_fd to -1 (like CPython) which is the value that will Message-ID: <20130813185904.94ACE1C009F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66130:9445018c9ec4 Date: 2013-08-13 20:03 +0200 http://bitbucket.org/pypy/pypy/changeset/9445018c9ec4/ Log: Initialize master_fd to -1 (like CPython) which is the value that will eventually be seen by the child. 
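Taken together, the forkpty() hook change above and the one-line initialization in the diff below determine what an application sees from os.forkpty() on PyPy. A minimal sketch of the intended behaviour, assuming a POSIX platform with a working pseudo-terminal; the example is illustrative and not part of either changeset:

    import os

    pid, master_fd = os.forkpty()
    if pid == 0:
        # child: the 'child' fork hooks have run; per the log message
        # above, master_fd is deterministically -1 here (as in CPython)
        os._exit(0)
    else:
        # parent: the 'parent' fork hooks have run and master_fd is the
        # usable master side of the new pseudo-terminal
        os.close(master_fd)
        os.waitpid(pid, 0)
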
diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1618,6 +1618,7 @@ compilation_info=ExternalCompilationInfo(libraries=['util'])) def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + master_p[0] = rffi.cast(rffi.INT, -1) opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_forkpty(master_p, None, None, None)) From noreply at buildbot.pypy.org Tue Aug 13 21:00:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 21:00:26 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Hack at forkpty() to give it 2-level hooks, like a regular fork(). Message-ID: <20130813190026.438F31C009F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r66131:4b0d83fde581 Date: 2013-08-13 19:54 +0200 http://bitbucket.org/pypy/pypy/changeset/4b0d83fde581/ Log: Hack at forkpty() to give it 2-level hooks, like a regular fork(). No tests for the translation of forkpty()... (transplanted from 39bf27304f43aa6fcaed5bafc3a8a5b178b4cef0) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -733,11 +733,16 @@ for hook in get_fork_hooks(where): hook(space) -def fork(space): +def _run_forking_function(space, kind): run_fork_hooks('before', space) - try: - pid = os.fork() + if kind == "F": + pid = os.fork() + master_fd = -1 + elif kind == "P": + pid, master_fd = os.forkpty() + else: + raise AssertionError except OSError, e: try: run_fork_hooks('parent', space) @@ -745,12 +750,14 @@ # Don't clobber the OSError if the fork failed pass raise wrap_oserror(space, e) - if pid == 0: run_fork_hooks('child', space) else: run_fork_hooks('parent', space) + return pid, master_fd +def fork(space): + pid, irrelevant = _run_forking_function(space, "F") return space.wrap(pid) def openpty(space): @@ -762,10 +769,7 @@ return space.newtuple([space.wrap(master_fd), space.wrap(slave_fd)]) def forkpty(space): - try: - pid, master_fd = os.forkpty() - except OSError, e: - raise wrap_oserror(space, e) + pid, master_fd = _run_forking_function(space, "P") return space.newtuple([space.wrap(pid), space.wrap(master_fd)]) diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1599,6 +1599,7 @@ _nowrapper = True) def fork_llimpl(): + # NB. 
keep forkpty() up-to-date, too opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) rthread.gc_thread_after_fork(childpid, opaqueaddr) @@ -1634,6 +1635,7 @@ @registering_if(os, 'forkpty') def register_os_forkpty(self): + from rpython.rlib import rthread os_forkpty = self.llexternal( 'forkpty', [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], @@ -1641,7 +1643,10 @@ compilation_info=ExternalCompilationInfo(libraries=['util'])) def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - childpid = os_forkpty(master_p, None, None, None) + opaqueaddr = rthread.gc_thread_before_fork() + childpid = rffi.cast(lltype.Signed, + os_forkpty(master_p, None, None, None)) + rthread.gc_thread_after_fork(childpid, opaqueaddr) master_fd = master_p[0] lltype.free(master_p, flavor='raw') if childpid == -1: From noreply at buildbot.pypy.org Tue Aug 13 21:00:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Aug 2013 21:00:27 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Initialize master_fd to -1 (like CPython) which is the value that will Message-ID: <20130813190027.81FCD1C009F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r66132:1074bd79c42c Date: 2013-08-13 20:03 +0200 http://bitbucket.org/pypy/pypy/changeset/1074bd79c42c/ Log: Initialize master_fd to -1 (like CPython) which is the value that will eventually be seen by the child. (transplanted from 9445018c9ec419b5fe4ffa3501f1abba9e819ddc) diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1643,6 +1643,7 @@ compilation_info=ExternalCompilationInfo(libraries=['util'])) def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + master_p[0] = rffi.cast(rffi.INT, -1) opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_forkpty(master_p, None, None, None)) From noreply at buildbot.pypy.org Tue Aug 13 22:15:26 2013 From: noreply at buildbot.pypy.org (taavi_burns) Date: Tue, 13 Aug 2013 22:15:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Adds ones_like and zeros_like ufuncs. Message-ID: <20130813201526.273151C009F@cobra.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: Changeset: r66133:78d2ef3b1442 Date: 2013-08-13 15:51 -0400 http://bitbucket.org/pypy/pypy/changeset/78d2ef3b1442/ Log: Adds ones_like and zeros_like ufuncs. 
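For reference, the new ufuncs simply return an array shaped like their input with every element set to one or zero, respectively. A short usage sketch, taken from the tests added in the diff below (numpypy is the application-level name of PyPy's micronumpy module):

    from numpypy import array, ones_like, zeros_like

    assert ones_like(2) == array(1)
    assert ones_like(2.) == array(1.)
    assert zeros_like(complex(2)) == array(complex(0))
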
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -176,6 +176,8 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), + ('ones_like', 'ones_like'), + ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -676,6 +676,8 @@ "allow_complex": False}), ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True, "allow_complex": False}), + ("ones_like", "ones_like", 1), + ("zeros_like", "zeros_like", 1), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -955,4 +955,14 @@ assert logaddexp2(float('inf'), float('-inf')) == float('inf') assert logaddexp2(float('inf'), float('inf')) == float('inf') + def test_ones_like(self): + from numpypy import array, complex, ones_like + assert ones_like(2) == array(1) + assert ones_like(complex(2)) == array(complex(1)) + + def test_zeros_like(self): + from numpypy import array, complex, zeros_like + + assert zeros_like(2) == array(0) + assert zeros_like(complex(2)) == array(complex(0)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -314,6 +314,15 @@ else: return v + @simple_unary_op + def ones_like(self, v): + return 1 + + @simple_unary_op + def zeros_like(self, v): + return 0 + + class NonNativePrimitive(Primitive): _mixin_ = True @@ -1609,6 +1618,15 @@ except ValueError: return rfloat.NAN, rfloat.NAN + @complex_unary_op + def ones_like(self, v): + return 1, 0 + + @complex_unary_op + def zeros_like(self, v): + return 0, 0 + + class Complex64(ComplexFloating, BaseType): _attrs_ = () From noreply at buildbot.pypy.org Tue Aug 13 22:46:28 2013 From: noreply at buildbot.pypy.org (taavi_burns) Date: Tue, 13 Aug 2013 22:46:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Adds a few more tests of types for zeros_like and ones_like. Message-ID: <20130813204628.D50E01C32CC@cobra.cs.uni-duesseldorf.de> Author: Taavi Burns Branch: Changeset: r66134:64bcad26b09c Date: 2013-08-13 16:43 -0400 http://bitbucket.org/pypy/pypy/changeset/64bcad26b09c/ Log: Adds a few more tests of types for zeros_like and ones_like. diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -956,13 +956,17 @@ assert logaddexp2(float('inf'), float('inf')) == float('inf') def test_ones_like(self): - from numpypy import array, complex, ones_like + from numpypy import array, ones_like + assert ones_like(False) == array(True) assert ones_like(2) == array(1) + assert ones_like(2.) == array(1.) assert ones_like(complex(2)) == array(complex(1)) def test_zeros_like(self): - from numpypy import array, complex, zeros_like + from numpypy import array, zeros_like + assert zeros_like(True) == array(False) assert zeros_like(2) == array(0) + assert zeros_like(2.) == array(0.) 
assert zeros_like(complex(2)) == array(complex(0)) From noreply at buildbot.pypy.org Wed Aug 14 02:22:35 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 14 Aug 2013 02:22:35 +0200 (CEST) Subject: [pypy-commit] pypy default: prefer print as a statement and one more debug print Message-ID: <20130814002235.51E1B1C011D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r66135:ee703e7a6a8a Date: 2013-08-13 17:18 -0700 http://bitbucket.org/pypy/pypy/changeset/ee703e7a6a8a/ Log: prefer print as a statement and one more debug print diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -28,16 +28,17 @@ import __pypy__, thread, signal, time, sys def subthread(): + print('subthread started') try: with __pypy__.thread.signals_enabled: thread.interrupt_main() for i in range(10): - print 'x' + print('x') time.sleep(0.1) except BaseException, e: interrupted.append(e) finally: - print 'subthread stops, interrupted=%r' % (interrupted,) + print('subthread stops, interrupted=%r' % (interrupted,)) done.append(None) # This is normally called by app_main.py @@ -53,13 +54,13 @@ try: done = [] interrupted = [] - print '--- start ---' + print('--- start ---') thread.start_new_thread(subthread, ()) for j in range(10): if len(done): break - print '.' + print('.') time.sleep(0.1) - print 'main thread loop done' + print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 assert 'KeyboardInterrupt' in interrupted[0].__class__.__name__ @@ -80,7 +81,7 @@ def threadfunction(): pid = fork() if pid == 0: - print 'in child' + print('in child') # signal() only works from the 'main' thread signal.signal(signal.SIGUSR1, signal.SIG_IGN) os._exit(42) From noreply at buildbot.pypy.org Wed Aug 14 02:22:37 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 14 Aug 2013 02:22:37 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130814002237.5427B1C011D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r66136:2822f8aa1a49 Date: 2013-08-13 17:20 -0700 http://bitbucket.org/pypy/pypy/changeset/2822f8aa1a49/ Log: merge default diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -152,7 +152,8 @@ try: plaincontent = dot2plain_graphviz(content, contenttype) except PlainParseError, e: - print e - # failed, retry via codespeak - plaincontent = dot2plain_codespeak(content, contenttype) + raise + ##print e + ### failed, retry via codespeak + ##plaincontent = dot2plain_codespeak(content, contenttype) return list(parse_plain(graph_id, plaincontent, links, fixedfont)) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -28,6 +28,7 @@ import __pypy__, _thread, signal, time, sys def subthread(): + print('subthread started') try: with __pypy__.thread.signals_enabled: _thread.interrupt_main() diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -140,6 +140,7 @@ ("deg2rad", "radians"), ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), + ("rint", "rint"), ("sign", "sign"), ("signbit", "signbit"), ("sin", "sin"), @@ -175,6 +176,8 @@ ('logaddexp2', 'logaddexp2'), ('real', 
'real'), ('imag', 'imag'), + ('ones_like', 'ones_like'), + ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -615,6 +615,7 @@ ("positive", "pos", 1), ("negative", "neg", 1), ("absolute", "abs", 1, {"complex_to_float": True}), + ("rint", "rint", 1), ("sign", "sign", 1, {"promote_bools": True}), ("signbit", "signbit", 1, {"bool_result": True, "allow_complex": False}), @@ -670,6 +671,8 @@ "allow_complex": False}), ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True, "allow_complex": False}), + ("ones_like", "ones_like", 1), + ("zeros_like", "zeros_like", 1), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -255,6 +255,22 @@ for i in range(3): assert c[i] == a[i] * b[i] + def test_rint(self): + from numpypy import array, complex, rint, isnan + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + reference = array([ninf, -2., -1., -0., 0., 0., 0., 1., 2., inf]) + a = array([ninf, -1.5, -1., -0.5, -0., 0., 0.5, 1., 1.5, inf]) + b = rint(a) + for i in range(len(a)): + assert b[i] == reference[i] + assert isnan(rint(nan)) + assert isnan(rint(nnan)) + + assert rint(complex(inf, 1.5)) == complex(inf, 2.) + assert rint(complex(0.5, inf)) == complex(0., inf) + def test_sign(self): from numpypy import array, sign, dtype @@ -939,4 +955,18 @@ assert logaddexp2(float('inf'), float('-inf')) == float('inf') assert logaddexp2(float('inf'), float('inf')) == float('inf') + def test_ones_like(self): + from numpypy import array, ones_like + assert ones_like(False) == array(True) + assert ones_like(2) == array(1) + assert ones_like(2.) == array(1.) + assert ones_like(complex(2)) == array(complex(1)) + + def test_zeros_like(self): + from numpypy import array, zeros_like + + assert zeros_like(True) == array(False) + assert zeros_like(2) == array(0) + assert zeros_like(2.) == array(0.) 
+ assert zeros_like(complex(2)) == array(complex(0)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -307,6 +307,22 @@ def min(self, v1, v2): return min(v1, v2) + @simple_unary_op + def rint(self, v): + if isfinite(v): + return rfloat.round_double(v, 0, half_even=True) + else: + return v + + @simple_unary_op + def ones_like(self, v): + return 1 + + @simple_unary_op + def zeros_like(self, v): + return 0 + + class NonNativePrimitive(Primitive): _mixin_ = True @@ -1392,11 +1408,14 @@ def round(self, v, decimals=0): ans = list(self.for_computation(self.unbox(v))) if isfinite(ans[0]): - ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) + ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) if isfinite(ans[1]): - ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) + ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) return self.box_complex(ans[0], ans[1]) + def rint(self, v): + return self.round(v) + # No floor, ceil, trunc in numpy for complex #@simple_unary_op #def floor(self, v): @@ -1599,6 +1618,15 @@ except ValueError: return rfloat.NAN, rfloat.NAN + @complex_unary_op + def ones_like(self, v): + return 1, 0 + + @complex_unary_op + def zeros_like(self, v): + return 0, 0 + + class Complex64(ComplexFloating, BaseType): _attrs_ = () diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -725,11 +725,16 @@ for hook in get_fork_hooks(where): hook(space) -def fork(space): +def _run_forking_function(space, kind): run_fork_hooks('before', space) - try: - pid = os.fork() + if kind == "F": + pid = os.fork() + master_fd = -1 + elif kind == "P": + pid, master_fd = os.forkpty() + else: + raise AssertionError except OSError, e: try: run_fork_hooks('parent', space) @@ -737,12 +742,14 @@ # Don't clobber the OSError if the fork failed pass raise wrap_oserror(space, e) - if pid == 0: run_fork_hooks('child', space) else: run_fork_hooks('parent', space) + return pid, master_fd +def fork(space): + pid, irrelevant = _run_forking_function(space, "F") return space.wrap(pid) def openpty(space): @@ -754,10 +761,7 @@ return space.newtuple([space.wrap(master_fd), space.wrap(slave_fd)]) def forkpty(space): - try: - pid, master_fd = os.forkpty() - except OSError, e: - raise wrap_oserror(space, e) + pid, master_fd = _run_forking_function(space, "P") return space.newtuple([space.wrap(pid), space.wrap(master_fd)]) diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -85,7 +85,7 @@ continue e = elem.split("\t") adr = e[0] - v = " ".join(e[2:]) + v = elem # --- more compactly: " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start @@ -379,15 +379,16 @@ name = entry[:entry.find('(') - 1].lower() addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) + from rpython.jit.backend.tool.viewcode import World + world = World() + for entry in extract_category(log, 'jit-backend-dump'): + world.parse(entry.splitlines(True), truncate_addr=False) dumps = {} - for entry in extract_category(log, 'jit-backend-dump'): - backend, _, dump, _ = entry.split("\n") - _, addr, _, data = re.split(" +", dump) - backend_name = backend.split(" ")[1] - addr = int(addr[1:], 16) - if addr in addrs and addrs[addr]: - name = 
addrs[addr].pop(0) # they should come in order - dumps[name] = (backend_name, addr, data) + for r in world.ranges: + if r.addr in addrs and addrs[r.addr]: + name = addrs[r.addr].pop(0) # they should come in order + data = r.data.encode('hex') # backward compatibility + dumps[name] = (world.backend_name, r.addr, data) loops = [] for entry in extract_category(log, 'jit-log-opt'): parser = ParserCls(entry, None, {}, 'lltype', None, diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -62,20 +62,21 @@ self.current_clt = looptoken.compiled_loop_token self.mc = InstrBuilder(self.cpu.cpuinfo.arch_version) self.pending_guards = [] - assert self.datablockwrapper is None + #assert self.datablockwrapper is None --- but obscure case + # possible, e.g. getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) self.mc.datablockwrapper = self.datablockwrapper self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.current_clt = None self._regalloc = None self.mc = None self.pending_guards = None - assert self.datablockwrapper is None def setup_failure_recovery(self): self.failure_recovery_code = [0, 0, 0, 0] @@ -889,7 +890,7 @@ relative_offset = tok.pos_recovery_stub - tok.offset guard_pos = block_start + tok.offset if not tok.is_guard_not_invalidated: - # patch the guard jumpt to the stub + # patch the guard jump to the stub # overwrite the generate NOP with a B_offs to the pos of the # stub mc = InstrBuilder(self.cpu.cpuinfo.arch_version) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -33,6 +33,7 @@ from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.arm import callbuilder +from rpython.rlib.rarithmetic import r_uint class ArmGuardToken(GuardToken): @@ -190,7 +191,7 @@ self.mc.RSB_ri(resloc.value, l0.value, imm=0) return fcond - def _emit_guard(self, op, arglocs, fcond, save_exc, + def build_guard_token(self, op, frame_depth, arglocs, offset, fcond, save_exc, is_guard_not_invalidated=False, is_guard_not_forced=False): assert isinstance(save_exc, bool) @@ -198,7 +199,27 @@ descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) + gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + token = ArmGuardToken(self.cpu, gcmap, + descr, + failargs=op.getfailargs(), + fail_locs=arglocs, + offset=offset, + exc=save_exc, + frame_depth=frame_depth, + is_guard_not_invalidated=is_guard_not_invalidated, + is_guard_not_forced=is_guard_not_forced, + fcond=fcond) + return token + + def _emit_guard(self, op, arglocs, fcond, save_exc, + is_guard_not_invalidated=False, + is_guard_not_forced=False): pos = self.mc.currpos() + token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], pos, fcond, save_exc, + is_guard_not_invalidated, + is_guard_not_forced) + self.pending_guards.append(token) # For all guards that are not GUARD_NOT_INVALIDATED we emit a # breakpoint to ensure the location is patched correctly. 
In the case # of GUARD_NOT_INVALIDATED we use just a NOP, because it is only @@ -207,17 +228,6 @@ self.mc.NOP() else: self.mc.BKPT() - gcmap = allocate_gcmap(self, arglocs[0].value, JITFRAME_FIXED_SIZE) - self.pending_guards.append(ArmGuardToken(self.cpu, gcmap, - descr, - failargs=op.getfailargs(), - fail_locs=arglocs[1:], - offset=pos, - exc=save_exc, - frame_depth=arglocs[0].value, - is_guard_not_invalidated=is_guard_not_invalidated, - is_guard_not_forced=is_guard_not_forced, - fcond=fcond)) return c.AL def _emit_guard_overflow(self, guard, failargs, fcond): @@ -351,7 +361,11 @@ # XXX self.mov(fail_descr_loc, RawStackLoc(ofs)) self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(0) # r0 + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -912,6 +926,14 @@ return fcond + def store_force_descr(self, op, fail_locs, frame_depth): + pos = self.mc.currpos() + guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL, True, False, True) + #self.pending_guards.append(guard_token) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(pos, guard_token) + def emit_op_force_token(self, op, arglocs, regalloc, fcond): # XXX kill me res_loc = arglocs[0] diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1194,6 +1194,12 @@ # self._compute_hint_frame_locations_from_descr(descr) return [] + def prepare_op_guard_not_forced_2(self, op, fcond): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = self._prepare_guard(op) + self.assembler.store_force_descr(op, fail_locs[1:], fail_locs[0].value) + self.possibly_free_vars(op.getfailargs()) + def prepare_guard_call_may_force(self, op, guard_op, fcond): args = self._prepare_call(op, save_all_regs=True) return self._prepare_guard(guard_op, args) diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -51,18 +51,18 @@ raise ObjdumpNotFound('(g)objdump was not found in PATH') def machine_code_dump(data, originaddr, backend_name, label_list=None): - objdump_backend_option = { + objdump_machine_option = { 'x86': 'i386', 'x86-without-sse2': 'i386', 'x86_32': 'i386', - 'x86_64': 'x86-64', - 'x86-64': 'x86-64', + 'x86_64': 'i386:x86-64', + 'x86-64': 'i386:x86-64', 'i386': 'i386', 'arm': 'arm', 'arm_32': 'arm', } cmd = find_objdump() - objdump = ('%(command)s -M %(backend)s -b binary -m %(machine)s ' + objdump = ('%(command)s -b binary -m %(machine)s ' '--disassembler-options=intel-mnemonics ' '--adjust-vma=%(origin)d -D %(file)s') # @@ -73,8 +73,7 @@ 'command': cmd, 'file': tmpfile, 'origin': originaddr, - 'backend': objdump_backend_option[backend_name], - 'machine': 'i386' if not backend_name.startswith('arm') else 'arm', + 'machine': objdump_machine_option[backend_name], }, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() assert not p.returncode, ('Encountered an error running objdump: %s' % @@ -250,7 +249,9 @@ assert pieces[2].startswith('+') if len(pieces) == 3: continue # empty line - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + 
baseaddr = long(pieces[1][1:], 16) + if baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset data = pieces[3].replace(':', '').decode('hex') @@ -268,14 +269,19 @@ pieces = line.split(None, 3) assert pieces[1].startswith('@') assert pieces[2].startswith('+') - baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + baseaddr = long(pieces[1][1:], 16) + if baseaddr < 0: + baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset self.logentries[addr] = pieces[3] elif line.startswith('SYS_EXECUTABLE '): filename = line[len('SYS_EXECUTABLE '):].strip() if filename != self.executable_name and filename != '??': - self.symbols.update(load_symbols(filename)) + try: + self.symbols.update(load_symbols(filename)) + except Exception as e: + print e self.executable_name = filename def find_cross_references(self): diff --git a/rpython/jit/metainterp/typesystem.py b/rpython/jit/metainterp/typesystem.py --- a/rpython/jit/metainterp/typesystem.py +++ b/rpython/jit/metainterp/typesystem.py @@ -52,7 +52,10 @@ return FUNCTYPE, FUNCPTRTYPE def get_superclass(self, TYPE): - return lltype.Ptr(TYPE.TO._first_struct()[1]) + SUPER = TYPE.TO._first_struct()[1] + if SUPER is None: + return None + return lltype.Ptr(SUPER) def cast_to_instance_maybe(self, TYPE, instance): return lltype.cast_pointer(TYPE, instance) diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py --- a/rpython/jit/metainterp/virtualizable.py +++ b/rpython/jit/metainterp/virtualizable.py @@ -17,8 +17,14 @@ self.cpu = cpu self.BoxArray = cpu.ts.BoxRef # + VTYPEPTR1 = VTYPEPTR while 'virtualizable_accessor' not in deref(VTYPEPTR)._hints: VTYPEPTR = cpu.ts.get_superclass(VTYPEPTR) + assert VTYPEPTR is not None, ( + "%r is listed in the jit driver's 'virtualizables', " + "but that class doesn't have a '_virtualizable_' attribute " + "(if it has _virtualizable2_, rename it to _virtualizable_)" + % (VTYPEPTR1,)) self.VTYPEPTR = VTYPEPTR self.VTYPE = VTYPE = deref(VTYPEPTR) self.vable_token_descr = cpu.fielddescrof(VTYPE, 'vable_token') diff --git a/rpython/memory/gc/env.py b/rpython/memory/gc/env.py --- a/rpython/memory/gc/env.py +++ b/rpython/memory/gc/env.py @@ -131,13 +131,8 @@ # ---------- Linux2 ---------- -try: - ARCH = os.uname()[4] # machine -except (OSError, AttributeError): - ARCH = '' - def get_L2cache_linux2(): - arch = ARCH # precomputed; the call to os.uname() is not translated + arch = os.uname()[4] # machine if arch.endswith('86') or arch == 'x86_64': return get_L2cache_linux2_cpuinfo() if arch in ('alpha', 'ppc', 'ppc64'): diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -2046,6 +2046,8 @@ # The code relies on the fact that no weakref can be an old object # weakly pointing to a young object. Indeed, weakrefs are immutable # so they cannot point to an object that was created after it. + # Thanks to this, during a minor collection, we don't have to fix + # or clear the address stored in old weakrefs. 
def invalidate_young_weakrefs(self): """Called during a nursery collection.""" # walk over the list of objects that contain weakrefs and are in the diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -558,19 +558,22 @@ if -self.pos <= difpos <= currentsize: self.pos += difpos return - self.buf = "" - self.pos = 0 if whence == 1: offset -= currentsize try: self.do_seek(offset, whence) except MyNotImplementedError: + self.buf = "" + self.pos = 0 if difpos < 0: raise if whence == 0: offset = difpos - currentsize intoffset = offset2int(offset) self.read(intoffset) + else: + self.buf = "" + self.pos = 0 return if whence == 2: try: diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1574,6 +1574,7 @@ _nowrapper = True) def fork_llimpl(): + # NB. keep forkpty() up-to-date, too opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) rthread.gc_thread_after_fork(childpid, opaqueaddr) @@ -1609,6 +1610,7 @@ @registering_if(os, 'forkpty') def register_os_forkpty(self): + from rpython.rlib import rthread os_forkpty = self.llexternal( 'forkpty', [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], @@ -1616,7 +1618,11 @@ compilation_info=ExternalCompilationInfo(libraries=['util'])) def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - childpid = os_forkpty(master_p, None, None, None) + master_p[0] = rffi.cast(rffi.INT, -1) + opaqueaddr = rthread.gc_thread_before_fork() + childpid = rffi.cast(lltype.Signed, + os_forkpty(master_p, None, None, None)) + rthread.gc_thread_after_fork(childpid, opaqueaddr) master_fd = master_p[0] lltype.free(master_p, flavor='raw') if childpid == -1: diff --git a/rpython/tool/logparser.py b/rpython/tool/logparser.py --- a/rpython/tool/logparser.py +++ b/rpython/tool/logparser.py @@ -133,6 +133,8 @@ def rectime(category1, timestart1, timestop1, subcats): substartstop = [] for entry in getsubcategories(subcats): + if len(entry) != 4: + continue rectime(*entry) substartstop.append(entry[1:3]) # (start, stop) # compute the total time for category1 as the part of the @@ -238,7 +240,11 @@ # def recdraw(sublist, subheight): firstx1 = None - for category1, timestart1, timestop1, subcats in sublist: + for entry in sublist: + try: + category1, timestart1, timestop1, subcats = entry + except ValueError: + continue x1 = int((timestart1 - timestart0) * timefactor) x2 = int((timestop1 - timestart0) * timefactor) y1 = (height - subheight) / 2 From noreply at buildbot.pypy.org Wed Aug 14 02:22:38 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 14 Aug 2013 02:22:38 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: issue1573: cursor description names should be plain strs Message-ID: <20130814002238.B42121C011D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: release-2.1.x Changeset: r66137:e4405b750715 Date: 2013-08-02 11:51 -0700 http://bitbucket.org/pypy/pypy/changeset/e4405b750715/ Log: issue1573: cursor description names should be plain strs (transplanted from 56e44474776161b34b0838d09fdf3e3fa220ce03) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1305,7 +1305,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = 
_ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Tests for _sqlite3.py""" import pytest, sys @@ -222,3 +223,8 @@ cur.execute("create table test(a)") cur.executemany("insert into test values (?)", [[1], [2], [3]]) assert cur.lastrowid is None + +def test_issue1573(con): + cur = con.cursor() + cur.execute(u'SELECT 1 as méil') + assert cur.description[0][0] == u"méil".encode('utf-8') From noreply at buildbot.pypy.org Wed Aug 14 02:24:02 2013 From: noreply at buildbot.pypy.org (chrish42) Date: Wed, 14 Aug 2013 02:24:02 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: In string formatting, if conversion to a number with __int__() fails, we should retry with __long__(). Makes a new testcase introduced in Python 2.7.4 pass. Message-ID: <20130814002402.DF5D31C011D@cobra.cs.uni-duesseldorf.de> Author: Christian Hudon Branch: stdlib-2.7.4 Changeset: r66138:028723fd3b86 Date: 2013-08-12 15:20 -0400 http://bitbucket.org/pypy/pypy/changeset/028723fd3b86/ Log: In string formatting, if conversion to a number with __int__() fails, we should retry with __long__(). Makes a new testcase introduced in Python 2.7.4 pass. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,6 +231,18 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def long(self, space): + w_impl = space.lookup(self, '__long__') + if w_impl is None: + raise operationerrfmt(space.w_TypeError, + "unsupported operand type for long(): '%T'", self) + w_result = space.get_and_call_function(w_impl, self) + + if space.isinstance_w(w_result, space.w_long): + return w_result + msg = "__long__ returned non-long (type '%T')" + raise operationerrfmt(space.w_TypeError, msg, w_result) + def __spacebind__(self, space): return self diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -543,7 +543,10 @@ def format_num_helper_generator(fmt, digits): def format_num_helper(space, w_value): - w_value = maybe_int(space, w_value) + try: + w_value = maybe_int(space, w_value) + except OperationError: + w_value = space.long(w_value) try: value = space.int_w(w_value) return fmt % (value,) diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -186,6 +186,16 @@ def test_broken_unicode(self): raises(UnicodeDecodeError, 'Názov: %s'.__mod__, u'Jerry') + def test_format_retry_with_long_if_int_fails(self): + class IntFails(object): + def __int__(self): + raise Exception + def __long__(self): + return 0L + + assert "%x" % IntFails() == '0' + + class AppTestWidthPrec: def test_width(self): a = 'a' From noreply at buildbot.pypy.org Wed Aug 14 02:24:04 2013 From: noreply at buildbot.pypy.org (chrish42) Date: Wed, 14 Aug 2013 02:24:04 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Remove unnecessary long method on W_root. 
Message-ID: <20130814002404.384001C011D@cobra.cs.uni-duesseldorf.de> Author: Christian Hudon Branch: stdlib-2.7.4 Changeset: r66139:e9f522eea11f Date: 2013-08-13 17:19 -0400 http://bitbucket.org/pypy/pypy/changeset/e9f522eea11f/ Log: Remove unnecessary long method on W_root. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,18 +231,6 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) - def long(self, space): - w_impl = space.lookup(self, '__long__') - if w_impl is None: - raise operationerrfmt(space.w_TypeError, - "unsupported operand type for long(): '%T'", self) - w_result = space.get_and_call_function(w_impl, self) - - if space.isinstance_w(w_result, space.w_long): - return w_result - msg = "__long__ returned non-long (type '%T')" - raise operationerrfmt(space.w_TypeError, msg, w_result) - def __spacebind__(self, space): return self From noreply at buildbot.pypy.org Wed Aug 14 02:24:05 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 14 Aug 2013 02:24:05 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Merged in chrish42/pypy/stdlib-2.7.4 (pull request #181) Message-ID: <20130814002405.8E9931C011D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.4 Changeset: r66140:97fee6f50a83 Date: 2013-08-13 17:23 -0700 http://bitbucket.org/pypy/pypy/changeset/97fee6f50a83/ Log: Merged in chrish42/pypy/stdlib-2.7.4 (pull request #181) In string formatting, if conversion to a number with __int__() fails, we should retry with __long__() diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -543,7 +543,10 @@ def format_num_helper_generator(fmt, digits): def format_num_helper(space, w_value): - w_value = maybe_int(space, w_value) + try: + w_value = maybe_int(space, w_value) + except OperationError: + w_value = space.long(w_value) try: value = space.int_w(w_value) return fmt % (value,) diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -186,6 +186,16 @@ def test_broken_unicode(self): raises(UnicodeDecodeError, 'Názov: %s'.__mod__, u'Jerry') + def test_format_retry_with_long_if_int_fails(self): + class IntFails(object): + def __int__(self): + raise Exception + def __long__(self): + return 0L + + assert "%x" % IntFails() == '0' + + class AppTestWidthPrec: def test_width(self): a = 'a' From noreply at buildbot.pypy.org Wed Aug 14 08:45:27 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 Aug 2013 08:45:27 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: skip tests individually instead of wholesale if genreflex is missing Message-ID: <20130814064527.DDC5B1C32CC@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r66141:21041133c1e2 Date: 2013-08-06 05:28 -0700 http://bitbucket.org/pypy/pypy/changeset/21041133c1e2/ Log: skip tests individually instead of wholesale if genreflex is missing diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,5 +1,1 @@ import py - -def pytest_runtest_setup(item): - if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not 
installed") diff --git a/pypy/module/cppyy/test/test_aclassloader.py b/pypy/module/cppyy/test/test_aclassloader.py --- a/pypy/module/cppyy/test/test_aclassloader.py +++ b/pypy/module/cppyy/test/test_aclassloader.py @@ -1,5 +1,7 @@ import py, os, sys +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -1,5 +1,8 @@ import py, os, sys +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") + from pypy.module.cppyy import capi diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -1,4 +1,8 @@ import py, os, sys + +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") + from pypy.module.cppyy import interp_cppyy, executor diff --git a/pypy/module/cppyy/test/test_crossing.py b/pypy/module/cppyy/test/test_crossing.py --- a/pypy/module/cppyy/test/test_crossing.py +++ b/pypy/module/cppyy/test/test_crossing.py @@ -1,4 +1,8 @@ import py, os, sys + +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") + from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator import platform diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -1,5 +1,7 @@ import py, os, sys +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("datatypesDict.so")) diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -1,6 +1,11 @@ import py, os, sys + +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") + from pypy.module.cppyy import capi + currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("fragileDict.so")) diff --git a/pypy/module/cppyy/test/test_operators.py b/pypy/module/cppyy/test/test_operators.py --- a/pypy/module/cppyy/test/test_operators.py +++ b/pypy/module/cppyy/test/test_operators.py @@ -1,5 +1,7 @@ import py, os, sys +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("operatorsDict.so")) diff --git a/pypy/module/cppyy/test/test_overloads.py b/pypy/module/cppyy/test/test_overloads.py --- a/pypy/module/cppyy/test/test_overloads.py +++ b/pypy/module/cppyy/test/test_overloads.py @@ -1,5 +1,7 @@ import py, os, sys +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("overloadsDict.so")) diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -1,4 +1,8 @@ import py, os, sys + +if py.path.local.sysfind('genreflex') 
is None: + py.test.skip("genreflex is not installed") + from pypy.module.cppyy import interp_cppyy, executor diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -1,5 +1,8 @@ import py, os, sys +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") + currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("stltypesDict.so")) diff --git a/pypy/module/cppyy/test/test_streams.py b/pypy/module/cppyy/test/test_streams.py --- a/pypy/module/cppyy/test/test_streams.py +++ b/pypy/module/cppyy/test/test_streams.py @@ -1,5 +1,8 @@ import py, os, sys +if py.path.local.sysfind('genreflex') is None: + py.test.skip("genreflex is not installed") + currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("std_streamsDict.so")) From noreply at buildbot.pypy.org Wed Aug 14 08:45:29 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 Aug 2013 08:45:29 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: more support for test using the dummy backend Message-ID: <20130814064529.1FC371C32D8@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r66142:c6d9d629e328 Date: 2013-08-13 23:42 -0700 http://bitbucket.org/pypy/pypy/changeset/c6d9d629e328/ Log: more support for test using the dummy backend diff --git a/pypy/module/cppyy/test/Makefile b/pypy/module/cppyy/test/Makefile --- a/pypy/module/cppyy/test/Makefile +++ b/pypy/module/cppyy/test/Makefile @@ -9,15 +9,19 @@ ROOTSYS := ${ROOTSYS} endif -ifeq ($(ROOTSYS),) - genreflex=genreflex - cppflags=-I$(shell root-config --incdir) -L$(shell root-config --libdir) +ifeq ($(DUMMY),t) + cppflags= else - genreflex=$(ROOTSYS)/bin/genreflex - ifeq ($(wildcard $(ROOTSYS)/include),) # standard locations used? - cppflags=-I$(shell root-config --incdir) -L$(shell root-config --libdir) + ifeq ($(ROOTSYS),) + genreflex=genreflex + cppflags=-I$(ROOTSYS)/include -L$(ROOTSYS)/lib64 -L$(ROOTSYS)/lib else - cppflags=-I$(ROOTSYS)/include -L$(ROOTSYS)/lib64 -L$(ROOTSYS)/lib + genreflex=$(ROOTSYS)/bin/genreflex + ifeq ($(wildcard $(ROOTSYS)/include),) # standard locations used? 
+ cppflags=-I$(shell root-config --incdir) -L$(shell root-config --libdir) + else + cppflags=-I$(ROOTSYS)/include -L$(ROOTSYS)/lib64 -L$(ROOTSYS)/lib + endif endif endif @@ -26,7 +30,9 @@ cppflags+=-dynamiclib -single_module -arch x86_64 -undefined dynamic_lookup endif -ifeq ($(CINT),) +ifeq ($(DUMMY),t) + cppflags2=-O3 -fPIC -rdynamic +else ifeq ($(CINT),) ifeq ($(shell $(genreflex) --help | grep -- --with-methptrgetter),) genreflexflags= cppflags2=-O3 -fPIC @@ -34,32 +40,38 @@ genreflexflags=--with-methptrgetter cppflags2=-Wno-pmf-conversions -O3 -fPIC endif -else - cppflags2=-O3 -fPIC -rdynamic endif -ifeq ($(CINT),) +ifeq ($(CINT),t) +%Dict.so: %_cint.cxx %.cxx + g++ -o $@ $^ -shared $(cppflags) $(cppflags2) + rlibmap -f -o $*Dict.rootmap -l $@ -c $*_LinkDef.h + +%_cint.cxx: %.h %_LinkDef.h + rootcint -f $@ -c $*.h $*_LinkDef.h + +else ifeq ($(DUMMY),t) +%Dict.so: %.cxx + g++ -o $@ $^ -shared $(cppflags) $(cppflags2) + +else # reflex %Dict.so: %_rflx.cpp %.cxx echo $(cppflags) g++ -o $@ $^ -shared -lReflex $(cppflags) $(cppflags2) %_rflx.cpp: %.h %.xml $(genreflex) $< $(genreflexflags) --selection=$*.xml --rootmap=$*Dict.rootmap --rootmap-lib=$*Dict.so -else -%Dict.so: %_cint.cxx %.cxx - g++ -o $@ $^ -shared $(cppflags) $(cppflags2) - rlibmap -f -o $*Dict.rootmap -l $@ -c $*_LinkDef.h -%_cint.cxx: %.h %_LinkDef.h - rootcint -f $@ -c $*.h $*_LinkDef.h endif ifeq ($(CINT),) +ifeq ($(DUMMY),) # TODO: methptrgetter causes these tests to crash, so don't use it for now std_streamsDict.so: std_streams.cxx std_streams.h std_streams.xml $(genreflex) std_streams.h --selection=std_streams.xml g++ -o $@ std_streams_rflx.cpp std_streams.cxx -shared -lReflex $(cppflags) $(cppflags2) endif +endif .PHONY: clean clean: diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,1 +1,28 @@ import py + +if py.path.local.sysfind('genreflex') is None: + # build the dummy CAPI + + import os + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.translator import platform + + from rpython.rtyper.lltypesystem import rffi + + pkgpath = py.path.local(__file__).dirpath().join(os.pardir) + srcpath = pkgpath.join('src') + incpath = pkgpath.join('include') + + eci = ExternalCompilationInfo( + separate_module_files=[srcpath.join('dummy_backend.cxx')], + include_dirs=[incpath], + use_cpp_linker=True, + ) + + soname = platform.platform.compile( + [], eci, + outputfilename='libcppyy_backend', + standalone=False) + + import pypy.module.cppyy.capi.loadable_capi as lcapi + lcapi.reflection_library = str(soname) diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -1,7 +1,8 @@ import py, os, sys +isdummy = '' if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") + isdummy = 'DUMMY=t' from pypy.module.cppyy import interp_cppyy, executor @@ -12,7 +13,7 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - err = os.system("cd '%s' && make example01Dict.so" % currpath) + err = os.system("cd '%s' && make %s example01Dict.so" % (currpath, isdummy)) if err: raise OSError("'make' failed (see stderr)") From noreply at buildbot.pypy.org Wed Aug 14 08:45:30 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 Aug 2013 08:45:30 +0200 (CEST) Subject: [pypy-commit] pypy default: 
updates for the most recent version of genreflex (doc and methptr patch) Message-ID: <20130814064530.6EB571C34F3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r66143:b45d526fe75d Date: 2013-08-13 23:44 -0700 http://bitbucket.org/pypy/pypy/changeset/b45d526fe75d/ Log: updates for the most recent version of genreflex (doc and methptr patch) diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -83,7 +83,7 @@ the selection of scientific software) will also work for a build with the builtin backend. -.. _`download`: http://cern.ch/wlav/reflex-2013-04-23.tar.bz2 +.. _`download`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 .. _`ROOT`: http://root.cern.ch/ Besides Reflex, you probably need a version of `gccxml`_ installed, which is @@ -98,8 +98,8 @@ To install the standalone version of Reflex, after download:: - $ tar jxf reflex-2013-04-23.tar.bz2 - $ cd reflex-2013-04-23 + $ tar jxf reflex-2013-08-14.tar.bz2 + $ cd reflex-2013-08-14 $ ./build/autogen $ ./configure $ make && make install diff --git a/pypy/module/cppyy/genreflex-methptrgetter.patch b/pypy/module/cppyy/genreflex-methptrgetter.patch --- a/pypy/module/cppyy/genreflex-methptrgetter.patch +++ b/pypy/module/cppyy/genreflex-methptrgetter.patch @@ -10,7 +10,7 @@ # The next is to avoid a known problem with gccxml that it generates a # references to id equal '_0' which is not defined anywhere self.xref['_0'] = {'elem':'Unknown', 'attrs':{'id':'_0','name':''}, 'subelems':[]} -@@ -1306,6 +1307,8 @@ +@@ -1328,6 +1329,8 @@ bases = self.getBases( attrs['id'] ) if inner and attrs.has_key('demangled') and self.isUnnamedType(attrs['demangled']) : cls = attrs['demangled'] @@ -19,7 +19,7 @@ clt = '' else: cls = self.genTypeName(attrs['id'],const=True,colon=True) -@@ -1343,7 +1346,7 @@ +@@ -1365,7 +1368,7 @@ # Inner class/struct/union/enum. 
for m in memList : member = self.xref[m] @@ -28,7 +28,7 @@ and member['attrs'].get('access') in ('private','protected') \ and not self.isUnnamedType(member['attrs'].get('demangled')): cmem = self.genTypeName(member['attrs']['id'],const=True,colon=True) -@@ -1981,8 +1984,15 @@ +@@ -2003,8 +2006,15 @@ else : params = '0' s = ' .AddFunctionMember(%s, Reflex::Literal("%s"), %s%s, 0, %s, %s)' % (self.genTypeID(id), name, type, id, params, mod) s += self.genCommentProperty(attrs) @@ -44,7 +44,7 @@ def genMCODef(self, type, name, attrs, args): id = attrs['id'] cl = self.genTypeName(attrs['context'],colon=True) -@@ -2049,8 +2059,44 @@ +@@ -2071,8 +2081,44 @@ if returns == 'void' : body += ' }\n' else : body += ' }\n' body += '}\n' @@ -105,17 +105,16 @@ -h, --help Print this help\n """ -@@ -127,7 +131,8 @@ - opts, args = getopt.getopt(options, 'ho:s:c:I:U:D:PC', \ +@@ -128,7 +132,7 @@ ['help','debug=', 'output=','selection_file=','pool','dataonly','interpreteronly','deep','gccxmlpath=', 'capabilities=','rootmap=','rootmap-lib=','comments','iocomments','no_membertypedefs', -- 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=']) -+ 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', -+ 'with-methptrgetter']) + 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', +- 'library=']) ++ 'library=', 'with-methptrgetter']) except getopt.GetoptError, e: print "--->> genreflex: ERROR:",e self.usage(2) -@@ -186,6 +191,8 @@ +@@ -187,6 +191,8 @@ self.rootmap = a if o in ('--rootmap-lib',): self.rootmaplib = a From noreply at buildbot.pypy.org Wed Aug 14 09:09:53 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Aug 2013 09:09:53 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: test gc.py's stm barrier fastpaths Message-ID: <20130814070953.CB3791C243C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66144:d2096ca14852 Date: 2013-08-14 09:09 +0200 http://bitbucket.org/pypy/pypy/changeset/d2096ca14852/ Log: test gc.py's stm barrier fastpaths diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -448,13 +448,13 @@ @specialize.arg(2) def _do_barrier(self, gcref_struct, returns_modified_object): assert returns_modified_object - from rpython.memory.gc.stmgc import get_hdr_revision + from rpython.memory.gc.stmgc import StmGC objadr = llmemory.cast_ptr_to_adr(gcref_struct) + objhdr = rffi.cast(StmGC.GCHDRP, gcref_struct) # if h_revision == privat_rev of transaction - rev = get_hdr_revision(objadr) priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) - if rev[0] == priv_rev[0]: + if objhdr.h_revision == priv_rev[0]: return gcref_struct # XXX: readcache! @@ -472,17 +472,15 @@ @specialize.arg(2) def _do_barrier(self, gcref_struct, returns_modified_object): assert returns_modified_object - from rpython.memory.gc.stmgc import (StmGC, get_hdr_revision, - get_hdr_tid) + from rpython.memory.gc.stmgc import StmGC objadr = llmemory.cast_ptr_to_adr(gcref_struct) - + objhdr = rffi.cast(StmGC.GCHDRP, gcref_struct) + # if h_revision == privat_rev of transaction - rev = get_hdr_revision(objadr) priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) - if rev[0] == priv_rev[0]: + if objhdr.h_revision == priv_rev[0]: # also WRITE_BARRIER not set? 
- tid = get_hdr_tid(objadr)[0] - if not (tid & StmGC.GCFLAG_WRITE_BARRIER): + if not (objhdr.h_tid & StmGC.GCFLAG_WRITE_BARRIER): return gcref_struct funcptr = self.get_barrier_funcptr(returns_modified_object) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -102,11 +102,14 @@ class FakeGCHeaderBuilder: size_gc_header = WORD -GCPTR = lltype.Ptr(lltype.GcStruct( - 'GCPTR', ('h_tid', lltype.Unsigned), - ('h_revision', lltype.Signed), - ('h_original', lltype.Unsigned))) -HDRSIZE = 3 * WORD +class fakellop: + PRIV_REV = 66 + def stm_get_adr_of_private_rev_num(self, _): + TP = rffi.CArray(lltype.Signed) + p = lltype.malloc(TP, n=1, flavor='raw', + track_allocation=False, zero=True) + p[0] = fakellop.PRIV_REV + return p class GCDescrStm(GCDescrShadowstackDirect): def __init__(self): @@ -154,11 +157,11 @@ RESULT=lltype.Bool) def malloc_big_fixedsize(size, tid): - entries = size + HDRSIZE + entries = size + StmGC.GCHDRSIZE TP = rffi.CArray(lltype.Char) obj = lltype.malloc(TP, n=entries, flavor='raw', track_allocation=False, zero=True) - objptr = rffi.cast(GCPTR, obj) + objptr = rffi.cast(StmGC.GCHDRP, obj) objptr.h_tid = rffi.cast(lltype.Unsigned, StmGC.GCFLAG_OLD|StmGC.GCFLAG_WRITE_BARRIER | tid) @@ -259,6 +262,8 @@ s.h_revision = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) return s + + def test_gc_read_barrier_fastpath(self): from rpython.jit.backend.llsupport.gc import STMReadBarrierDescr descr = STMReadBarrierDescr(self.cpu.gc_ll_descr, 'P2R') @@ -268,15 +273,6 @@ called.append(obj) return obj - PRIV_REV = 66 - class fakellop: - def stm_get_adr_of_private_rev_num(self, _): - TP = rffi.SIGNEDP - p = lltype.malloc(TP, n=1, flavor='raw', - track_allocation=False, zero=True) - p[0] = PRIV_REV - return rffi.cast(llmemory.Address, p) - functype = lltype.Ptr(lltype.FuncType( [llmemory.Address], llmemory.Address)) funcptr = llhelper(functype, read) @@ -284,28 +280,68 @@ descr.llop1 = fakellop() # -------- TEST -------- - for rev in [PRIV_REV+4, PRIV_REV]: + for rev in [fakellop.PRIV_REV+4, fakellop.PRIV_REV]: called[:] = [] s = self.allocate_prebuilt_s() sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) s.h_revision = rev - descr._do_barrier(llmemory.AddressAsInt(sgcref), + descr._do_barrier(sgcref, returns_modified_object=True) # check if rev-fastpath worked - if rev == PRIV_REV: + if rev == fakellop.PRIV_REV: # fastpath - assert sgcref not in called + self.assert_not_in(called, [sgcref]) else: - assert sgcref in called + self.assert_in(called, [sgcref]) # XXX: read_cache test! 
# # now add it to the read-cache and check # # that it will never call the read_barrier # assert not called_on + def test_gc_write_barrier_fastpath(self): + from rpython.jit.backend.llsupport.gc import STMWriteBarrierDescr + descr = STMWriteBarrierDescr(self.cpu.gc_ll_descr, 'P2W') + + called = [] + def write(obj): + called.append(obj) + return obj + + functype = lltype.Ptr(lltype.FuncType( + [llmemory.Address], llmemory.Address)) + funcptr = llhelper(functype, write) + descr.b_failing_case_ptr = funcptr + descr.llop1 = fakellop() + + # -------- TEST -------- + for rev in [fakellop.PRIV_REV+4, fakellop.PRIV_REV]: + called[:] = [] + + s = self.allocate_prebuilt_s() + sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) + s.h_revision = rev + + descr._do_barrier(sgcref, + returns_modified_object=True) + + # check if rev-fastpath worked + if rev == fakellop.PRIV_REV: + # fastpath + self.assert_not_in(called, [sgcref]) + else: + self.assert_in(called, [sgcref]) + + # now set WRITE_BARRIER -> always call slowpath + called[:] = [] + s.h_tid |= StmGC.GCFLAG_WRITE_BARRIER + descr._do_barrier(sgcref, + returns_modified_object=True) + self.assert_in(called, [sgcref]) + diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -36,10 +36,19 @@ malloc_zero_filled = True #gcflag_extra = GCFLAG_EXTRA + + GCHDR = lltype.GcStruct( + 'GCPTR', + ('h_tid', lltype.Unsigned), + ('h_revision', lltype.Signed), + ('h_original', lltype.Unsigned)) + GCHDRP = lltype.Ptr(GCHDR) + GCHDRSIZE = 3 * WORD + HDR = rffi.COpaque('struct stm_object_s') H_TID = 0 H_REVISION = WORD - H_ORIGINAL = WORD + WORD + H_ORIGINAL = WORD * 2 typeid_is_in_field = None VISIT_FPTR = lltype.Ptr(lltype.FuncType([llmemory.Address], lltype.Void)) From noreply at buildbot.pypy.org Wed Aug 14 10:07:48 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Aug 2013 10:07:48 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: also add read_cache fastpath to stm_read_barrier in gc.py Message-ID: <20130814080748.7D5B61C32CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66145:5eda89b160ab Date: 2013-08-14 10:06 +0200 http://bitbucket.org/pypy/pypy/changeset/5eda89b160ab/ Log: also add read_cache fastpath to stm_read_barrier in gc.py diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -456,7 +456,16 @@ priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) if objhdr.h_revision == priv_rev[0]: return gcref_struct - + + read_cache = self.llop1.stm_get_adr_of_read_barrier_cache(rffi.SIGNEDP) + objint = llmemory.cast_adr_to_int(objadr) + assert WORD == 8, "check for 32bit compatibility" + index = (objint & StmGC.FX_MASK) / WORD + CP = lltype.Ptr(rffi.CArray(lltype.Signed)) + rcp = rffi.cast(CP, read_cache[0]) + if rcp[index] == objint: + return gcref_struct + # XXX: readcache! 
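# A minimal, self-contained sketch (not part of the patch above) of the two
# read-barrier fastpaths just shown: a plain dict stands in for the fixed-size
# read-barrier cache and a simple object stands in for the GC header; all
# names below are illustrative only, not the real rpython API.
class FakeHdr(object):
    def __init__(self, h_revision):
        self.h_revision = h_revision

def read_barrier_sketch(obj, private_rev, read_cache, slow_path):
    # fastpath 1: the object's revision is already private to this transaction
    if obj.h_revision == private_rev:
        return obj
    # fastpath 2: the object was already seen and sits in the read cache
    if read_cache.get(id(obj)) is obj:
        return obj
    # slow path: the out-of-line barrier, which may return a different copy
    return slow_path(obj)

# tiny usage check: fastpath 1 triggers, the same object comes back
hdr = FakeHdr(h_revision=66)
assert read_barrier_sketch(hdr, private_rev=66, read_cache={},
                           slow_path=lambda o: o) is hdr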
funcptr = self.get_barrier_funcptr(returns_modified_object) res = funcptr(objadr) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -35,9 +35,6 @@ self.stack_addr = lltype.malloc(TP, 1, flavor='raw') self.stack_addr[0] = rffi.cast(lltype.Signed, self.stack) - def __del__(self): - lltype.free(self.stack_addr, flavor='raw') - lltype.free(self.stack, flavor='raw') def register_asm_addr(self, start, mark): pass def get_root_stack_top_addr(self): @@ -104,12 +101,29 @@ class fakellop: PRIV_REV = 66 + def __init__(self): + self.TP = rffi.CArray(lltype.Signed) + self.privrevp = lltype.malloc(self.TP, n=1, flavor='raw', + track_allocation=False, zero=True) + self.privrevp[0] = fakellop.PRIV_REV + + entries = (StmGC.FX_MASK + 1) / WORD + self.read_cache = lltype.malloc(self.TP, n=entries, flavor='raw', + track_allocation=False, zero=True) + self.read_cache_adr = lltype.malloc(self.TP, 1, flavor='raw', + track_allocation=False) + self.read_cache_adr[0] = rffi.cast(lltype.Signed, self.read_cache) + + def set_cache_item(self, obj, value): + obj_int = rffi.cast(lltype.Signed, obj) + idx = (obj_int & StmGC.FX_MASK) / WORD + self.read_cache[idx] = rffi.cast(lltype.Signed, value) + def stm_get_adr_of_private_rev_num(self, _): - TP = rffi.CArray(lltype.Signed) - p = lltype.malloc(TP, n=1, flavor='raw', - track_allocation=False, zero=True) - p[0] = fakellop.PRIV_REV - return p + return self.privrevp + + def stm_get_adr_of_read_barrier_cache(self, _): + return self.read_cache_adr class GCDescrStm(GCDescrShadowstackDirect): def __init__(self): @@ -296,11 +310,15 @@ self.assert_not_in(called, [sgcref]) else: self.assert_in(called, [sgcref]) - - # XXX: read_cache test! - # # now add it to the read-cache and check - # # that it will never call the read_barrier - # assert not called_on + + # now check if sgcref in readcache: + called[:] = [] + descr.llop1.set_cache_item(sgcref, sgcref) + descr._do_barrier(sgcref, + returns_modified_object=True) + self.assert_not_in(called, [sgcref]) + descr.llop1.set_cache_item(sgcref, 0) + def test_gc_write_barrier_fastpath(self): from rpython.jit.backend.llsupport.gc import STMWriteBarrierDescr From noreply at buildbot.pypy.org Wed Aug 14 12:16:49 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Wed, 14 Aug 2013 12:16:49 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: add some failing tests, remove bad arena hack Message-ID: <20130814101649.2BB931C00EC@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66146:251da710fc7c Date: 2013-08-14 22:10 +1200 http://bitbucket.org/pypy/pypy/changeset/251da710fc7c/ Log: add some failing tests, remove bad arena hack diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -102,6 +102,7 @@ # and on surviving raw-malloced young objects during a minor collection. GCFLAG_VISITED = first_gcflag << 2 + # The following flag is set on nursery objects of which we asked the id # or the identityhash. It means that a space of the size of the object # has already been allocated in the nonmovable part. The same flag is @@ -129,6 +130,14 @@ # by the incremental collection GCFLAG_GRAY = first_gcflag << 8 +# The following flag is just an alias for the gray flag. 
It +# is only used by major collections, it is set on objects +# which are allocated during the sweeping and finalization states +# it has different meaning outside of the sweeping state. +# This flag should not be reset by any minor collection operation +GCFLAG_NOSWEEP = first_gcflag << 8 + + # States for the incremental GC # The scanning phase, next step call will scan the current roots @@ -139,13 +148,10 @@ # marking of objects can be done over multiple STATE_MARKING = 1 << 1 STATE_SWEEPING_RAWMALLOC = 1 << 2 -STATE_SWEEPING_ARENA_1 = 1 << 3 -STATE_SWEEPING_ARENA_2 = 1 << 4 -STATE_FINALIZING = 1 << 5 +STATE_SWEEPING_ARENA = 1 << 3 +STATE_FINALIZING = 1 << 4 -MASK_SWEEPING = (STATE_SWEEPING_RAWMALLOC | - STATE_SWEEPING_ARENA_1 | - STATE_SWEEPING_ARENA_2) +MASK_SWEEPING = (STATE_SWEEPING_RAWMALLOC | STATE_SWEEPING_ARENA) @@ -1034,9 +1040,7 @@ already_checked = True elif self.gc_state == STATE_SWEEPING_RAWMALLOC: pass - elif self.gc_state == STATE_SWEEPING_ARENA_1: - pass - elif self.gc_state == STATE_SWEEPING_ARENA_2: + elif self.gc_state == STATE_SWEEPING_ARENA: pass elif self.gc_state == STATE_FINALIZING: pass @@ -1059,9 +1063,7 @@ self._debug_check_object_marking(obj) elif self.gc_state == STATE_SWEEPING_RAWMALLOC: self._debug_check_object_sweeping_rawmalloc(obj) - elif self.gc_state == STATE_SWEEPING_ARENA_1: - self._debug_check_object_sweeping_arena(obj) - elif self.gc_state == STATE_SWEEPING_ARENA_2: + elif self.gc_state == STATE_SWEEPING_ARENA: self._debug_check_object_sweeping_arena(obj) elif self.gc_state == STATE_FINALIZING: self._debug_check_object_finalizing(obj) @@ -1184,6 +1186,7 @@ if self.gc_state == STATE_MARKING: if self.header(addr_struct).tid & GCFLAG_VISITED: self.write_to_visited_object_forward(addr_struct,newvalue) + def write_barrier_from_array(self, newvalue, addr_array, index): @@ -1689,6 +1692,9 @@ if self.has_gcptr(typeid): # we only have to do it if we have any gcptrs self.old_objects_pointing_to_young.append(newobj) + + + _trace_drag_out._always_inline_ = True def _visit_young_rawmalloced_object(self, obj): @@ -1834,24 +1840,16 @@ # XXX heuristic here to decide nobjects. if self.free_unvisited_rawmalloc_objects_step(1): #malloc objects freed - self.gc_state = STATE_SWEEPING_ARENA_1 + self.gc_state = STATE_SWEEPING_ARENA - elif self.gc_state == STATE_SWEEPING_ARENA_1: + elif self.gc_state == STATE_SWEEPING_ARENA: # # Ask the ArenaCollection to visit all objects. Free the ones # that have not been visited above, and reset GCFLAG_VISITED on # the others. - self.ac_alternate.mass_free(self._free_if_unvisited) - self.gc_state = STATE_SWEEPING_ARENA_2 - #swap arenas and start clearing the other one - self.ac,self.ac_alternate = self.ac_alternate,self.ac - - elif self.gc_state == STATE_SWEEPING_ARENA_2: - - self.ac_alternate.mass_free(self._free_if_unvisited) - + # XXX make incremental... + self.ac.mass_free(self._free_if_unvisited) self.num_major_collects += 1 - # # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. 
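# A rough standalone sketch (not part of the patch above) of the collapsed
# incremental-collection state sequence after this change; the state names
# mirror the STATE_* constants above, the per-step work is elided, and a real
# major_collection_step() only advances when the current phase has finished.
STATES = ['SCANNING', 'MARKING', 'SWEEPING_RAWMALLOC',
          'SWEEPING_ARENA', 'FINALIZING']

def next_state(state):
    # advance at most one phase per step, wrapping back to SCANNING
    # once finalization is done
    i = STATES.index(state)
    return STATES[(i + 1) % len(STATES)]

assert next_state('SWEEPING_RAWMALLOC') == 'SWEEPING_ARENA'
assert next_state('FINALIZING') == 'SCANNING'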
self.prebuilt_root_objects.foreach(self._reset_gcflag_visited, None) @@ -2008,7 +2006,7 @@ def _collect_ref_rec(self, root, ignored): obj = root.address[0] - if self.header(obj).tid & GCFLAG_VISITED != 0: + if self.header(obj).tid & (GCFLAG_VISITED|GCFLAG_GRAY) != 0: return self.header(obj).tid |= GCFLAG_GRAY self.objects_to_trace.append(obj) @@ -2024,7 +2022,9 @@ pending = self.objects_to_trace while nobjects > 0 and pending.non_empty(): obj = pending.pop() - ll_assert(self.header(obj).tid & + #XXX can black objects even get into this list? + #XXX tighten this assertion + ll_assert(self.header(obj).tid & (GCFLAG_GRAY|GCFLAG_VISITED|GCFLAG_NO_HEAP_PTRS) != 0, "non gray or black object being traced") self.visit(obj) diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -608,6 +608,9 @@ #process one object self.gc.debug_gc_step() + self.gc.minor_collection() + # make sure minor collect doesnt interfere with visited flag on + # old object assert oldhdr.tid & incminimark.GCFLAG_VISITED #at this point the first object should have been processed @@ -617,7 +620,7 @@ newhdr = self.gc.header(llmemory.cast_ptr_to_adr(newobj)) assert newhdr.tid & incminimark.GCFLAG_GRAY #checks gray object is in objects_to_trace - self.gc.debug_check_consistency() + self.gc.debug_check_consistency() def test_sweeping_simple(self): from rpython.memory.gc import incminimark @@ -637,13 +640,24 @@ newobj1 = self.malloc(S) newobj2 = self.malloc(S) newobj1.x = 1337 - newobj2.x = 1338 - self.write(oldobj,'next',newobj) - newhdr = self.gc.header(llmemory.cast_ptr_to_adr(newobj)) - #checks gray object is in objects_to_trace + #newobj2.x = 1338 + self.write(oldobj,'next',newobj1) self.gc.debug_gc_step_until(incminimark.STATE_SCANNING) #should not be cleared even though it was allocated while sweeping - assert newobj.x == 1337 + assert newobj1.x == 1337 + #assert newobj2.x == 1338 + + def test_new_marking_write_sweeping(self): + + assert False + + def test_finalizing_new_object(self): + # Must test an object with a finalizer + # being added just before finalizers start being called + # must test this new objects finalizer is not called + # XXX maybe cant do this in test_direct and need test_transformed + assert False + class TestIncrementalMiniMarkGCFull(TestMiniMarkGCFull): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass From noreply at buildbot.pypy.org Wed Aug 14 13:23:52 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 Aug 2013 13:23:52 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: add first stab at a dummy backend (for testing only) Message-ID: <20130814112352.9607B1C0170@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r66147:83b6d1cd5a87 Date: 2013-08-13 23:48 -0700 http://bitbucket.org/pypy/pypy/changeset/83b6d1cd5a87/ Log: add first stab at a dummy backend (for testing only) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -0,0 +1,35 @@ +#include "cppyy.h" +#include "capi.h" + +#include + +#include +#include + + +/* local helpers ---------------------------------------------------------- */ +static inline char* cppstring_to_cstring(const std::string& name) { + char* name_char = (char*)malloc(name.size() + 1); + strcpy(name_char, name.c_str()); + return name_char; +} + + +/* name to 
opaque C++ scope representation -------------------------------- */ +int cppyy_num_scopes(cppyy_scope_t handle) { + return 0; +} + +char* cppyy_resolve_name(const char* cppitem_name) { + return cppstring_to_cstring(cppitem_name); +} + +cppyy_scope_t cppyy_get_scope(const char* scope_name) { + return 0; +} + + +/* misc helpers ----------------------------------------------------------- */ +void cppyy_free(void* ptr) { + free(ptr); +} From noreply at buildbot.pypy.org Wed Aug 14 13:23:54 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 Aug 2013 13:23:54 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: further progress on dummy backend for testing Message-ID: <20130814112354.0C14F1C0170@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r66148:4d5c8fb5e858 Date: 2013-08-14 03:52 -0700 http://bitbucket.org/pypy/pypy/changeset/4d5c8fb5e858/ Log: further progress on dummy backend for testing diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -1,12 +1,53 @@ #include "cppyy.h" #include "capi.h" +#include #include +#include #include #include +/* pseudo-reflection data ------------------------------------------------- */ +namespace { + +typedef std::map Handles_t; +static Handles_t s_handles; + +class Cppyy_PseudoInfo { +public: + Cppyy_PseudoInfo(int num_methods=0, const char* methods[]=0) : + m_num_methods(num_methods) { + m_methods.reserve(num_methods); + for (int i=0; i < num_methods; ++i) { + m_methods.push_back(methods[i]); + } + } + +public: + int m_num_methods; + std::vector m_methods; +}; + +typedef std::map Scopes_t; +static Scopes_t s_scopes; + +struct Cppyy_InitPseudoReflectionInfo { + Cppyy_InitPseudoReflectionInfo() { + // class example01 -- + static int s_scope_id = 0; + s_handles["example01"] = ++s_scope_id; + const char* methods[] = {"staticAddToDouble"}; + Cppyy_PseudoInfo info(1, methods); + s_scopes[s_scope_id] = info; + // -- class example01 + } +} _init; + +} // unnamed namespace + + /* local helpers ---------------------------------------------------------- */ static inline char* cppstring_to_cstring(const std::string& name) { char* name_char = (char*)malloc(name.size() + 1); @@ -25,6 +66,102 @@ } cppyy_scope_t cppyy_get_scope(const char* scope_name) { + return s_handles[scope_name]; // lookup failure will return 0 (== error) +} + + +/* method/function dispatching -------------------------------------------- */ +cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { + return (cppyy_methptrgetter_t)0; +} + + +/* scope reflection information ------------------------------------------- */ +int cppyy_is_namespace(cppyy_scope_t /* handle */) { + return 0; +} + +int cppyy_is_enum(const char* /* type_name */) { + return 0; +} + + +/* class reflection information ------------------------------------------- */ +char* cppyy_final_name(cppyy_type_t handle) { + for (Handles_t::iterator isp = s_handles.begin(); isp != s_handles.end(); ++isp) { + if (isp->second == handle) + return cppstring_to_cstring(isp->first); + } + return cppstring_to_cstring(""); +} + +char* cppyy_scoped_final_name(cppyy_type_t handle) { + return cppyy_final_name(handle); +} + +int cppyy_has_complex_hierarchy(cppyy_type_t /* handle */) { + return 1; +} + + +/* method/function reflection information --------------------------------- */ +int cppyy_num_methods(cppyy_scope_t 
handle) { + return s_scopes[handle].m_num_methods; +} + +cppyy_index_t cppyy_method_index_at(cppyy_scope_t /* scope */, int imeth) { + return (cppyy_index_t)imeth; +} + +char* cppyy_method_name(cppyy_scope_t handle, cppyy_index_t method_index) { + return cppstring_to_cstring(s_scopes[handle].m_methods[(int)method_index]); +} + +char* cppyy_method_result_type(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { + return cppstring_to_cstring("double"); +} + +int cppyy_method_num_args(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { + return 1; +} + +int cppyy_method_req_args(cppyy_scope_t handle, cppyy_index_t method_index) { + return cppyy_method_num_args(handle, method_index); +} + +char* cppyy_method_arg_type(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */, int /* arg_index */) { + return cppstring_to_cstring("double"); +} + +char* cppyy_method_arg_default(cppyy_scope_t handle, cppyy_index_t method_index, int arg_index) { + return cppstring_to_cstring(""); +} + +char* cppyy_method_signature(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { + return cppstring_to_cstring("double"); +} + +int cppyy_method_is_template(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { + return 0; +} + +cppyy_method_t cppyy_get_method(cppyy_scope_t /* handle */, cppyy_index_t method_index) { + return (cppyy_method_t)method_index; +} + + +/* method properties ----------------------------------------------------- */ +int cppyy_is_constructor(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { + return 0; +} + +int cppyy_is_staticmethod(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { + return 1; +} + + +/* data member reflection information ------------------------------------- */ +int cppyy_num_datamembers(cppyy_scope_t /* handle */) { return 0; } diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -36,6 +36,9 @@ spaceconfig = dict(usemodules=['cppyy', '_rawffi', '_ffi', 'itertools']) def setup_class(cls): + if isdummy: + py.test.skip('skipping further tests in dummy mode') + cls.w_example01, cls.w_payload = cls.space.unpackiterable(cls.space.appexec([], """(): import cppyy cppyy.load_reflection_info(%r) From noreply at buildbot.pypy.org Wed Aug 14 13:23:57 2013 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 Aug 2013 13:23:57 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20130814112357.EDA5B1C0170@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r66149:3d316c2195a3 Date: 2013-08-14 04:02 -0700 http://bitbucket.org/pypy/pypy/changeset/3d316c2195a3/ Log: merge default into branch diff too long, truncating to 2000 out of 7696 lines diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None 
self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -152,7 +152,8 @@ try: plaincontent = dot2plain_graphviz(content, contenttype) except PlainParseError, e: - print e - # failed, retry via codespeak - plaincontent = dot2plain_codespeak(content, contenttype) + raise + ##print e + ### failed, retry via codespeak + ##plaincontent = dot2plain_codespeak(content, contenttype) return list(parse_plain(graph_id, plaincontent, links, fixedfont)) diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 +165,8 @@ # All _delegate_methods must also be initialized here. send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. The @@ -179,12 +181,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. 
Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). + self._sock = _sock def send(self, data, flags=0): @@ -216,9 +227,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +290,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +333,11 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: + self._sock = None + if s is not None: s._drop() - if self._close: - self._sock.close() - self._sock = None + if self._close: + s.close() def __del__(self): try: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). + sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS @@ -352,11 +358,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1066,6 +1066,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1321,7 +1324,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1193,6 +1193,8 @@ # out of socket._fileobject() and into a base class. 
r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.6 +Version: 0.7 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/readline.egg-info b/lib_pypy/readline.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/readline.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: readline +Version: 6.2.4.1 +Summary: Hack to make "pip install readline" happy and do nothing +Home-page: UNKNOWN +Author: UNKNOWN +Author-email: UNKNOWN +License: UNKNOWN +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -83,7 +83,7 @@ the selection of scientific software) will also work for a build with the builtin backend. -.. _`download`: http://cern.ch/wlav/reflex-2013-04-23.tar.bz2 +.. _`download`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 .. _`ROOT`: http://root.cern.ch/ Besides Reflex, you probably need a version of `gccxml`_ installed, which is @@ -98,8 +98,8 @@ To install the standalone version of Reflex, after download:: - $ tar jxf reflex-2013-04-23.tar.bz2 - $ cd reflex-2013-04-23 + $ tar jxf reflex-2013-08-14.tar.bz2 + $ cd reflex-2013-08-14 $ ./build/autogen $ ./configure $ make && make install diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. _Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. 
+ +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. Frames +can escape and they can also be allocated already at the moment we enter the +JIT. In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). + +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. + It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -62,3 +62,15 @@ No longer delegate numpy string_ methods to space.StringObject, in numpy this works by kind of by accident. Support for merging the refactor-str-types branch + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. branch: reflex-support +.. 
branch: numpypy-inplace-op diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -176,9 +176,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -260,7 +261,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -330,7 +331,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -340,7 +341,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -355,7 +356,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -389,7 +390,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -476,7 +477,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -490,7 +491,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -522,7 +523,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -553,12 +554,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -609,10 +610,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." 
return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -638,8 +639,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -649,7 +650,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -667,7 +668,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -329,10 +329,6 @@ instance=True) base_user_setup(self, space, w_subtype) - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - add(Proto) subcls = type(name, (supercls,), body) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -28,15 +28,17 @@ import __pypy__, thread, signal, time, sys def subthread(): + print('subthread started') try: with __pypy__.thread.signals_enabled: thread.interrupt_main() for i in range(10): - print 'x' + print('x') time.sleep(0.1) except BaseException, e: interrupted.append(e) finally: + print('subthread stops, interrupted=%r' % (interrupted,)) done.append(None) # This is normally called by app_main.py @@ -52,11 +54,13 @@ try: done = [] interrupted = [] + print('--- start ---') thread.start_new_thread(subthread, ()) for j in range(10): if len(done): break - print '.' + print('.') time.sleep(0.1) + print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 assert 'KeyboardInterrupt' in interrupted[0].__class__.__name__ @@ -77,7 +81,7 @@ def threadfunction(): pid = fork() if pid == 0: - print 'in child' + print('in child') # signal() only works from the 'main' thread signal.signal(signal.SIGUSR1, signal.SIG_IGN) os._exit(42) diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -385,6 +385,24 @@ raise Exception("time out") print 'Passed.' 
+ def test_seek_from_cur_backwards_off_end(self): + import os + + f = self.file(self.temppath, "w+b") + f.write('123456789x12345678><123456789\n') + + f.seek(0, os.SEEK_END) + f.seek(-25, os.SEEK_CUR) + f.read(25) + f.seek(-25, os.SEEK_CUR) + try: + f.seek(-25, os.SEEK_CUR) + except IOError: + pass + else: + raise AssertionError("Didn't raise IOError") + assert f.tell() == 5 + class AppTestFile25: spaceconfig = dict(usemodules=("_file",)) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -56,7 +56,7 @@ def descr_typecode(space, self): return space.wrap(self.typecode) -arr_eq_driver = jit.JitDriver(greens = ['comp_func'], reds = 'auto') +arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto') EQ, NE, LT, LE, GT, GE = range(6) def compare_arrays(space, arr1, arr2, comp_op): diff --git a/pypy/module/binascii/interp_uu.py b/pypy/module/binascii/interp_uu.py --- a/pypy/module/binascii/interp_uu.py +++ b/pypy/module/binascii/interp_uu.py @@ -61,7 +61,7 @@ return ord(bin[i]) except IndexError: return 0 -_a2b_read._always_inline_ = True +_b2a_read._always_inline_ = True @unwrap_spec(bin='bufferstr') def b2a_uu(space, bin): diff --git a/pypy/module/cppyy/genreflex-methptrgetter.patch b/pypy/module/cppyy/genreflex-methptrgetter.patch --- a/pypy/module/cppyy/genreflex-methptrgetter.patch +++ b/pypy/module/cppyy/genreflex-methptrgetter.patch @@ -10,7 +10,7 @@ # The next is to avoid a known problem with gccxml that it generates a # references to id equal '_0' which is not defined anywhere self.xref['_0'] = {'elem':'Unknown', 'attrs':{'id':'_0','name':''}, 'subelems':[]} -@@ -1306,6 +1307,8 @@ +@@ -1328,6 +1329,8 @@ bases = self.getBases( attrs['id'] ) if inner and attrs.has_key('demangled') and self.isUnnamedType(attrs['demangled']) : cls = attrs['demangled'] @@ -19,7 +19,7 @@ clt = '' else: cls = self.genTypeName(attrs['id'],const=True,colon=True) -@@ -1343,7 +1346,7 @@ +@@ -1365,7 +1368,7 @@ # Inner class/struct/union/enum. 
for m in memList : member = self.xref[m] @@ -28,7 +28,7 @@ and member['attrs'].get('access') in ('private','protected') \ and not self.isUnnamedType(member['attrs'].get('demangled')): cmem = self.genTypeName(member['attrs']['id'],const=True,colon=True) -@@ -1981,8 +1984,15 @@ +@@ -2003,8 +2006,15 @@ else : params = '0' s = ' .AddFunctionMember(%s, Reflex::Literal("%s"), %s%s, 0, %s, %s)' % (self.genTypeID(id), name, type, id, params, mod) s += self.genCommentProperty(attrs) @@ -44,7 +44,7 @@ def genMCODef(self, type, name, attrs, args): id = attrs['id'] cl = self.genTypeName(attrs['context'],colon=True) -@@ -2049,8 +2059,44 @@ +@@ -2071,8 +2081,44 @@ if returns == 'void' : body += ' }\n' else : body += ' }\n' body += '}\n' @@ -105,17 +105,16 @@ -h, --help Print this help\n """ -@@ -127,7 +131,8 @@ - opts, args = getopt.getopt(options, 'ho:s:c:I:U:D:PC', \ +@@ -128,7 +132,7 @@ ['help','debug=', 'output=','selection_file=','pool','dataonly','interpreteronly','deep','gccxmlpath=', 'capabilities=','rootmap=','rootmap-lib=','comments','iocomments','no_membertypedefs', -- 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=']) -+ 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', -+ 'with-methptrgetter']) + 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', +- 'library=']) ++ 'library=', 'with-methptrgetter']) except getopt.GetoptError, e: print "--->> genreflex: ERROR:",e self.usage(2) -@@ -186,6 +191,8 @@ +@@ -187,6 +191,8 @@ self.rootmap = a if o in ('--rootmap-lib',): self.rootmaplib = a diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -41,13 +41,13 @@ def PyNumber_Int(space, w_obj): """Returns the o converted to an integer object on success, or NULL on failure. This is the equivalent of the Python expression int(o).""" - return space.int(w_obj) + return space.call_function(space.w_int, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Long(space, w_obj): """Returns the o converted to a long integer object on success, or NULL on failure. 
This is the equivalent of the Python expression long(o).""" - return space.long(w_obj) + return space.call_function(space.w_long, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Index(space, w_obj): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,7 +3,6 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread -from pypy.module.thread import os_thread PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -17,6 +16,9 @@ ('dict', PyObject), ])) +class NoThreads(Exception): + pass + @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread @@ -45,10 +47,15 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): + if not space.config.translation.thread: + raise NoThreads + from pypy.module.thread import os_thread os_thread.setup_threads(space) @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): + if not space.config.translation.thread: + return 0 return 1 # XXX: might be generally useful @@ -232,6 +239,8 @@ """Create a new thread state object belonging to the given interpreter object. The global interpreter lock need not be held, but may be held if it is necessary to serialize calls to this function.""" + if not space.config.translation.thread: + raise NoThreads rthread.gc_thread_prepare() # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. @@ -246,6 +255,8 @@ def PyThreadState_Clear(space, tstate): """Reset all information in a thread state object. 
The global interpreter lock must be held.""" + if not space.config.translation.thread: + raise NoThreads Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) space.threadlocals.leave_thread(space) diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -19,6 +19,8 @@ def test_number_long(self, space, api): w_l = api.PyNumber_Long(space.wrap(123)) assert api.PyLong_CheckExact(w_l) + w_l = api.PyNumber_Long(space.wrap("123")) + assert api.PyLong_CheckExact(w_l) def test_number_int(self, space, api): w_l = api.PyNumber_Int(space.wraplong(123L)) @@ -27,6 +29,8 @@ assert api.PyLong_CheckExact(w_l) w_l = api.PyNumber_Int(space.wrap(42.3)) assert api.PyInt_CheckExact(w_l) + w_l = api.PyNumber_Int(space.wrap("42")) + assert api.PyInt_CheckExact(w_l) def test_number_index(self, space, api): w_l = api.PyNumber_Index(space.wraplong(123L)) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -140,6 +140,7 @@ ("deg2rad", "radians"), ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), + ("rint", "rint"), ("sign", "sign"), ("signbit", "signbit"), ("sin", "sin"), @@ -175,6 +176,8 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), + ('ones_like', 'ones_like'), + ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -737,6 +737,27 @@ descr_gt = _binop_comp_impl(_binop_impl("greater")) descr_ge = _binop_comp_impl(_binop_impl("greater_equal")) + def _binop_inplace_impl(ufunc_name): + def impl(self, space, w_other): + w_out = self + ufunc = getattr(interp_ufuncs.get(space), ufunc_name) + return ufunc.call(space, [self, w_other, w_out]) + return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) + + descr_iadd = _binop_inplace_impl("add") + descr_isub = _binop_inplace_impl("subtract") + descr_imul = _binop_inplace_impl("multiply") + descr_idiv = _binop_inplace_impl("divide") + descr_itruediv = _binop_inplace_impl("true_divide") + descr_ifloordiv = _binop_inplace_impl("floor_divide") + descr_imod = _binop_inplace_impl("mod") + descr_ipow = _binop_inplace_impl("power") + descr_ilshift = _binop_inplace_impl("left_shift") + descr_irshift = _binop_inplace_impl("right_shift") + descr_iand = _binop_inplace_impl("bitwise_and") + descr_ior = _binop_inplace_impl("bitwise_or") + descr_ixor = _binop_inplace_impl("bitwise_xor") + def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) @@ -1007,6 +1028,20 @@ __ror__ = interp2app(W_NDimArray.descr_ror), __rxor__ = interp2app(W_NDimArray.descr_rxor), + __iadd__ = interp2app(W_NDimArray.descr_iadd), + __isub__ = interp2app(W_NDimArray.descr_isub), + __imul__ = interp2app(W_NDimArray.descr_imul), + __idiv__ = interp2app(W_NDimArray.descr_idiv), + __itruediv__ = interp2app(W_NDimArray.descr_itruediv), + __ifloordiv__ = interp2app(W_NDimArray.descr_ifloordiv), + __imod__ = interp2app(W_NDimArray.descr_imod), + __ipow__ = interp2app(W_NDimArray.descr_ipow), + __ilshift__ = interp2app(W_NDimArray.descr_ilshift), + __irshift__ = interp2app(W_NDimArray.descr_irshift), + __iand__ = 
interp2app(W_NDimArray.descr_iand), + __ior__ = interp2app(W_NDimArray.descr_ior), + __ixor__ = interp2app(W_NDimArray.descr_ixor), + __eq__ = interp2app(W_NDimArray.descr_eq), __ne__ = interp2app(W_NDimArray.descr_ne), __lt__ = interp2app(W_NDimArray.descr_lt), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -620,6 +620,7 @@ ("positive", "pos", 1), ("negative", "neg", 1), ("absolute", "abs", 1, {"complex_to_float": True}), + ("rint", "rint", 1), ("sign", "sign", 1, {"promote_bools": True}), ("signbit", "signbit", 1, {"bool_result": True, "allow_complex": False}), @@ -675,6 +676,8 @@ "allow_complex": False}), ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True, "allow_complex": False}), + ("ones_like", "ones_like", 1), + ("zeros_like", "zeros_like", 1), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -155,7 +155,8 @@ obj_iter.next() return cur_value -reduce_cum_driver = jit.JitDriver(greens = ['shapelen', 'func', 'dtype'], +reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', + greens = ['shapelen', 'func', 'dtype'], reds = 'auto') def compute_reduce_cumultative(obj, out, calc_dtype, func, identity): @@ -214,8 +215,7 @@ axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', greens=['shapelen', - 'func', 'dtype', - 'identity'], + 'func', 'dtype'], reds='auto') def do_axis_reduce(shape, func, arr, dtype, axis, out, identity, cumultative, @@ -231,8 +231,7 @@ shapelen = len(shape) while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=dtype, identity=identity, - ) + dtype=dtype) w_val = arr_iter.getitem().convert_to(dtype) if out_iter.first_line: if identity is not None: @@ -529,8 +528,9 @@ val_arr.descr_getitem(space, w_idx)) iter.next() -byteswap_driver = jit.JitDriver(greens = ['dtype'], - reds = 'auto') +byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', + greens = ['dtype'], + reds = 'auto') def byteswap(from_, to): dtype = from_.dtype @@ -542,8 +542,9 @@ to_iter.next() from_iter.next() -choose_driver = jit.JitDriver(greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') +choose_driver = jit.JitDriver(name='numpy_choose_driver', + greens = ['shapelen', 'mode', 'dtype'], + reds = 'auto') def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -572,8 +573,9 @@ out_iter.next() arr_iter.next() -clip_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +clip_driver = jit.JitDriver(name='numpy_clip_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def clip(space, arr, shape, min, max, out): arr_iter = arr.create_iter(shape) @@ -597,8 +599,9 @@ out_iter.next() min_iter.next() -round_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], - reds = 'auto') +round_driver = jit.JitDriver(name='numpy_round_driver', + greens = ['shapelen', 'dtype'], + reds = 'auto') def round(space, arr, dtype, shape, decimals, out): arr_iter = arr.create_iter(shape) @@ -612,7 +615,8 @@ arr_iter.next() out_iter.next() -diagonal_simple_driver = jit.JitDriver(greens = ['axis1', 'axis2'], +diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', + greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): diff --git 
a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -789,6 +789,49 @@ r = [1, 2] + array([1, 2]) assert (r == [2, 4]).all() + def test_inline_op_scalar(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(3)) + getattr(a, op).__call__(2) + assert id(a) == id(b) + + def test_inline_op_array(self): + from numpypy import array + for op in [ + '__iadd__', + '__isub__', + '__imul__', + '__idiv__', + '__ifloordiv__', + '__imod__', + '__ipow__', + '__ilshift__', + '__irshift__', + '__iand__', + '__ior__', + '__ixor__']: + a = b = array(range(5)) + c = array(range(5)) + d = array(5 * [2]) + getattr(a, op).__call__(d) + assert id(a) == id(b) + reg_op = op.replace('__i', '__') + for i in range(5): + assert a[i] == getattr(c[i], reg_op).__call__(d[i]) + def test_add_list(self): from numpypy import array, ndarray a = array(range(5)) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -255,6 +255,22 @@ for i in range(3): assert c[i] == a[i] * b[i] + def test_rint(self): + from numpypy import array, complex, rint, isnan + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + reference = array([ninf, -2., -1., -0., 0., 0., 0., 1., 2., inf]) + a = array([ninf, -1.5, -1., -0.5, -0., 0., 0.5, 1., 1.5, inf]) + b = rint(a) + for i in range(len(a)): + assert b[i] == reference[i] + assert isnan(rint(nan)) + assert isnan(rint(nnan)) + + assert rint(complex(inf, 1.5)) == complex(inf, 2.) + assert rint(complex(0.5, inf)) == complex(0., inf) + def test_sign(self): from numpypy import array, sign, dtype @@ -939,4 +955,18 @@ assert logaddexp2(float('inf'), float('-inf')) == float('inf') assert logaddexp2(float('inf'), float('inf')) == float('inf') + def test_ones_like(self): + from numpypy import array, ones_like + assert ones_like(False) == array(True) + assert ones_like(2) == array(1) + assert ones_like(2.) == array(1.) + assert ones_like(complex(2)) == array(complex(1)) + + def test_zeros_like(self): + from numpypy import array, zeros_like + + assert zeros_like(True) == array(False) + assert zeros_like(2) == array(0) + assert zeros_like(2.) == array(0.) 
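
The rint reference values used in the tests above follow round-half-to-even: -1.5 and 1.5 round to -2 and 2, while -0.5 and 0.5 round towards zero. The RPython implementation relies on rfloat.round_double(v, 0, half_even=True); the following plain-Python sketch only illustrates the same rounding rule:

    import math

    def rint(v):
        # round to nearest integer, ties go to the even neighbour
        if math.isnan(v) or math.isinf(v):
            return v
        f = math.floor(v)
        d = v - f
        if d > 0.5:
            return f + 1.0
        if d < 0.5:
            return f
        return f if math.fmod(f, 2.0) == 0.0 else f + 1.0

    assert rint(0.5) == 0.0 and rint(1.5) == 2.0 and rint(-1.5) == -2.0
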
+ assert zeros_like(complex(2)) == array(complex(0)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -6,7 +6,7 @@ import py from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import reset_stats +from rpython.jit.metainterp.warmspot import reset_stats, get_stats from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray @@ -35,9 +35,10 @@ cls.code_mapping = d cls.codes = allcodes - def run(self, name): + def compile_graph(self): + if self.graph is not None: + return space = FakeSpace() - i = self.code_mapping[name] codes = self.codes def f(i): @@ -57,14 +58,18 @@ raise TypeError(w_res) if self.graph is None: - interp, graph = self.meta_interp(f, [i], + interp, graph = self.meta_interp(f, [0], listops=True, backendopt=True, graph_and_interp_only=True) self.__class__.interp = interp self.__class__.graph = graph + + def run(self, name): + self.compile_graph() reset_stats() pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) py.test.skip("don't run for now") return retval @@ -134,6 +139,29 @@ 'int_add': 3, }) + def test_reduce_compile_only_once(self): + self.compile_graph() + reset_stats() + pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping['sum'] + # run it twice + retval = self.interp.eval_graph(self.graph, [i]) + retval = self.interp.eval_graph(self.graph, [i]) + # check that we got only one loop + assert len(get_stats().loops) == 1 + + def test_reduce_axis_compile_only_once(self): + self.compile_graph() + reset_stats() + pyjitpl._warmrunnerdesc.memory_manager.alive_loops.clear() + i = self.code_mapping['axissum'] + # run it twice + retval = self.interp.eval_graph(self.graph, [i]) + retval = self.interp.eval_graph(self.graph, [i]) + # check that we got only one loop + assert len(get_stats().loops) == 1 + + def define_prod(): return """ a = |30| diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -307,6 +307,22 @@ def min(self, v1, v2): return min(v1, v2) + @simple_unary_op + def rint(self, v): + if isfinite(v): + return rfloat.round_double(v, 0, half_even=True) + else: + return v + + @simple_unary_op + def ones_like(self, v): + return 1 + + @simple_unary_op + def zeros_like(self, v): + return 0 + + class NonNativePrimitive(Primitive): _mixin_ = True @@ -1392,11 +1408,14 @@ def round(self, v, decimals=0): ans = list(self.for_computation(self.unbox(v))) if isfinite(ans[0]): - ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) + ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) if isfinite(ans[1]): - ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) + ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) return self.box_complex(ans[0], ans[1]) + def rint(self, v): + return self.round(v) + # No floor, ceil, trunc in numpy for complex #@simple_unary_op #def floor(self, v): @@ -1599,6 +1618,15 @@ except ValueError: return rfloat.NAN, rfloat.NAN + @complex_unary_op + def ones_like(self, v): + return 1, 0 + + @complex_unary_op + def zeros_like(self, v): + return 0, 0 + + class 
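
For reference, ones_like and zeros_like (added above as one-argument ufuncs) ignore the input value and only use it to pick the result dtype, which is why the complex case yields 1+0j or 0+0j. Plain NumPy behaves the same way, so a quick illustration of the expected semantics:

    import numpy as np

    assert np.ones_like(2.5) == 1.0
    assert np.zeros_like(np.complex128(2 + 3j)) == 0 + 0j
    assert np.ones_like(7) == 1
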
Complex64(ComplexFloating, BaseType): _attrs_ = () diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -723,11 +723,16 @@ for hook in get_fork_hooks(where): hook(space) -def fork(space): +def _run_forking_function(space, kind): run_fork_hooks('before', space) - try: - pid = os.fork() + if kind == "F": + pid = os.fork() + master_fd = -1 + elif kind == "P": + pid, master_fd = os.forkpty() + else: + raise AssertionError except OSError, e: try: run_fork_hooks('parent', space) @@ -735,12 +740,14 @@ # Don't clobber the OSError if the fork failed pass raise wrap_oserror(space, e) - if pid == 0: run_fork_hooks('child', space) else: run_fork_hooks('parent', space) + return pid, master_fd +def fork(space): + pid, irrelevant = _run_forking_function(space, "F") return space.wrap(pid) def openpty(space): @@ -752,10 +759,7 @@ return space.newtuple([space.wrap(master_fd), space.wrap(slave_fd)]) def forkpty(space): - try: - pid, master_fd = os.forkpty() - except OSError, e: - raise wrap_oserror(space, e) + pid, master_fd = _run_forking_function(space, "P") return space.newtuple([space.wrap(pid), space.wrap(master_fd)]) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,18 +12,18 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap -PyFrame._virtualizable2_ = ['last_instr', 'pycode', - 'valuestackdepth', 'locals_stack_w[*]', - 'cells[*]', - 'last_exception', - 'lastblock', - 'is_being_profiled', - 'w_globals', - 'w_f_trace', - ] +PyFrame._virtualizable_ = ['last_instr', 'pycode', + 'valuestackdepth', 'locals_stack_w[*]', + 'cells[*]', + 'last_exception', + 'lastblock', + 'is_being_profiled', + 'w_globals', + 'w_f_trace', + ] JUMP_ABSOLUTE = opmap['JUMP_ABSOLUTE'] @@ -73,7 +73,13 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: + self.last_exception = None return self.popvalue() def jump_absolute(self, jumpto, ec): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -87,7 +87,7 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_int_str), i5, descr=) + p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -19,6 +19,7 @@ log = self.run(main, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("generator", """ + cond_call(..., descr=...) 
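
The interp_posix.py change above factors fork() and forkpty() into one helper so both share the fork-hook protocol: run the 'before' hooks, perform the fork, on failure still run the 'parent' hooks without masking the original OSError, and afterwards run the 'child' or 'parent' hooks as appropriate. A self-contained sketch of that control flow (the hook arguments here are plain callables, invented for the illustration):

    import os

    def run_with_fork_hooks(before, parent, child, use_pty=False):
        before()
        try:
            if use_pty:
                pid, master_fd = os.forkpty()
            else:
                pid, master_fd = os.fork(), -1
        except OSError:
            try:
                parent()
            except Exception:
                pass              # don't clobber the original OSError
            raise
        if pid == 0:
            child()
        else:
            parent()
        return pid, master_fd
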
i16 = force_token() p45 = new_with_vtable(ConstClass(W_IntObject)) setfield_gc(p45, i29, descr=) diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py --- a/pypy/module/test_lib_pypy/test_curses.py +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -1,6 +1,15 @@ +import pytest + +# Check that lib_pypy.cffi finds the correct version of _cffi_backend. +# Otherwise, the test is skipped. It should never be skipped when run +# with "pypy py.test -A". +try: + from lib_pypy import cffi; cffi.FFI() +except (ImportError, AssertionError), e: + pytest.skip("no cffi module or wrong version (%s)" % (e,)) + from lib_pypy import _curses -import pytest lib = _curses.lib diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -85,7 +85,7 @@ continue e = elem.split("\t") adr = e[0] - v = " ".join(e[2:]) + v = elem # --- more compactly: " ".join(e[2:]) if not start: start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start @@ -379,15 +379,16 @@ name = entry[:entry.find('(') - 1].lower() addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) + from rpython.jit.backend.tool.viewcode import World + world = World() + for entry in extract_category(log, 'jit-backend-dump'): + world.parse(entry.splitlines(True), truncate_addr=False) dumps = {} - for entry in extract_category(log, 'jit-backend-dump'): - backend, _, dump, _ = entry.split("\n") - _, addr, _, data = re.split(" +", dump) - backend_name = backend.split(" ")[1] - addr = int(addr[1:], 16) - if addr in addrs and addrs[addr]: - name = addrs[addr].pop(0) # they should come in order - dumps[name] = (backend_name, addr, data) + for r in world.ranges: + if r.addr in addrs and addrs[r.addr]: + name = addrs[r.addr].pop(0) # they should come in order + data = r.data.encode('hex') # backward compatibility + dumps[name] = (world.backend_name, r.addr, data) loops = [] for entry in extract_category(log, 'jit-log-opt'): parser = ParserCls(entry, None, {}, 'lltype', None, diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -14,9 +14,9 @@ return lltype.nullptr(T) interp.heap.malloc_nonmovable = returns_null # XXX - from rpython.jit.backend.llgraph.runner import LLtypeCPU + from rpython.jit.backend.llgraph.runner import LLGraphCPU #LLtypeCPU.supports_floats = False # for now - apply_jit(interp, graph, LLtypeCPU) + apply_jit(interp, graph, LLGraphCPU) def apply_jit(interp, graph, CPUClass): diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. 
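
The guard added at the top of test_curses.py above is an instance of a general pattern for optional test dependencies: import the module and exercise it once at import time, so a missing module or a version mismatch turns into a clean skip instead of an obscure failure later (and under "pypy py.test -A" it should never trigger). A generic sketch with hypothetical names, in the Python 2 style used by the tree:

    import pytest

    try:
        import somelib            # hypothetical optional dependency
        somelib.self_check()      # hypothetical call that raises on a version mismatch
    except (ImportError, AssertionError), e:
        pytest.skip("no somelib module or wrong version (%s)" % (e,))
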
diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -391,6 +391,7 @@ instance_level = False all_enforced_attrs = None # or a set settled = False + _detect_invalid_attrs = None def __init__(self, bookkeeper, pyobj=None, name=None, basedesc=None, classdict=None, @@ -714,6 +715,10 @@ # by changing the result's annotation (but not, of course, doing an # actual copy in the rtyper). Tested in rpython.rtyper.test.test_rlist, # test_immutable_list_out_of_instance. + if self._detect_invalid_attrs and attr in self._detect_invalid_attrs: + raise Exception("field %r was migrated to %r from a subclass in " + "which it was declared as _immutable_fields_" % + (attr, self.pyobj)) search1 = '%s[*]' % (attr,) search2 = '%s?[*]' % (attr,) cdesc = self @@ -724,6 +729,14 @@ s_result.listdef.never_resize() s_copy = s_result.listdef.offspring() s_copy.listdef.mark_as_immutable() + # + cdesc = cdesc.basedesc + while cdesc is not None: + if cdesc._detect_invalid_attrs is None: + cdesc._detect_invalid_attrs = set() + cdesc._detect_invalid_attrs.add(attr) + cdesc = cdesc.basedesc + # return s_copy cdesc = cdesc.basedesc return s_result # common case diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -379,4 +379,4 @@ def specialize_call_location(funcdesc, args_s, op): assert op is not None - return maybe_star_args(funcdesc, op, args_s) + return maybe_star_args(funcdesc, (op,), args_s) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3088,7 +3088,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class B(A): def meth(self): return self @@ -3128,7 +3128,7 @@ from rpython.rlib.jit import hint class A: - _virtualizable2_ = [] + _virtualizable_ = [] class I: pass @@ -3717,6 +3717,24 @@ a = self.RPythonAnnotator() a.build_types(f, [int]) + def test_immutable_field_subclass(self): + class Root: + pass + class A(Root): + _immutable_fields_ = '_my_lst[*]' + def __init__(self, lst): + self._my_lst = lst + def foo(x): + return len(x._my_lst) + + def f(n): + foo(A([2, n])) + foo(Root()) + + a = self.RPythonAnnotator() + e = py.test.raises(Exception, a.build_types, f, [int]) + assert "field '_my_lst' was migrated" in str(e.value) + def test_call_classes_with_noarg_init(self): class A: foo = 21 diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -62,20 +62,21 @@ self.current_clt = looptoken.compiled_loop_token self.mc = InstrBuilder(self.cpu.cpuinfo.arch_version) self.pending_guards = [] - assert self.datablockwrapper is None + #assert self.datablockwrapper is None --- but obscure case + # possible, e.g. 
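
Background for the _detect_invalid_attrs change and the test_immutable_field_subclass test above: the '[*]' qualifier in _immutable_fields_ declares that the list reached through the field is itself immutable (never resized, items never changed), which lets the JIT fold reads out of it. The new check turns a previously silent situation, reading such a field through a base class that does not declare it, into an explicit error. The declaration itself looks like this (a sketch with invented names; only meaningful under RPython):

    class Cache(object):
        _immutable_fields_ = ['entries[*]']   # '[*]': the list contents are read-only too
        def __init__(self, entries):
            self.entries = entries            # set once, never resized or mutated
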
getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, allblocks) self.mc.datablockwrapper = self.datablockwrapper self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.current_clt = None self._regalloc = None self.mc = None self.pending_guards = None - assert self.datablockwrapper is None def setup_failure_recovery(self): self.failure_recovery_code = [0, 0, 0, 0] @@ -889,7 +890,7 @@ relative_offset = tok.pos_recovery_stub - tok.offset guard_pos = block_start + tok.offset if not tok.is_guard_not_invalidated: - # patch the guard jumpt to the stub + # patch the guard jump to the stub # overwrite the generate NOP with a B_offs to the pos of the # stub mc = InstrBuilder(self.cpu.cpuinfo.arch_version) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -33,6 +33,7 @@ from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.arm import callbuilder +from rpython.rlib.rarithmetic import r_uint class ArmGuardToken(GuardToken): @@ -190,7 +191,7 @@ self.mc.RSB_ri(resloc.value, l0.value, imm=0) return fcond - def _emit_guard(self, op, arglocs, fcond, save_exc, + def build_guard_token(self, op, frame_depth, arglocs, offset, fcond, save_exc, is_guard_not_invalidated=False, is_guard_not_forced=False): assert isinstance(save_exc, bool) @@ -198,7 +199,27 @@ descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) + gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + token = ArmGuardToken(self.cpu, gcmap, + descr, + failargs=op.getfailargs(), + fail_locs=arglocs, + offset=offset, + exc=save_exc, + frame_depth=frame_depth, + is_guard_not_invalidated=is_guard_not_invalidated, + is_guard_not_forced=is_guard_not_forced, + fcond=fcond) + return token + + def _emit_guard(self, op, arglocs, fcond, save_exc, + is_guard_not_invalidated=False, + is_guard_not_forced=False): pos = self.mc.currpos() + token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], pos, fcond, save_exc, + is_guard_not_invalidated, + is_guard_not_forced) + self.pending_guards.append(token) # For all guards that are not GUARD_NOT_INVALIDATED we emit a # breakpoint to ensure the location is patched correctly. 
In the case # of GUARD_NOT_INVALIDATED we use just a NOP, because it is only @@ -207,17 +228,6 @@ self.mc.NOP() else: self.mc.BKPT() - gcmap = allocate_gcmap(self, arglocs[0].value, JITFRAME_FIXED_SIZE) - self.pending_guards.append(ArmGuardToken(self.cpu, gcmap, - descr, - failargs=op.getfailargs(), - fail_locs=arglocs[1:], - offset=pos, - exc=save_exc, - frame_depth=arglocs[0].value, - is_guard_not_invalidated=is_guard_not_invalidated, - is_guard_not_forced=is_guard_not_forced, - fcond=fcond)) return c.AL def _emit_guard_overflow(self, guard, failargs, fcond): @@ -351,7 +361,11 @@ # XXX self.mov(fail_descr_loc, RawStackLoc(ofs)) self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: - gcmap = self.gcmap_for_finish + if self._finish_gcmap: + self._finish_gcmap[0] |= r_uint(0) # r0 + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather @@ -912,6 +926,14 @@ return fcond + def store_force_descr(self, op, fail_locs, frame_depth): + pos = self.mc.currpos() + guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL, True, False, True) + #self.pending_guards.append(guard_token) + self._finish_gcmap = guard_token.gcmap + self._store_force_index(op) + self.store_info_on_descr(pos, guard_token) + def emit_op_force_token(self, op, arglocs, regalloc, fcond): # XXX kill me res_loc = arglocs[0] @@ -959,16 +981,6 @@ pmc.B_offs(self.mc.currpos(), c.EQ) return pos - def _call_assembler_reset_vtoken(self, jd, vloc): - from rpython.jit.backend.llsupport.descr import FieldDescr - fielddescr = jd.vable_token_descr - assert isinstance(fielddescr, FieldDescr) - ofs = fielddescr.offset - tmploc = self._regalloc.get_scratch_reg(INT) - self.mov_loc_loc(vloc, r.ip) - self.mc.MOV_ri(tmploc.value, 0) - self.mc.STR_ri(tmploc.value, r.ip.value, ofs) - def _call_assembler_load_result(self, op, result_loc): if op.result is not None: # load the return value from (tmploc, 0) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1194,6 +1194,12 @@ # self._compute_hint_frame_locations_from_descr(descr) return [] + def prepare_op_guard_not_forced_2(self, op, fcond): + self.rm.before_call(op.getfailargs(), save_all_regs=True) + fail_locs = self._prepare_guard(op) + self.assembler.store_force_descr(op, fail_locs[1:], fail_locs[0].value) + self.possibly_free_vars(op.getfailargs()) + def prepare_guard_call_may_force(self, op, guard_op, fcond): args = self._prepare_call(op, save_all_regs=True) return self._prepare_guard(guard_op, args) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -101,6 +101,9 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) + def get_vinfo(self): + return self.vinfo + def __repr__(self): return 'FieldDescr(%r, %r)' % (self.S, self.fieldname) @@ -170,7 +173,7 @@ translate_support_code = False is_llgraph = True - def __init__(self, rtyper, stats=None, *ignored_args, **ignored_kwds): + def __init__(self, rtyper, stats=None, *ignored_args, **kwds): model.AbstractCPU.__init__(self) self.rtyper = rtyper self.llinterp = LLInterpreter(rtyper) @@ -178,6 +181,7 @@ class MiniStats: pass self.stats = stats or MiniStats() + self.vinfo_for_tests = kwds.get('vinfo_for_tests', 
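
For readers unfamiliar with the pending-guard machinery being refactored above: while assembling a loop the backend only reserves space for each guard (a NOP or BKPT) and records a GuardToken; once the failure-recovery stubs have been generated, each reserved spot is patched with a branch to its stub. A toy model of that two-phase scheme, with invented names:

    class Token(object):
        def __init__(self, offset):
            self.offset = offset

    code, pending = [], []

    def emit_guard():
        pending.append(Token(len(code)))
        code.append('BKPT')                  # placeholder, patched later

    def write_pending_failure_recoveries():
        for tok in pending:
            stub_pos = len(code)
            code.append('recovery stub')
            code[tok.offset] = 'B_offs %d' % stub_pos   # patch the guard jump

    emit_guard()
    write_pending_failure_recoveries()
    assert code == ['B_offs 1', 'recovery stub']
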
None) def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): clt = model.CompiledLoopToken(self, looptoken.number) @@ -316,6 +320,8 @@ except KeyError: descr = FieldDescr(S, fieldname) self.descrs[key] = descr + if self.vinfo_for_tests is not None: + descr.vinfo = self.vinfo_for_tests return descr def arraydescrof(self, A): @@ -496,6 +502,8 @@ def bh_raw_store_i(self, struct, offset, newvalue, descr): ll_p = rffi.cast(rffi.CCHARP, struct) ll_p = rffi.cast(lltype.Ptr(descr.A), rffi.ptradd(ll_p, offset)) + if descr.A.OF == lltype.SingleFloat: + newvalue = longlong.int2singlefloat(newvalue) ll_p[0] = rffi.cast(descr.A.OF, newvalue) def bh_raw_store_f(self, struct, offset, newvalue, descr): @@ -600,6 +608,7 @@ forced_deadframe = None overflow_flag = False last_exception = None + force_guard_op = None def __init__(self, cpu, argboxes, args): self.env = {} @@ -766,6 +775,8 @@ if self.forced_deadframe is not None: saved_data = self.forced_deadframe._saved_data self.fail_guard(descr, saved_data) + self.force_guard_op = self.current_op + execute_guard_not_forced_2 = execute_guard_not_forced def execute_guard_not_invalidated(self, descr): if self.lltrace.invalid: @@ -887,7 +898,6 @@ # res = CALL assembler_call_helper(pframe) # jmp @done # @fastpath: - # RESET_VABLE # res = GETFIELD(pframe, 'result') # @done: # @@ -907,25 +917,17 @@ vable = lltype.nullptr(llmemory.GCREF.TO) # # Emulate the fast path - def reset_vable(jd, vable): - if jd.index_of_virtualizable != -1: - fielddescr = jd.vable_token_descr - NULL = lltype.nullptr(llmemory.GCREF.TO) - self.cpu.bh_setfield_gc(vable, NULL, fielddescr) + # faildescr = self.cpu.get_latest_descr(pframe) if faildescr == self.cpu.done_with_this_frame_descr_int: - reset_vable(jd, vable) return self.cpu.get_int_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_ref: - reset_vable(jd, vable) return self.cpu.get_ref_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_float: - reset_vable(jd, vable) return self.cpu.get_float_value(pframe, 0) elif faildescr == self.cpu.done_with_this_frame_descr_void: - reset_vable(jd, vable) return None - # + assembler_helper_ptr = jd.assembler_helper_adr.ptr # fish try: result = assembler_helper_ptr(pframe, vable) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt) + ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -24,6 +24,7 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): + assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr self.failargs = failargs @@ -232,11 +233,8 @@ jmp_location = self._call_assembler_patch_je(result_loc, je_location) - # Path B: fast path. Must load the return value, and reset the token + # Path B: fast path. 
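
The llgraph changes above drop the RESET_VABLE step from the call_assembler emulation, but the overall protocol stays the same: the called assembler leaves a fail descr in its frame, and the caller compares it against the well-known 'done_with_this_frame' descrs to decide between the fast path (read the result straight out of the frame) and the slow path (run the assembler helper). A toy model, with all names invented for the illustration:

    DONE_INT, DONE_VOID = object(), object()

    def call_assembler(frame, assembler_helper):
        descr = frame['descr']
        if descr is DONE_INT:
            return frame['value']          # fast path: result already in the frame
        if descr is DONE_VOID:
            return None
        return assembler_helper(frame)     # slow path, e.g. a guard failed

    assert call_assembler({'descr': DONE_INT, 'value': 42}, None) == 42
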
Must load the return value - # Reset the vable token --- XXX really too much special logic here:-( - if jd.index_of_virtualizable >= 0: - self._call_assembler_reset_vtoken(jd, vloc) # self._call_assembler_load_result(op, result_loc) # diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -76,7 +76,13 @@ FLAG_STRUCT = 'X' FLAG_VOID = 'V' -class FieldDescr(AbstractDescr): +class ArrayOrFieldDescr(AbstractDescr): + vinfo = None + + def get_vinfo(self): + return self.vinfo + +class FieldDescr(ArrayOrFieldDescr): name = '' offset = 0 # help translation field_size = 0 @@ -150,12 +156,13 @@ # ____________________________________________________________ # ArrayDescrs -class ArrayDescr(AbstractDescr): +class ArrayDescr(ArrayOrFieldDescr): tid = 0 basesize = 0 # workaround for the annotator itemsize = 0 lendescr = None flag = '\x00' + vinfo = None def __init__(self, basesize, itemsize, lendescr, flag): self.basesize = basesize diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -721,12 +721,8 @@ def bh_raw_load_i(self, addr, offset, descr): ofs, size, sign = self.unpack_arraydescr_size(descr) - items = addr + offset - for TYPE, _, itemsize in unroll_basic_sizes: - if size == itemsize: - items = rffi.cast(rffi.CArrayPtr(TYPE), items) - return rffi.cast(lltype.Signed, items[0]) - assert False # unreachable code + assert ofs == 0 # otherwise, 'descr' is not a raw length-less array + return self.read_int_at_mem(addr, offset, size, sign) def bh_raw_load_f(self, addr, offset, descr): items = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), addr + offset) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -23,7 +23,7 @@ # - floats neg and abs class Frame(object): - _virtualizable2_ = ['i'] + _virtualizable_ = ['i'] def __init__(self, i): self.i = i @@ -98,7 +98,7 @@ self.val = val class Frame(object): - _virtualizable2_ = ['thing'] + _virtualizable_ = ['thing'] driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -6,7 +6,7 @@ total_compiled_loops = 0 total_compiled_bridges = 0 total_freed_loops = 0 - total_freed_bridges = 0 + total_freed_bridges = 0 # for heaptracker # _all_size_descrs_with_vtable = None @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -294,7 +294,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3954,8 +3954,12 @@ p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - 
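
The bh_raw_load_i rewrite and the extra checks in runner_test above are about one operation: read a machine-sized value from raw (non-GC) memory at address plus byte offset, using the size and signedness recorded in the array descr. Outside RPython the same idea can be tried with ctypes, purely as an illustration:

    import ctypes

    buf = ctypes.create_string_buffer(32)        # stands in for raw storage
    addr = ctypes.addressof(buf)
    ctypes.c_long.from_address(addr + 16).value = -0x42434445
    assert ctypes.c_long.from_address(addr + 16).value == -0x42434445
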
value = rffi.cast(T, 0x4243444546474849) + value = rffi.cast(T, -0x4243444546474849) rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, + arraydescr) + assert got == rffi.cast(lltype.Signed, value) + # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -3981,6 +3985,11 @@ p[i] = '\xDD' value = rffi.cast(T, 1.12e20) rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_f(rffi.cast(lltype.Signed, p), 16, + arraydescr) + got = longlong.getrealfloat(got) + assert got == rffi.cast(lltype.Float, value) + # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) @@ -3991,22 +4000,58 @@ assert result == rffi.cast(lltype.Float, value) rawstorage.free_raw_storage(p) + def test_raw_load_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + from rpython.rlib import rawstorage + for T in [rffi.FLOAT]: + ops = """ + [i0, i1] + i2 = raw_load(i0, i1, descr=arraydescr) + finish(i2) + """ + arraydescr = self.cpu.arraydescrof(rffi.CArray(T)) + p = rawstorage.alloc_raw_storage(31) + for i in range(31): + p[i] = '\xDD' + value = rffi.cast(T, 1.12e20) + rawstorage.raw_storage_setitem(p, 16, value) + got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, From noreply at buildbot.pypy.org Wed Aug 14 14:41:51 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Aug 2013 14:41:51 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add some another test Message-ID: <20130814124151.B669C1C00EC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66150:0172eabba210 Date: 2013-08-14 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/0172eabba210/ Log: add some another test diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -601,9 +601,40 @@ args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert called == [id(finish_descr)] + del called[:] + + # compile a replacement + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + i10 = int_sub(i0, i1) + i11 = int_sub(i10, i2) + i12 = int_sub(i11, i3) + i13 = int_sub(i12, i4) + i14 = int_sub(i13, i5) + i15 = int_sub(i14, i6) + i16 = int_sub(i15, i7) + i17 = int_sub(i16, i8) + i18 = int_sub(i17, i9) + finish(i18)''' + loop2 = parse(ops) + looptoken2 = JitCellToken() + looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + finish_descr2 = loop2.operations[-1].getdescr() + # install it + self.cpu.redirect_call_assembler(looptoken, looptoken2) - + # now call_assembler should go to looptoken2 + args = [i+1 for i in range(10)] + deadframe = self.cpu.execute_token(othertoken, *args) + assert called == [id(finish_descr2)] + + + + + + From noreply at buildbot.pypy.org Wed Aug 14 14:41:53 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Aug 2013 14:41:53 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: avoid deadlock by transaction committing and another one because a call to stmcb_size() could call become_inevitable() Message-ID: <20130814124153.A6E3C1C00EC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: 
r66151:2afd31523d86 Date: 2013-08-14 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/2afd31523d86/ Log: avoid deadlock by transaction committing and another one because a call to stmcb_size() could call become_inevitable() diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -121,6 +121,7 @@ class ExtFuncEntry(ExtRegistryEntry): safe_not_sandboxed = False + transactionsafe = False # common case: args is a list of annotation or types def normalize_args(self, *args_s): @@ -198,6 +199,7 @@ impl._llfnobjattrs_ = { '_name': self.name, '_safe_not_sandboxed': self.safe_not_sandboxed, + 'transactionsafe': self.transactionsafe } obj = rtyper.getannmixlevel().delayedfunction( impl, signature_args, hop.s_result) @@ -208,7 +210,8 @@ # print '<<<<<<<<<<<<<-----------------------------------' obj = rtyper.type_system.getexternalcallable(args_ll, ll_result, name, _external_name=self.name, _callable=fakeimpl, - _safe_not_sandboxed=self.safe_not_sandboxed) + _safe_not_sandboxed=self.safe_not_sandboxed, + transactionsafe=self.transactionsafe) vlist = [hop.inputconst(typeOf(obj), obj)] + hop.inputargs(*args_r) hop.exception_is_here() return hop.genop('direct_call', vlist, r_result) @@ -216,7 +219,7 @@ def register_external(function, args, result=None, export_name=None, llimpl=None, ooimpl=None, llfakeimpl=None, oofakeimpl=None, - sandboxsafe=False): + sandboxsafe=False, _transactionsafe=False): """ function: the RPython function that will be rendered as an external function (e.g.: math.floor) args: a list containing the annotation of the arguments @@ -225,6 +228,7 @@ llimpl, ooimpl: optional; if provided, these RPython functions are called instead of the target function llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) + _transactionsafe: use True if the llimpl is transactionsafe (see rffi.llexternal) """ if export_name is None: @@ -233,6 +237,7 @@ class FunEntry(ExtFuncEntry): _about_ = function safe_not_sandboxed = sandboxsafe + transactionsafe = _transactionsafe if args is None: def normalize_args(self, *args_s): diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -600,12 +600,14 @@ [lltype.Signed, lltype.Signed], lltype.Signed, sandboxsafe=True, - _nowrapper=True) + _nowrapper=True, + transactionsafe=True) register_external(_round_up_for_allocation, [int, int], int, 'll_arena.round_up_for_allocation', llimpl=llimpl_round_up_for_allocation, llfakeimpl=round_up_for_allocation, - sandboxsafe=True) + sandboxsafe=True, + _transactionsafe=True) def llimpl_arena_new_view(addr): return addr From noreply at buildbot.pypy.org Wed Aug 14 14:46:50 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 14 Aug 2013 14:46:50 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: my hotel Message-ID: <20130814124651.000701C243C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5017:317adf784d45 Date: 2013-08-14 14:46 +0200 http://bitbucket.org/pypy/extradoc/changeset/317adf784d45/ Log: my hotel diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -21,7 +21,7 @@ Maciej Fijalkowski 25/8-1/9 private Manuel Jacob ? sth. cheap, pref. 
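
The point of the transactionsafe flag threaded through extfunc.py above: with STM, a call to an external function normally forces the transaction to become inevitable, since its side effects could not be rolled back; a helper known to be pure, such as round_up_for_allocation here, can therefore be registered as transaction safe, which appears to be how the deadlock around stmcb_size() described in the log is avoided. A toy registration under that assumption (ll_square and 'll_demo.square' are made-up names):

    from rpython.rtyper.extfunc import register_external

    def ll_square(x):          # hypothetical pure helper
        return x * x

    register_external(ll_square, [int], int, 'll_demo.square',
                      llimpl=ll_square, sandboxsafe=True,
                      _transactionsafe=True)
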
share Ronan Lamy 25/8-? ? -Antonio Cuni 26/8-5/9 ? +Antonio Cuni 26/8-5/9 hotel LSE Northumberl. ==================== ============== ======================= From noreply at buildbot.pypy.org Wed Aug 14 18:18:22 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 Aug 2013 18:18:22 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: trying to find all locations where gcrefs are encoded in the trace and make them non-movable. Message-ID: <20130814161822.4495D1C0170@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66152:86ed2339b723 Date: 2013-08-14 18:17 +0200 http://bitbucket.org/pypy/pypy/changeset/86ed2339b723/ Log: trying to find all locations where gcrefs are encoded in the trace and make them non-movable. diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -626,7 +626,7 @@ # the frame is in fp, but we have to point where in the frame is # the potential argument to FINISH descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) + fail_descr = rgc.cast_instance_to_gcref(descr) # we know it does not move, but well fail_descr = rgc._make_sure_does_not_move(fail_descr) fail_descr = rgc.cast_gcref_to_int(fail_descr) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -172,8 +172,6 @@ break exc = guardtok.exc target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = cast_instance_to_gcref(guardtok.faildescr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) base_ofs = self.cpu.get_baseofs_of_frame_field() positions = [0] * len(guardtok.fail_locs) for i, loc in enumerate(guardtok.fail_locs): @@ -196,6 +194,11 @@ guardtok.faildescr.rd_locs = positions # we want the descr to keep alive guardtok.faildescr.rd_loop_token = self.current_clt + fail_descr = rgc.cast_instance_to_gcref(guardtok.faildescr) + if self.cpu.gc_ll_descr.stm: + # only needed with STM, I think.. 
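
The changeset beginning here enforces one invariant: any GC object whose reference is encoded directly into a JIT trace or into machine code (ConstPtr values, guard and finish descrs) must not be moved by the GC afterwards, because nothing would update the embedded address. Hence the recurring three-step pattern in the diffs, summarised below with the helpers the diff itself uses:

    from rpython.rlib import rgc

    def encode_gcref(obj):
        # 'obj' stands for e.g. a guard's fail descr or a ConstPtr value
        gcref = rgc.cast_instance_to_gcref(obj)
        gcref = rgc._make_sure_does_not_move(gcref)
        return rgc.cast_gcref_to_int(gcref)   # now safe to embed in the code
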
+ fail_descr = rgc._make_sure_does_not_move(fail_descr) + fail_descr = rgc.cast_gcref_to_int(fail_descr) return fail_descr, target def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc): @@ -226,9 +229,9 @@ else: raise AssertionError(kind) - gcref = cast_instance_to_gcref(value) + gcref = rgc.cast_instance_to_gcref(value) gcref = rgc._make_sure_does_not_move(gcref) - value = rffi.cast(lltype.Signed, gcref) + value = rgc.cast_gcref_to_int(gcref) je_location = self._call_assembler_check_descr(value, tmploc) # # Path A: use assembler_helper_adr diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -102,19 +102,23 @@ for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): - p = v.value + p = rgc.cast_instance_to_gcref(v.value) new_p = rgc._make_sure_does_not_move(p) - v.value = new_p + if we_are_translated(): + v.value = new_p + else: + assert p == new_p gcrefs_output_list.append(new_p) if op.is_guard() or op.getopnum() == rop.FINISH: # the only ops with descrs that get recorded in a trace from rpython.jit.metainterp.history import AbstractDescr descr = op.getdescr() - llref = cast_instance_to_gcref(descr) + llref = rgc.cast_instance_to_gcref(descr) new_llref = rgc._make_sure_does_not_move(llref) if we_are_translated(): - new_d = cast_base_ptr_to_instance(AbstractDescr, new_llref) + new_d = rgc.try_cast_gcref_to_instance(AbstractDescr, + new_llref) # tests don't allow this: op.setdescr(new_d) else: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -249,8 +249,9 @@ self._store_and_reset_exception(self.mc, eax) ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') self.mc.MOV_br(ofs, eax.value) - propagate_exception_descr = rffi.cast(lltype.Signed, - cast_instance_to_gcref(self.cpu.propagate_exception_descr)) + propagate_exception_descr = rgc.cast_gcref_to_int( + rgc._make_sure_does_not_move( + rgc.cast_instance_to_gcref(self.cpu.propagate_exception_descr))) ofs = self.cpu.get_ofs_of_frame_field('jf_descr') self.mc.MOV(RawEbpLoc(ofs), imm(propagate_exception_descr)) self.mc.MOV_rr(eax.value, ebp.value) @@ -2202,6 +2203,15 @@ def _call_assembler_check_descr(self, value, tmploc): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') + + if self.cpu.gc_ll_descr.stm: + # value is non-moving, but jf_descr may have a changed + # descr -> different copy + self._stm_ptr_eq_fastpath(self.mc, [mem(eax, ofs), imm(value)], + tmploc) + self.mc.J_il8(rx86.Conditions['NZ'], 0) + return self.mc.get_relative_pos() + self.mc.CMP(mem(eax, ofs), imm(value)) # patched later self.mc.J_il8(rx86.Conditions['E'], 0) # goto B if we get 'done_with_this_frame' diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -539,6 +539,7 @@ PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) + PUSH_m = insn(rex_nw, '\xFF', orbyte(6<<3), mem_reg_plus_const(1)) PUSH_i8 = insn('\x6A', immediate(1, 'b')) PUSH_i32 = insn('\x68', immediate(1, 'i')) def PUSH_i(mc, immed): @@ -549,6 +550,7 @@ POP_r = insn(rex_nw, register(1), '\x58') POP_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) + POP_m = insn(rex_nw, '\x8F', orbyte(0<<3), mem_reg_plus_const(1)) LEA_rb = insn(rex_w, '\x8D', register(1,8), 
stack_bp(2)) LEA_rs = insn(rex_w, '\x8D', register(1,8), stack_sp(2)) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -1,5 +1,5 @@ import py -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr, rclass from rpython.jit.metainterp.history import ResOperation, TargetToken,\ JitCellToken from rpython.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, @@ -21,6 +21,7 @@ from rpython.memory.gc.stmgc import StmGC from rpython.jit.metainterp import history from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.rtyper.llinterp import LLException import itertools, sys import ctypes @@ -219,6 +220,17 @@ cpu = CPU(None, None) cpu.gc_ll_descr = GCDescrStm() + def latest_descr(self, deadframe): + deadframe = lltype.cast_opaque_ptr(JITFRAMEPTR, deadframe) + descr = deadframe.jf_descr + res = history.AbstractDescr.show(self, descr) + assert isinstance(res, history.AbstractFailDescr) + return res + import types + cpu.get_latest_descr = types.MethodType(latest_descr, cpu, + cpu.__class__) + + self.p2wd = cpu.gc_ll_descr.P2Wdescr self.p2rd = cpu.gc_ll_descr.P2Rdescr From noreply at buildbot.pypy.org Wed Aug 14 19:24:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Aug 2013 19:24:04 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix warnings Message-ID: <20130814172404.A62751C0EC3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r475:edbd90784082 Date: 2013-08-14 19:23 +0200 http://bitbucket.org/pypy/stmgc/changeset/edbd90784082/ Log: Fix warnings diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -162,7 +162,7 @@ return 1; /* the only possible case to still get True is if p2 == p1->h_original */ - return (p1 != NULL) && (p1->h_original == p2) && + return (p1 != NULL) && (p1->h_original == (revision_t)p2) && !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL); } diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -177,6 +177,9 @@ extern __thread revision_t stm_private_rev_num; gcptr stm_DirectReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); +gcptr stm_RepeatReadBarrier(gcptr); +gcptr stm_ImmutReadBarrier(gcptr); +gcptr stm_RepeatWriteBarrier(gcptr); static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; static const revision_t GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; From noreply at buildbot.pypy.org Wed Aug 14 19:24:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Aug 2013 19:24:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: in-progress: use the 5 barriers Message-ID: <20130814172441.45C011C0EC3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66153:c7ed6268e742 Date: 2013-08-14 17:45 +0200 http://bitbucket.org/pypy/pypy/changeset/c7ed6268e742/ Log: in-progress: use the 5 barriers diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -24,7 +24,7 @@ res = self.interpret(f1, [-5]) assert res == 42 assert len(self.writemode) == 0 - assert self.barriers == ['P2R'] + assert self.barriers == ['A2R'] def 
test_simple_write(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -37,7 +37,7 @@ self.interpret(f1, [4]) assert x1.foo == 4 assert len(self.writemode) == 1 - assert self.barriers == ['P2W'] + assert self.barriers == ['A2W'] def test_multiple_reads(self): X = lltype.GcStruct('X', ('foo', lltype.Signed), @@ -58,7 +58,7 @@ res = self.interpret(f1, [4]) assert res == -81 assert len(self.writemode) == 0 - assert self.barriers == ['P2R'] + assert self.barriers == ['A2R'] def test_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -81,7 +81,7 @@ self.interpret(f1, [4]) assert len(self.writemode) == 2 - assert self.barriers == ['P2W', 'r2w'] + assert self.barriers == ['A2W', 'V2W'] def test_repeat_read_barrier_after_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -95,7 +95,7 @@ self.interpret(f1, [4]) assert len(self.writemode) == 1 - assert self.barriers == ['P2R'] + assert self.barriers == ['A2R'] def test_write_may_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -109,10 +109,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 36 - assert self.barriers == ['P2R', 'P2W', 'p2r'] + assert self.barriers == ['A2R', 'A2W', 'q2r'] res = self.interpret(f1, [x, x]) assert res == 42 - assert self.barriers == ['P2R', 'P2W', 'P2R'] + assert self.barriers == ['A2R', 'A2W', 'Q2R'] def test_write_cannot_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -127,7 +127,7 @@ y = lltype.malloc(Y, immortal=True) res = self.interpret(f1, [x, y]) assert res == 36 - assert self.barriers == ['P2R', 'P2W'] + assert self.barriers == ['A2R', 'A2W'] def test_call_external_random_effects(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -140,7 +140,7 @@ x = lltype.malloc(X, immortal=True); x.foo = 6 res = self.interpret(f1, [x]) assert res == 36 - assert self.barriers == ['P2R', 'p2r'] + assert self.barriers == ['A2R', 'a2r'] def test_call_external_no_random_effects(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -157,7 +157,7 @@ x = lltype.malloc(X, immortal=True); x.foo = 6 res = self.interpret(f1, [x]) assert res == 36 - assert self.barriers == ['P2R'] + assert self.barriers == ['A2R'] def test_pointer_compare_0(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -190,10 +190,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 0 - assert self.barriers == ['P2W', '='] + assert self.barriers == ['A2W', '='] res = self.interpret(f1, [x, x]) assert res == 1 - assert self.barriers == ['P2W', '='] + assert self.barriers == ['A2W', '='] def test_pointer_compare_3(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -204,10 +204,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 1 - assert self.barriers == ['P2W', '='] + assert self.barriers == ['A2W', '='] res = self.interpret(f1, [x, x]) assert res == 0 - assert self.barriers == ['P2W', '='] + assert self.barriers == ['A2W', '='] def test_pointer_compare_4(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -219,10 +219,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 1 - assert self.barriers == ['P2W', 'P2W'] + assert self.barriers == ['A2W', 'A2W'] res = self.interpret(f1, [x, x]) assert res == 0 - assert self.barriers == ['P2W', 'P2W'] + assert self.barriers == ['A2W', 'A2W'] def test_simple_loop(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -235,7 +235,7 @@ res = self.interpret(f1, [x, 5]) assert res 
== 0 # for now we get this. Later, we could probably optimize it - assert self.barriers == ['P2W', 'p2w', 'p2w', 'p2w', 'p2w'] + assert self.barriers == ['A2W', 'a2w', 'a2w', 'a2w', 'a2w'] def test_subclassing(self): class X: @@ -261,11 +261,11 @@ res = self.interpret(f1, [10]) assert res == 42 + 10 - assert self.barriers == ['p2r', 'p2r', 'p2r'] # from 3 blocks (could be + assert self.barriers == ['a2r', 'a2r', 'a2r'] # from 3 blocks (could be # optimized later) res = self.interpret(f1, [-10]) assert res == 815 - assert self.barriers == ['p2r', 'p2r'] + assert self.barriers == ['a2r', 'a2r'] def test_write_barrier_repeated(self): class X: @@ -278,7 +278,7 @@ return y res = self.interpret(f1, [10]) - assert self.barriers == ['P2W', 'r2w'] + assert self.barriers == ['A2W', 'r2w'] external_stuff = rffi.llexternal('external_stuff', [], lltype.Void, diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -2,7 +2,7 @@ from rpython.rtyper.llinterp import LLFrame from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache from rpython.translator.stm.transform import STMTransformer -from rpython.translator.stm.writebarrier import NEEDS_BARRIER +from rpython.translator.stm.writebarrier import needs_barrier from rpython.conftest import option @@ -33,9 +33,9 @@ if isinstance(p, _stmptr): return p._category if not p: - return 'N' + return None if p._solid: - return 'P' # allocated with immortal=True + return 'A' # allocated with immortal=True raise AssertionError("unknown category on %r" % (p,)) def interpret(self, fn, args): @@ -76,14 +76,16 @@ def check_category(self, p, expected): cat = self.get_category_or_null(p) - assert cat in 'NPRW' + assert cat in 'AQRVW' or cat is None + if expected is not None: + assert cat is not None and cat >= expected return cat def op_stm_barrier(self, kind, obj): frm, middledigit, to = kind assert middledigit == '2' cat = self.check_category(obj, frm) - if not NEEDS_BARRIER[cat, to]: + if not needs_barrier(cat, to): # a barrier, but with no effect self.llinterpreter.tester.barriers.append(kind.lower()) return obj @@ -96,32 +98,38 @@ return ptr2 def op_stm_ptr_eq(self, obj1, obj2): - self.check_category(obj1, 'P') - self.check_category(obj2, 'P') + self.check_category(obj1, None) + self.check_category(obj2, None) self.llinterpreter.tester.barriers.append('=') return obj1 == obj2 def op_getfield(self, obj, field): - if not obj._TYPE.TO._immutable_field(field): - self.check_category(obj, 'R') + if obj._TYPE.TO._immutable_field(field): + expected = 'I' + else: + expected = 'R' + self.check_category(obj, expected) return LLFrame.op_getfield(self, obj, field) def op_setfield(self, obj, fieldname, fieldvalue): - if not obj._TYPE.TO._immutable_field(fieldname): - self.check_category(obj, 'W') - # convert R -> P all other pointers to the same object we can find - for p in self.all_stm_ptrs(): - if p._category == 'R' and p._T == obj._T and p == obj: - _stmptr._category.__set__(p, 'P') + self.check_category(obj, 'W') + # convert R -> Q all other pointers to the same object we can find + for p in self.all_stm_ptrs(): + if p._category == 'R' and p._T == obj._T and p == obj: + _stmptr._category.__set__(p, 'Q') return LLFrame.op_setfield(self, obj, fieldname, fieldvalue) def op_cast_pointer(self, RESTYPE, obj): - cat = self.check_category(obj, 'P') + cat = self.check_category(obj, None) p = 
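
A key to reading the barrier lists asserted in these tests: each entry is "<source category>2<target category>"; an uppercase entry means the barrier really had work to do, while a lowercase one was emitted but turned out to be a no-op for the pointer's current category (op_stm_barrier in transform_support.py lowercases the kind in that case). A tiny model of that bookkeeping, for illustration only:

    def record(barriers, kind, needed):
        barriers.append(kind if needed else kind.lower())

    trace = []
    record(trace, 'A2R', True)      # a real read barrier
    record(trace, 'A2W', False)     # emitted, but had no effect
    assert trace == ['A2R', 'a2w']
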
opimpl.op_cast_pointer(RESTYPE, obj) return _stmptr(p, cat) op_cast_pointer.need_result_type = True def op_malloc(self, obj, flags): + # convert all existing pointers W -> V + for p in self.all_stm_ptrs(): + if p._category == 'W': + _stmptr._category.__set__(p, 'V') p = LLFrame.op_malloc(self, obj, flags) ptr2 = _stmptr(p, 'W') self.llinterpreter.tester.writemode.add(ptr2._obj) diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -11,6 +11,7 @@ def __init__(self, translator): self.translator = translator + self.barrier_counts = {} def transform(self): assert not hasattr(self.translator, 'stm_transformation_applied') @@ -30,6 +31,8 @@ self.collect_analyzer = CollectAnalyzer(self.translator) for graph in self.translator.graphs: insert_stm_barrier(self, graph) + for key, value in sorted(self.barrier_counts.items()): + log("%s: %d barriers" % (key, value[0])) del self.write_analyzer del self.collect_analyzer diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -9,15 +9,6 @@ 'malloc_nonmovable', 'malloc_nonmovable_varsize', ]) -NEEDS_BARRIER = { - ('P', 'R'): True, - ('P', 'W'): True, - ('R', 'R'): False, - ('R', 'W'): True, - ('W', 'R'): False, - ('W', 'W'): False, - } - def unwraplist(list_v): for v in list_v: if isinstance(v, Constant): @@ -42,23 +33,32 @@ return OUTER._immutable_interiorfield(unwraplist(op.args[1:-1])) raise AssertionError(op) +def needs_barrier(frm, to): + return to > frm + def insert_stm_barrier(stmtransformer, graph): """This function uses the following characters for 'categories': - * 'P': a general pointer + * 'A': any general pointer + * 'I': not a stub (immut_read_barrier was applied) + * 'Q': same as R, except needs a repeat_read_barrier * 'R': the read barrier was applied + * 'V': same as W, except needs a repeat_write_barrier * 'W': the write barrier was applied + + The letters are chosen so that a barrier is needed to change a + pointer from category x to category y if and only if y > x. """ graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) def get_category(v): - return category.get(v, 'P') + return category.get(v, 'A') def get_category_or_null(v): if isinstance(v, Constant) and not v.value: - return 'N' - return category.get(v, 'P') + return None + return category.get(v, 'A') def renamings_get(v): if v not in renamings: @@ -77,26 +77,31 @@ wants_a_barrier = {} expand_comparison = set() for op in block.operations: - # [1] XXX we can't leave getarraysize or the immutable getfields - # fully unmodified. We'd need at least some lightweight - # read barrier to detect stubs. For now we just put a - # regular read barrier. - if (op.opname in ('getfield', 'getarrayitem', - 'getinteriorfield', - 'getarraysize', 'getinteriorarraysize', # XXX [1] - ) and - op.result.concretetype is not lltype.Void and - op.args[0].concretetype.TO._gckind == 'gc' and - True): #not is_immutable(op)): XXX see [1] + is_getter = (op.opname in ('getfield', 'getarrayitem', + 'getinteriorfield') and + op.result.concretetype is not lltype.Void and + op.args[0].concretetype.TO._gckind == 'gc') + if (op.opname in ('getarraysize', 'getinteriorarraysize') + or (is_getter and is_immutable(op))): + # we can't leave getarraysize or the immutable getfields + # fully unmodified: we need at least immut_read_barrier + # to detect stubs. 
+ wants_a_barrier[op] = 'I' + + elif is_getter: + # the non-immutable getfields need a regular read barrier wants_a_barrier[op] = 'R' + elif (op.opname in ('setfield', 'setarrayitem', 'setinteriorfield') and op.args[-1].concretetype is not lltype.Void and - op.args[0].concretetype.TO._gckind == 'gc' and - not is_immutable(op)): + op.args[0].concretetype.TO._gckind == 'gc'): + # setfields need a regular write barrier wants_a_barrier[op] = 'W' + elif (op.opname in ('ptr_eq', 'ptr_ne') and op.args[0].concretetype.TO._gckind == 'gc'): + # GC pointer comparison might need special care expand_comparison.add(op) # if wants_a_barrier or expand_comparison: @@ -119,22 +124,20 @@ v_holder = renamings.setdefault(v, [v]) v = v_holder[0] frm = get_category(v) - if NEEDS_BARRIER[frm, to]: - c_info = Constant('%s2%s' % (frm, to), lltype.Void) + if needs_barrier(frm, to): + try: + b = stmtransformer.barrier_counts[frm, to] + except KeyError: + c_info = Constant('%s2%s' % (frm, to), lltype.Void) + b = [0, c_info] + stmtransformer.barrier_counts[frm, to] = b + b[0] += 1 + c_info = b[1] w = varoftype(v.concretetype) newop = SpaceOperation('stm_barrier', [c_info, v], w) newoperations.append(newop) v_holder[0] = w category[w] = to - if to == 'W': - # if any of the other vars in the same path - # points to the same object, they must lose - # their read-status now - for u in block.getvariables(): - if get_category(u) == 'R' \ - and u.concretetype == v.concretetype: - category[u] = 'P' - # newop = SpaceOperation(op.opname, [renamings_get(v) for v in op.args], @@ -144,7 +147,7 @@ if op in expand_comparison: cats = (get_category_or_null(newop.args[0]), get_category_or_null(newop.args[1])) - if 'N' not in cats and cats != ('W', 'W'): + if None not in cats and (cats[0] < 'V' or cats[1] < 'V'): if newop.opname == 'ptr_ne': v = varoftype(lltype.Bool) negop = SpaceOperation('bool_not', [v], @@ -155,28 +158,30 @@ if stmtransformer.collect_analyzer.analyze(op): # this operation can collect: we bring all 'W' - # categories back to 'R', because we would need - # another stm_write_barrier on them afterwards + # categories back to 'V', because we would need + # a repeat_write_barrier on them afterwards for v, cat in category.items(): if cat == 'W': - category[v] = 'R' + category[v] = 'V' effectinfo = stmtransformer.write_analyzer.analyze( op, graphinfo=graphinfo) if effectinfo: if effectinfo is top_set: # this operation can perform random writes: any - # 'R'-category object falls back to 'P' because - # we would need another stm_read_barrier() + # 'R'-category object falls back to 'Q' because + # we would need a repeat_read_barrier() for v, cat in category.items(): if cat == 'R': - category[v] = 'P' + category[v] = 'Q' else: # the same, but only on objects of the right types types = set([entry[1] for entry in effectinfo]) for v in category.keys(): if v.concretetype in types and category[v] == 'R': - category[v] = 'P' + category[v] = 'Q' + # XXX this is likely not general enough: we need + # to consider 'types' or any base type if op.opname in MALLOCS: category[op.result] = 'W' From noreply at buildbot.pypy.org Wed Aug 14 19:24:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Aug 2013 19:24:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Finish generating all new barrier combinations Message-ID: <20130814172442.7B1221C0EC3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66154:cc00343ac4ef Date: 2013-08-14 19:16 +0200 
http://bitbucket.org/pypy/pypy/changeset/cc00343ac4ef/ Log: Finish generating all new barrier combinations diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/breakfinder.py @@ -0,0 +1,22 @@ +from rpython.translator.backendopt import graphanalyze +from rpython.translator.simplify import get_funcobj + + +TRANSACTION_BREAK = set([ + 'stm_commit_transaction', + 'stm_begin_inevitable_transaction', + 'stm_perform_transaction', + ]) + + +class TransactionBreakAnalyzer(graphanalyze.BoolGraphAnalyzer): + + def analyze_simple_operation(self, op, graphinfo): + return op.opname in TRANSACTION_BREAK + + def analyze_external_call(self, op, seen=None): + # if 'funcobj' releases the GIL, then the GIL-releasing + # functions themselves will call stm_commit_transaction + # and stm_begin_inevitable_transaction. This case is + # covered above. + return False diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -1,3 +1,4 @@ +from rpython.rlib.rstm import register_invoke_around_extcall from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.stm.test.transform_support import BaseTestTransform @@ -129,28 +130,40 @@ assert res == 36 assert self.barriers == ['A2R', 'A2W'] - def test_call_external_random_effects(self): + def test_call_external_release_gil(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) def f1(p): + register_invoke_around_extcall() x1 = p.foo - external_stuff() + external_release_gil() x2 = p.foo return x1 * x2 x = lltype.malloc(X, immortal=True); x.foo = 6 res = self.interpret(f1, [x]) assert res == 36 - assert self.barriers == ['A2R', 'a2r'] + assert self.barriers == ['A2R', 'I2R'] - def test_call_external_no_random_effects(self): + def test_call_external_any_gcobj(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) - external_stuff = rffi.llexternal('external_stuff2', [], lltype.Void, - _callable=lambda: None, - random_effects_on_gcobjs=False, - threadsafe=False) def f1(p): + register_invoke_around_extcall() x1 = p.foo - external_stuff() + external_any_gcobj() + x2 = p.foo + return x1 * x2 + + x = lltype.malloc(X, immortal=True); x.foo = 6 + res = self.interpret(f1, [x]) + assert res == 36 + assert self.barriers == ['A2R', 'q2r'] + + def test_call_external_safest(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(p): + register_invoke_around_extcall() + x1 = p.foo + external_safest() x2 = p.foo return x1 * x2 @@ -253,7 +266,7 @@ x = Z() x.foo = 815 x.zbar = 'A' - external_stuff() + external_any_gcobj() result = x.foo # 1 if isinstance(x, Y): # 2 result += x.ybar # 3 @@ -261,11 +274,11 @@ res = self.interpret(f1, [10]) assert res == 42 + 10 - assert self.barriers == ['a2r', 'a2r', 'a2r'] # from 3 blocks (could be + assert self.barriers == ['a2r', 'a2i', 'a2r'] # from 3 blocks (could be # optimized later) res = self.interpret(f1, [-10]) assert res == 815 - assert self.barriers == ['a2r', 'a2r'] + assert self.barriers == ['a2r', 'a2i'] def test_write_barrier_repeated(self): class X: @@ -278,10 +291,18 @@ return y res = self.interpret(f1, [10]) - assert self.barriers == ['A2W', 'r2w'] + assert self.barriers == ['A2W', 'V2W'] -external_stuff = rffi.llexternal('external_stuff', [], lltype.Void, - _callable=lambda: None, - random_effects_on_gcobjs=True, - threadsafe=False) 
+external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=True, + threadsafe=True) # GIL is released +external_any_gcobj = rffi.llexternal('external_any_gcobj', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=True, + threadsafe=False) # GIL is not released +external_safest = rffi.llexternal('external_safest', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=False, + threadsafe=False) # GIL is not released diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -76,7 +76,7 @@ def check_category(self, p, expected): cat = self.get_category_or_null(p) - assert cat in 'AQRVW' or cat is None + assert cat in 'AIQRVW' or cat is None if expected is not None: assert cat is not None and cat >= expected return cat @@ -104,28 +104,33 @@ return obj1 == obj2 def op_getfield(self, obj, field): - if obj._TYPE.TO._immutable_field(field): - expected = 'I' - else: - expected = 'R' - self.check_category(obj, expected) + if obj._TYPE.TO._gckind == 'gc': + if obj._TYPE.TO._immutable_field(field): + expected = 'I' + else: + expected = 'R' + self.check_category(obj, expected) return LLFrame.op_getfield(self, obj, field) def op_setfield(self, obj, fieldname, fieldvalue): - self.check_category(obj, 'W') - # convert R -> Q all other pointers to the same object we can find - for p in self.all_stm_ptrs(): - if p._category == 'R' and p._T == obj._T and p == obj: - _stmptr._category.__set__(p, 'Q') + if obj._TYPE.TO._gckind == 'gc': + self.check_category(obj, 'W') + # convert R -> Q all other pointers to the same object we can find + for p in self.all_stm_ptrs(): + if p._category == 'R' and p._T == obj._T and p == obj: + _stmptr._category.__set__(p, 'Q') return LLFrame.op_setfield(self, obj, fieldname, fieldvalue) def op_cast_pointer(self, RESTYPE, obj): - cat = self.check_category(obj, None) - p = opimpl.op_cast_pointer(RESTYPE, obj) - return _stmptr(p, cat) + if obj._TYPE.TO._gckind == 'gc': + cat = self.check_category(obj, None) + p = opimpl.op_cast_pointer(RESTYPE, obj) + return _stmptr(p, cat) + return LLFrame.op_cast_pointer(self, RESTYPE, obj) op_cast_pointer.need_result_type = True def op_malloc(self, obj, flags): + assert flags['flavor'] == 'gc' # convert all existing pointers W -> V for p in self.all_stm_ptrs(): if p._category == 'W': @@ -134,3 +139,15 @@ ptr2 = _stmptr(p, 'W') self.llinterpreter.tester.writemode.add(ptr2._obj) return ptr2 + + def transaction_break(self): + # convert -> I all other pointers to the same object we can find + for p in self.all_stm_ptrs(): + if p._category > 'I': + _stmptr._category.__set__(p, 'I') + + def op_stm_commit_transaction(self): + self.transaction_break() + + def op_stm_begin_inevitable_transaction(self): + self.transaction_break() diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -3,6 +3,7 @@ from rpython.translator.stm.inevitable import insert_turn_inevitable from rpython.translator.stm.jitdriver import reorganize_around_jit_driver from rpython.translator.stm.threadlocalref import transform_tlref +from rpython.translator.stm.breakfinder import TransactionBreakAnalyzer from rpython.translator.c.support import log from rpython.memory.gctransform.framework import 
CollectAnalyzer @@ -29,12 +30,14 @@ def transform_write_barrier(self): self.write_analyzer = WriteAnalyzer(self.translator) self.collect_analyzer = CollectAnalyzer(self.translator) + self.break_analyzer = TransactionBreakAnalyzer(self.translator) for graph in self.translator.graphs: insert_stm_barrier(self, graph) for key, value in sorted(self.barrier_counts.items()): log("%s: %d barriers" % (key, value[0])) del self.write_analyzer del self.collect_analyzer + del self.break_analyzer def transform_turn_inevitable(self): for graph in self.translator.graphs: diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -156,6 +156,15 @@ newop.result = v newop.opname = 'stm_ptr_eq' + if stmtransformer.break_analyzer.analyze(op): + # this operation can perform a transaction break: + # all pointers are lowered to 'I', because a non- + # stub cannot suddenly point to a stub, but we + # cannot guarantee anything more + for v, cat in category.items(): + if cat > 'I': + category[v] = 'I' + if stmtransformer.collect_analyzer.analyze(op): # this operation can collect: we bring all 'W' # categories back to 'V', because we would need From noreply at buildbot.pypy.org Wed Aug 14 19:24:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Aug 2013 19:24:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Fix barrier choice Message-ID: <20130814172443.BAC391C0EC3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66155:6cf44c497611 Date: 2013-08-14 19:22 +0200 http://bitbucket.org/pypy/pypy/changeset/6cf44c497611/ Log: Fix barrier choice diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -48,19 +48,24 @@ def stm_finalize(funcgen, op): return 'stm_finalize();' -_STM_BARRIER_FUNCS = { # XXX try to see if some combinations can be shorter - 'P2R': 'stm_read_barrier', - 'G2R': 'stm_read_barrier', - 'O2R': 'stm_read_barrier', - 'P2W': 'stm_write_barrier', - 'G2W': 'stm_write_barrier', - 'O2W': 'stm_write_barrier', - 'R2W': 'stm_write_barrier', - } - def stm_barrier(funcgen, op): category_change = op.args[0].value - funcname = _STM_BARRIER_FUNCS[category_change] + frm, middle, to = category_change + assert middle == '2' + if to == 'W': + if frm >= 'V': + funcname = 'stm_repeat_write_barrier' + else: + funcname = 'stm_write_barrier' + elif to == 'R': + if frm >= 'Q': + funcname = 'stm_repeat_read_barrier' + else: + funcname = 'stm_read_barrier' + elif to == 'I': + funcname = 'stm_immut_read_barrier' + else: + raise AssertionError(category_change) assert op.args[1].concretetype == op.result.concretetype arg = funcgen.expr(op.args[1]) result = funcgen.expr(op.result) From noreply at buildbot.pypy.org Wed Aug 14 19:25:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Aug 2013 19:25:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/edbd90784082 Message-ID: <20130814172543.D31A61C0EC3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66156:fe0fc22afd10 Date: 2013-08-14 19:24 +0200 http://bitbucket.org/pypy/pypy/changeset/fe0fc22afd10/ Log: import stmgc/edbd90784082 diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ 
b/rpython/translator/stm/src_stm/extra.c @@ -163,7 +163,7 @@ return 1; /* the only possible case to still get True is if p2 == p1->h_original */ - return (p1 != NULL) && (p1->h_original == p2) && + return (p1 != NULL) && (p1->h_original == (revision_t)p2) && !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL); } diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -9dc18268f0da +edbd90784082 diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -178,6 +178,9 @@ extern __thread revision_t stm_private_rev_num; gcptr stm_DirectReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); +gcptr stm_RepeatReadBarrier(gcptr); +gcptr stm_ImmutReadBarrier(gcptr); +gcptr stm_RepeatWriteBarrier(gcptr); static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; static const revision_t GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; From noreply at buildbot.pypy.org Wed Aug 14 20:35:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Aug 2013 20:35:54 +0200 (CEST) Subject: [pypy-commit] pypy default: Don't crash when we're completely confused and about to print a "ooops Message-ID: <20130814183554.E00581C357A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66157:efbddbfc2569 Date: 2013-08-14 20:35 +0200 http://bitbucket.org/pypy/pypy/changeset/efbddbfc2569/ Log: Don't crash when we're completely confused and about to print a "ooops something went wrong" kind of message, sys.stderr exists, but is not pointing to a writable file. Just shrug and ignore this part of the message rather than blow up. 
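A minimal standalone sketch of the pattern applied below, with a hypothetical helper name (not PyPy's actual handle_exception code): the whole fallback report is attempted inside one try block, so a missing or unwritable sys.stderr is simply ignored.

    import sys

    def print_excepthook_failure(originalexcepthook, exc_info):
        # Best effort only: if sys.stderr is gone or cannot be written to,
        # skip the extra message.  The caller still invokes the original
        # excepthook afterwards, as app_main.handle_exception does.
        try:
            stderr = sys.stderr
            stderr.write('Error calling sys.excepthook:\n')
            originalexcepthook(*exc_info)
            stderr.write('\nOriginal exception was:\n')
        except:
            pass  # too bad

The same change shows up again further down in the py3k merge (changeset 8dc61b6df4f4), using print() syntax.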
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -119,13 +119,12 @@ except: try: stderr = sys.stderr - except AttributeError: - pass # too bad - else: print >> stderr, 'Error calling sys.excepthook:' originalexcepthook(*sys.exc_info()) print >> stderr print >> stderr, 'Original exception was:' + except: + pass # too bad # we only get here if sys.excepthook didn't do its job originalexcepthook(etype, evalue, etraceback) From noreply at buildbot.pypy.org Wed Aug 14 23:32:56 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 14 Aug 2013 23:32:56 +0200 (CEST) Subject: [pypy-commit] pypy py3k: readapt 50440c0c0292 to the kill-gen-store-back-in branch changes: preserve the Message-ID: <20130814213256.3566D1C00EC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r66158:9d5d630fae26 Date: 2013-08-14 14:29 -0700 http://bitbucket.org/pypy/pypy/changeset/9d5d630fae26/ Log: readapt 50440c0c0292 to the kill-gen-store-back-in branch changes: preserve the exception state between generator yields diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -78,7 +78,7 @@ next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled except Yield: - self.last_exception = None + # preserve self.last_exception between generator yields w_result = self.popvalue() jit.hint(self, force_virtualizable=True) return w_result From noreply at buildbot.pypy.org Thu Aug 15 07:57:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Aug 2013 07:57:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Prebuilt objects cannot be stubs. Message-ID: <20130815055707.2B8A91C02DB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66159:89a6e347857d Date: 2013-08-15 07:54 +0200 http://bitbucket.org/pypy/pypy/changeset/89a6e347857d/ Log: Prebuilt objects cannot be stubs. 
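To summarize the rule that this barrier series implements: the category letters are ordered 'A' < 'I' < 'Q' < 'R' < 'V' < 'W', so a plain character comparison decides both whether a barrier is needed (needs_barrier() in writebarrier.py) and which C function to emit (stm_barrier() in funcgen.py, changeset 6cf44c497611). A standalone Python sketch, for illustration only and not part of any changeset; barrier_name is just a local name for the sketch:

    # Categories, in barrier order: 'A' any pointer, 'I' not a stub,
    # 'Q' read barrier needs repeating, 'R' read barrier applied,
    # 'V' write barrier needs repeating, 'W' write barrier applied.
    def needs_barrier(frm, to):
        return to > frm            # same test as writebarrier.py

    def barrier_name(frm, to):
        # mirrors stm_barrier() in funcgen.py
        if to == 'W':
            return 'stm_repeat_write_barrier' if frm >= 'V' else 'stm_write_barrier'
        elif to == 'R':
            return 'stm_repeat_read_barrier' if frm >= 'Q' else 'stm_read_barrier'
        elif to == 'I':
            return 'stm_immut_read_barrier'
        raise AssertionError((frm, to))

    assert not needs_barrier('W', 'R')   # no barrier: 'W' already implies readable
    assert barrier_name('A', 'W') == 'stm_write_barrier'
    assert barrier_name('V', 'W') == 'stm_repeat_write_barrier'
    assert barrier_name('Q', 'R') == 'stm_repeat_read_barrier'

With the change below, prebuilt constants start in category 'I' instead of 'A', which is why the expected barriers in test_writebarrier.py turn from A2R/A2W into I2R/I2W.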
diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -25,7 +25,7 @@ res = self.interpret(f1, [-5]) assert res == 42 assert len(self.writemode) == 0 - assert self.barriers == ['A2R'] + assert self.barriers == ['I2R'] def test_simple_write(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -38,7 +38,7 @@ self.interpret(f1, [4]) assert x1.foo == 4 assert len(self.writemode) == 1 - assert self.barriers == ['A2W'] + assert self.barriers == ['I2W'] def test_multiple_reads(self): X = lltype.GcStruct('X', ('foo', lltype.Signed), @@ -59,7 +59,7 @@ res = self.interpret(f1, [4]) assert res == -81 assert len(self.writemode) == 0 - assert self.barriers == ['A2R'] + assert self.barriers == ['I2R'] def test_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -82,7 +82,7 @@ self.interpret(f1, [4]) assert len(self.writemode) == 2 - assert self.barriers == ['A2W', 'V2W'] + assert self.barriers == ['I2W', 'V2W'] def test_repeat_read_barrier_after_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -96,7 +96,7 @@ self.interpret(f1, [4]) assert len(self.writemode) == 1 - assert self.barriers == ['A2R'] + assert self.barriers == ['I2R'] def test_write_may_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -291,7 +291,39 @@ return y res = self.interpret(f1, [10]) - assert self.barriers == ['A2W', 'V2W'] + assert self.barriers == ['I2W', 'V2W'] + + def test_read_immutable(self): + class Foo: + _immutable_ = True + + def f1(n): + x = Foo() + if n > 1: + x.foo = n + return x.foo + + res = self.interpret(f1, [4]) + assert res == 4 + assert self.barriers == ['a2w', 'a2i'] + + def test_read_immutable_prebuilt(self): + class Foo: + _immutable_ = True + x1 = Foo() + x1.foo = 42 + x2 = Foo() + x2.foo = 81 + + def f1(n): + if n > 1: + return x2.foo + else: + return x1.foo + + res = self.interpret(f1, [4]) + assert res == 81 + assert self.barriers == [] external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -35,7 +35,7 @@ if not p: return None if p._solid: - return 'A' # allocated with immortal=True + return 'I' # allocated with immortal=True raise AssertionError("unknown category on %r" % (p,)) def interpret(self, fn, args): diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -53,7 +53,11 @@ graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) def get_category(v): - return category.get(v, 'A') + if isinstance(v, Constant): + default = 'I' # prebuilt objects cannot be stubs + else: + default = 'A' + return category.get(v, default) def get_category_or_null(v): if isinstance(v, Constant) and not v.value: From noreply at buildbot.pypy.org Thu Aug 15 08:03:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Aug 2013 08:03:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: If gcremovetypeptr, we can access directly the typeptr Message-ID: <20130815060341.C1B321C02DB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66160:c238e056d75b Date: 2013-08-15 
08:03 +0200 http://bitbucket.org/pypy/pypy/changeset/c238e056d75b/ Log: If gcremovetypeptr, we can access directly the typeptr field even on a stub. diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -325,6 +325,42 @@ assert res == 81 assert self.barriers == [] + def test_isinstance(self): + class Base: pass + class A(Base): pass + + def f1(n): + if n > 1: + x = Base() + else: + x = A() + return isinstance(x, A) + + res = self.interpret(f1, [5]) + assert res == False + assert self.barriers == ['a2i'] + res = self.interpret(f1, [-5]) + assert res == True + assert self.barriers == ['a2i'] + + def test_isinstance_gcremovetypeptr(self): + class Base: pass + class A(Base): pass + + def f1(n): + if n > 1: + x = Base() + else: + x = A() + return isinstance(x, A) + + res = self.interpret(f1, [5], gcremovetypeptr=True) + assert res == False + assert self.barriers == [] + res = self.interpret(f1, [-5], gcremovetypeptr=True) + assert res == True + assert self.barriers == [] + external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, _callable=lambda: None, diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -38,7 +38,7 @@ return 'I' # allocated with immortal=True raise AssertionError("unknown category on %r" % (p,)) - def interpret(self, fn, args): + def interpret(self, fn, args, gcremovetypeptr=False): self.build_state() clear_tcache() interp, self.graph = get_interpreter(fn, args, view=False) @@ -46,6 +46,7 @@ interp.frame_class = LLSTMFrame # self.translator = interp.typer.annotator.translator + self.translator.config.translation.gcremovetypeptr = gcremovetypeptr self.stmtransformer = STMTransformer(self.translator) if self.do_jit_driver: self.stmtransformer.transform_jit_driver() diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -51,6 +51,8 @@ pointer from category x to category y if and only if y > x. 
""" graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) + gcremovetypeptr = ( + stmtransformer.translator.config.translation.gcremovetypeptr) def get_category(v): if isinstance(v, Constant): @@ -85,7 +87,15 @@ 'getinteriorfield') and op.result.concretetype is not lltype.Void and op.args[0].concretetype.TO._gckind == 'gc') - if (op.opname in ('getarraysize', 'getinteriorarraysize') + + if (gcremovetypeptr and op.opname in ('getfield', 'setfield') and + op.args[1].value == 'typeptr' and + op.args[0].concretetype.TO._hints.get('typeptr')): + # if gcremovetypeptr, we can access directly the typeptr + # field even on a stub + pass + + elif (op.opname in ('getarraysize', 'getinteriorarraysize') or (is_getter and is_immutable(op))): # we can't leave getarraysize or the immutable getfields # fully unmodified: we need at least immut_read_barrier From noreply at buildbot.pypy.org Thu Aug 15 11:26:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Aug 2013 11:26:48 +0200 (CEST) Subject: [pypy-commit] stmgc default: Forgot that d->abortinfo also contains gc ptrs Message-ID: <20130815092648.596041C02DB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r476:50d9d16d6327 Date: 2013-08-15 11:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/50d9d16d6327/ Log: Forgot that d->abortinfo also contains gc ptrs diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -496,6 +496,14 @@ visit_take_protected(d->thread_local_obj_ref); visit_take_protected(&d->old_thread_local_obj); + /* the abortinfo objects */ + long i, size = d->abortinfo.size; + gcptr *items = d->abortinfo.items; + for (i = 0; i < size; i += 2) { + visit_take_protected(&items[i]); + /* items[i+1] is not a gc ptr */ + } + /* the current transaction's private copies of public objects */ wlog_t *item; G2L_LOOP_FORWARD(d->public_to_private, item) { @@ -527,8 +535,8 @@ } G2L_LOOP_END; /* reinsert to real pub_to_priv */ - long i, size = new_public_to_private.size; - gcptr *items = new_public_to_private.items; + size = new_public_to_private.size; + items = new_public_to_private.items; for (i = 0; i < size; i += 2) { g2l_insert(&d->public_to_private, items[i], items[i + 1]); } diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -436,6 +436,19 @@ spinlock_release(d->public_descriptor->collection_lock); } +static void mark_extra_stuff(struct tx_descriptor *d) +{ + visit_if_young(d->thread_local_obj_ref); + visit_if_young(&d->old_thread_local_obj); + + long i, size = d->abortinfo.size; + gcptr *items = d->abortinfo.items; + for (i = 0; i < size; i += 2) { + visit_if_young(&items[i]); + /* items[i+1] is not a gc ptr */ + } +} + static void minor_collect(struct tx_descriptor *d) { dprintf(("minor collection [%p to %p]\n", @@ -451,8 +464,7 @@ mark_young_roots(d); - visit_if_young(d->thread_local_obj_ref); - visit_if_young(&d->old_thread_local_obj); + mark_extra_stuff(d); mark_stolen_young_stubs(d); From noreply at buildbot.pypy.org Thu Aug 15 11:27:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Aug 2013 11:27:50 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/50d9d16d6327 Message-ID: <20130815092750.6020D1C02DB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66161:246fc4c25260 Date: 2013-08-15 11:27 +0200 http://bitbucket.org/pypy/pypy/changeset/246fc4c25260/ Log: import stmgc/50d9d16d6327 diff --git a/rpython/translator/stm/src_stm/gcpage.c 
b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -497,6 +497,14 @@ visit_take_protected(d->thread_local_obj_ref); visit_take_protected(&d->old_thread_local_obj); + /* the abortinfo objects */ + long i, size = d->abortinfo.size; + gcptr *items = d->abortinfo.items; + for (i = 0; i < size; i += 2) { + visit_take_protected(&items[i]); + /* items[i+1] is not a gc ptr */ + } + /* the current transaction's private copies of public objects */ wlog_t *item; G2L_LOOP_FORWARD(d->public_to_private, item) { @@ -528,8 +536,8 @@ } G2L_LOOP_END; /* reinsert to real pub_to_priv */ - long i, size = new_public_to_private.size; - gcptr *items = new_public_to_private.items; + size = new_public_to_private.size; + items = new_public_to_private.items; for (i = 0; i < size; i += 2) { g2l_insert(&d->public_to_private, items[i], items[i + 1]); } diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -437,6 +437,19 @@ spinlock_release(d->public_descriptor->collection_lock); } +static void mark_extra_stuff(struct tx_descriptor *d) +{ + visit_if_young(d->thread_local_obj_ref); + visit_if_young(&d->old_thread_local_obj); + + long i, size = d->abortinfo.size; + gcptr *items = d->abortinfo.items; + for (i = 0; i < size; i += 2) { + visit_if_young(&items[i]); + /* items[i+1] is not a gc ptr */ + } +} + static void minor_collect(struct tx_descriptor *d) { dprintf(("minor collection [%p to %p]\n", @@ -452,8 +465,7 @@ mark_young_roots(d); - visit_if_young(d->thread_local_obj_ref); - visit_if_young(&d->old_thread_local_obj); + mark_extra_stuff(d); mark_stolen_young_stubs(d); diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -edbd90784082 +50d9d16d6327 From noreply at buildbot.pypy.org Thu Aug 15 13:08:34 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Aug 2013 13:08:34 +0200 (CEST) Subject: [pypy-commit] stmgc nonmovable-int-ref: implement stm_allocate_public_integer_address(). They need to be explicitely unregistered (freed), otherwise they survive everything. Message-ID: <20130815110834.CC82E1C0170@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: nonmovable-int-ref Changeset: r477:6a465701aaa1 Date: 2013-08-15 13:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/6a465701aaa1/ Log: implement stm_allocate_public_integer_address(). They need to be explicitely unregistered (freed), otherwise they survive everything. 
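Intended usage, sketched after the new test in c4/test/test_extra.py from this patch (nalloc, HDR, major_collect, check_not_free and the lib/ffi objects come from the c4 test support code; this fragment is an illustration, not part of the changeset):

    p = nalloc(HDR)                    # a young object
    lib.stm_push_root(p)               # the call below may minor-collect
    ip = lib.stm_allocate_public_integer_address(p)
    p = lib.stm_pop_root()

    # 'ip' is really a small public stub; its h_revision holds the object
    # address | 2 (compare check_public_ints() in demo_random.c below)
    stub = ffi.cast("gcptr", ip)
    assert ffi.cast("gcptr", stub.h_revision - 2) == p

    major_collect()
    check_not_free(stub)               # stays alive while registered

    lib.stm_unregister_integer_address(ip)   # from here on it can be collected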
diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -20,6 +20,7 @@ #define MAXROOTS 1000 #define SHARED_ROOTS 5 // shared by threads #define DO_MAJOR_COLLECTS 1 +#define MAX_PUBLIC_INTS 5 @@ -82,6 +83,8 @@ int interruptible; int atomic; char to_clear_on_abort[20]; + intptr_t public_ints[MAX_PUBLIC_INTS]; + int num_public_ints; }; __thread struct thread_data td; @@ -266,6 +269,38 @@ } } +void check_public_ints() +{ + int i; + for (i = 0; i < td.num_public_ints; i++) { + intptr_t ip = td.public_ints[i]; + gcptr obj = (gcptr)ip; + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(obj->h_tid & GCFLAG_SMALLSTUB); + check(obj); + check((gcptr)(obj->h_revision - 2)); + } +} + +void add_as_public_int(gcptr p) +{ + if (!p || td.num_public_ints >= MAX_PUBLIC_INTS) + return; + + push_roots(); + intptr_t ip = stm_allocate_public_integer_address(p); + pop_roots(); + td.public_ints[td.num_public_ints++] = ip; +} + +void pop_public_int() +{ + if (td.num_public_ints == 0) + return; + + stm_unregister_integer_address(td.public_ints[--td.num_public_ints]); +} + gcptr read_barrier(gcptr p) { gcptr r = p; @@ -401,6 +436,7 @@ gcptr rare_events(gcptr p, gcptr _r, gcptr _sr) { + check_public_ints(); int k = get_rand(100); if (k < 10) { push_roots(); @@ -408,13 +444,22 @@ stm_become_inevitable("fun"); p = stm_pop_root(); pop_roots(); - } + } else if (k < 40) { push_roots(); stmgc_minor_collect(); pop_roots(); p = NULL; - } else if (k < 41 && DO_MAJOR_COLLECTS) { + } + else if (k < 50) { + add_as_public_int(p); + p = NULL; + } + else if (k < 60) { + pop_public_int(); + p = NULL; + } + else if (k < 61 && DO_MAJOR_COLLECTS) { fprintf(stdout, "major collect\n"); push_roots(); stmgcpage_possibly_major_collect(1); diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -6,7 +6,6 @@ */ #include "stmimpl.h" -#ifdef _GC_DEBUG char tmp_buf[128]; char* stm_dbg_get_hdr_str(gcptr obj) { @@ -26,7 +25,6 @@ cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); return tmp_buf; } -#endif diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -23,6 +23,51 @@ stm_bytes_to_clear_on_abort = bytes; } + +intptr_t stm_allocate_public_integer_address(gcptr obj) +{ + struct tx_descriptor *d = thread_descriptor; + gcptr stub; + intptr_t result; + /* plan: we allocate a small stub whose reference + we never give to the caller except in the form + of an integer. + During major collections, we visit them and update + their references. */ + + /* we don't want to deal with young objs */ + if (!(obj->h_tid & GCFLAG_OLD)) { + stm_push_root(obj); + stm_minor_collect(); + obj = stm_pop_root(); + } + + spinlock_acquire(d->public_descriptor->collection_lock, 'P'); + + stub = stm_stub_malloc(d->public_descriptor, 0); + stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) + | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_SMALLSTUB + | GCFLAG_OLD; + + stub->h_revision = ((revision_t)obj) | 2; + if (!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) && obj->h_original) { + stub->h_original = obj->h_original; + } + else { + stub->h_original = (revision_t)obj; + } + + result = (intptr_t)stub; + spinlock_release(d->public_descriptor->collection_lock); + stm_register_integer_address(result); + return result; +} + + + + + + /************************************************************/ /* Each object has a h_original pointer to an old copy of the same object (e.g. an old revision), the "original". 
diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -22,6 +22,9 @@ /* Only computed during a major collection */ static size_t mc_total_in_use, mc_total_reserved; +/* keeps track of registered smallstubs that will survive unless unregistered */ +static struct G2L registered_stubs; + /* For tests */ long stmgcpage_count(int quantity) { @@ -62,6 +65,8 @@ nblocks_for_size[i] = (GC_PAGE_SIZE - sizeof(page_header_t)) / (WORD * i); } + + memset(®istered_stubs, 0, sizeof(registered_stubs)); } void stmgcpage_init_tls(void) @@ -208,6 +213,34 @@ } +/***** registering of small stubs as integer addresses *****/ + +void stm_register_integer_address(intptr_t adr) +{ + gcptr obj = (gcptr)adr; + assert(obj->h_tid & GCFLAG_SMALLSTUB); + assert(obj->h_tid & GCFLAG_PUBLIC); + + stmgcpage_acquire_global_lock(); + g2l_insert(®istered_stubs, obj, NULL); + stmgcpage_release_global_lock(); + dprintf(("registered %p\n", obj)); +} + +void stm_unregister_integer_address(intptr_t adr) +{ + gcptr obj = (gcptr)adr; + assert(obj->h_tid & GCFLAG_SMALLSTUB); + assert(obj->h_tid & GCFLAG_PUBLIC); + + stmgcpage_acquire_global_lock(); + g2l_delete_item(®istered_stubs, obj); + stmgcpage_release_global_lock(); + dprintf(("unregistered %p\n", obj)); +} + + + /***** Major collections: marking *****/ static struct GcPtrList objects_to_trace; @@ -459,6 +492,27 @@ } } +static void mark_registered_stubs(void) +{ + wlog_t *item; + G2L_LOOP_FORWARD(registered_stubs, item) { + gcptr R = item->addr; + assert(R->h_tid & GCFLAG_SMALLSTUB); + + R->h_tid |= (GCFLAG_MARKED | GCFLAG_VISITED); + + gcptr L = (gcptr)(R->h_revision - 2); + L = stmgcpage_visit(L); + R->h_revision = ((revision_t)L) | 2; + + /* h_original will be kept up-to-date because + it is either == L or L's h_original. And + h_originals don't move */ + } G2L_LOOP_END; + +} + + static void mark_roots(gcptr *root, gcptr *end) { assert(*root == END_MARKER_ON); @@ -889,6 +943,7 @@ assert(gcptrlist_size(&objects_to_trace) == 0); mark_prebuilt_roots(); + mark_registered_stubs(); mark_all_stack_roots(); do { visit_all_objects(); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -19,6 +19,59 @@ }; static __thread struct tx_steal_data *steal_data; +static void replace_ptr_to_immutable_with_stub(gcptr * pobj) +{ + gcptr stub, obj = *pobj; + assert(obj->h_tid & GCFLAG_IMMUTABLE); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + if (obj->h_tid & GCFLAG_PUBLIC) { + /* young public, replace with stolen old copy */ + assert(obj->h_tid & GCFLAG_MOVED); + assert(IS_POINTER(obj->h_revision)); + stub = (gcptr)obj->h_revision; + assert(!IS_POINTER(stub->h_revision)); /* not outdated */ + goto done; + } + + /* old or young protected! mark as PUBLIC */ + if (!(obj->h_tid & GCFLAG_OLD)) { + /* young protected */ + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); + + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; + } + obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. 
*/ + dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); + stub = O; + goto done; + } + /* old protected: */ + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + + return; + done: + *pobj = stub; + dprintf((" stolen: fixing *%p: %p -> %p\n", pobj, obj, stub)); +} + static void replace_ptr_to_protected_with_stub(gcptr *pobj) { gcptr stub, obj = *pobj; @@ -27,49 +80,7 @@ return; if (obj->h_tid & GCFLAG_IMMUTABLE) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - if (obj->h_tid & GCFLAG_PUBLIC) { - /* young public, replace with stolen old copy */ - assert(obj->h_tid & GCFLAG_MOVED); - assert(IS_POINTER(obj->h_revision)); - stub = (gcptr)obj->h_revision; - assert(!IS_POINTER(stub->h_revision)); /* not outdated */ - goto done; - } - - /* old or young protected! mark as PUBLIC */ - if (!(obj->h_tid & GCFLAG_OLD)) { - /* young protected */ - gcptr O; - - if (obj->h_tid & GCFLAG_HAS_ID) { - /* use id-copy for us */ - O = (gcptr)obj->h_original; - obj->h_tid &= ~GCFLAG_HAS_ID; - stm_copy_to_old_id_copy(obj, O); - O->h_original = 0; - } else { - O = stmgc_duplicate_old(obj); - - /* young and without original? */ - if (!(obj->h_original)) - obj->h_original = (revision_t)O; - } - obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); - obj->h_revision = (revision_t)O; - - O->h_tid |= GCFLAG_PUBLIC; - /* here it is fine if it stays in read caches because - the object is immutable anyway and there are no - write_barriers allowed. */ - dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); - stub = O; - goto done; - } - /* old protected: */ - dprintf(("prot immutable -> public: %p\n", obj)); - obj->h_tid |= GCFLAG_PUBLIC; - + replace_ptr_to_immutable_with_stub(pobj); return; } diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -28,12 +28,21 @@ #define PREBUILT_REVISION 1 +/* push roots around allocating functions! */ + /* allocate an object out of the local nursery */ gcptr stm_allocate(size_t size, unsigned long tid); /* allocate an object that is be immutable. it cannot be changed with a stm_write_barrier() or after the next commit */ gcptr stm_allocate_immutable(size_t size, unsigned long tid); +/* allocates a public reference to the object that will + not be freed until stm_unregister_integer_address is + called on the result */ +intptr_t stm_allocate_public_integer_address(gcptr); +void stm_unregister_integer_address(intptr_t); + + /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); /* returns a number for the object which is unique during its lifetime */ @@ -166,6 +175,8 @@ extern __thread void *stm_to_clear_on_abort; extern __thread size_t stm_bytes_to_clear_on_abort; +/* only user currently is stm_allocate_public_integer_address() */ +void stm_register_integer_address(intptr_t); /* macro functionality */ diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -47,6 +47,9 @@ #define PREBUILT_REVISION ... 
gcptr stm_allocate(size_t size, unsigned long tid); + gcptr stm_allocate_immutable(size_t size, unsigned long tid); + intptr_t stm_allocate_public_integer_address(gcptr adr); + void stm_unregister_integer_address(intptr_t adr); revision_t stm_hash(gcptr); revision_t stm_id(gcptr); _Bool stm_pointer_equal(gcptr, gcptr); diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -156,3 +156,47 @@ 0, 0, 0, 0, 0, 0] + + + +def test_allocate_public_integer_address(): + p1 = palloc(HDR) + p2 = oalloc(HDR) + p3 = nalloc(HDR) + lib.stm_push_root(p3) + p3p = lib.stm_allocate_public_integer_address(p3) + p1p = lib.stm_allocate_public_integer_address(p1) + p2p = lib.stm_allocate_public_integer_address(p2) + + # p3 stub points to p3o: + p3o = lib.stm_pop_root() + p3po = ffi.cast("gcptr", p3p) + assert ffi.cast("gcptr", p3po.h_revision - 2) == p3o + + # we have stubs here: + assert ffi.cast("gcptr", p1p).h_tid & GCFLAG_PUBLIC + assert classify(ffi.cast("gcptr", p1p)) == 'stub' + assert classify(ffi.cast("gcptr", p2p)) == 'stub' + assert classify(ffi.cast("gcptr", p3p)) == 'stub' + + major_collect() + + # kept alive through stubs: + check_not_free(p3o) + check_not_free(p2) + + check_not_free(ffi.cast("gcptr", p1p)) + check_not_free(ffi.cast("gcptr", p2p)) + check_not_free(ffi.cast("gcptr", p3p)) + + lib.stm_unregister_integer_address(p1p) + lib.stm_unregister_integer_address(p2p) + lib.stm_unregister_integer_address(p3p) + + major_collect() + major_collect() + + check_free_old(p3o) + check_free_old(p2) + + From noreply at buildbot.pypy.org Thu Aug 15 16:58:57 2013 From: noreply at buildbot.pypy.org (hakanardo) Date: Thu, 15 Aug 2013 16:58:57 +0200 (CEST) Subject: [pypy-commit] pypy default: failing test Message-ID: <20130815145857.1967B1C1380@cobra.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r66162:6e2656749ce4 Date: 2013-08-15 16:56 +0200 http://bitbucket.org/pypy/pypy/changeset/6e2656749ce4/ Log: failing test diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -874,6 +874,70 @@ assert l assert l[0] is None or len(l[0]) == 0 + def test_assign_object_with_special_methods(self): + from array import array + + class Num(object): + def __float__(self): + return 5.25 + + def __int__(self): + return 7 + + class NotNum(object): + pass + + class Silly(object): + def __float__(self): + return None + + def __int__(self): + return None + + class OldNum: + def __float__(self): + return 6.25 + + def __int__(self): + return 8 + + class OldNotNum: + pass + + class OldSilly: + def __float__(self): + return None + + def __int__(self): + return None + + for tc in 'bBhHiIlL': + a = array(tc, [0]) + raises(TypeError, a.__setitem__, 0, 1.0) + a[0] = 1 + a[0] = Num() + assert a[0] == 7 + raises(TypeError, a.__setitem__, NotNum()) + a[0] = OldNum() + assert a[0] == 8 + raises(TypeError, a.__setitem__, OldNotNum()) + raises(TypeError, a.__setitem__, Silly()) + raises(TypeError, a.__setitem__, OldSilly()) + + for tc in 'fd': + a = array(tc, [0]) + a[0] = 1.0 + a[0] = 1 + a[0] = Num() + assert a[0] == 5.25 + raises(TypeError, a.__setitem__, NotNum()) + a[0] = OldNum() + assert a[0] == 6.25 + raises(TypeError, a.__setitem__, OldNotNum()) + raises(TypeError, a.__setitem__, Silly()) + raises(TypeError, a.__setitem__, OldSilly()) + + class TestCPythonsOwnArray(BaseArrayTests): From noreply at buildbot.pypy.org Thu Aug 15 17:07:55 2013 
From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 15 Aug 2013 17:07:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a test for the conversion of numpy complex to python complex Message-ID: <20130815150755.204DF1C1380@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66163:8d499d0d72d5 Date: 2013-08-15 17:06 +0200 http://bitbucket.org/pypy/pypy/changeset/8d499d0d72d5/ Log: Add a test for the conversion of numpy complex to python complex diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -685,3 +685,8 @@ msg=error_message) sys.stderr.write('.') sys.stderr.write('\n') + + def test_complexbox_to_pycomplex(self): + from numpypy import complex128 + x = complex128(3.4j) + assert complex(x) == 3.4j From noreply at buildbot.pypy.org Thu Aug 15 18:27:33 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Aug 2013 18:27:33 +0200 (CEST) Subject: [pypy-commit] stmgc nonmovable-int-ref: merge Message-ID: <20130815162733.57CE71C0170@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: nonmovable-int-ref Changeset: r478:7924c243aab1 Date: 2013-08-15 13:09 +0200 http://bitbucket.org/pypy/stmgc/changeset/7924c243aab1/ Log: merge diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -550,6 +550,14 @@ visit_take_protected(d->thread_local_obj_ref); visit_take_protected(&d->old_thread_local_obj); + /* the abortinfo objects */ + long i, size = d->abortinfo.size; + gcptr *items = d->abortinfo.items; + for (i = 0; i < size; i += 2) { + visit_take_protected(&items[i]); + /* items[i+1] is not a gc ptr */ + } + /* the current transaction's private copies of public objects */ wlog_t *item; G2L_LOOP_FORWARD(d->public_to_private, item) { @@ -581,8 +589,8 @@ } G2L_LOOP_END; /* reinsert to real pub_to_priv */ - long i, size = new_public_to_private.size; - gcptr *items = new_public_to_private.items; + size = new_public_to_private.size; + items = new_public_to_private.items; for (i = 0; i < size; i += 2) { g2l_insert(&d->public_to_private, items[i], items[i + 1]); } diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -436,6 +436,19 @@ spinlock_release(d->public_descriptor->collection_lock); } +static void mark_extra_stuff(struct tx_descriptor *d) +{ + visit_if_young(d->thread_local_obj_ref); + visit_if_young(&d->old_thread_local_obj); + + long i, size = d->abortinfo.size; + gcptr *items = d->abortinfo.items; + for (i = 0; i < size; i += 2) { + visit_if_young(&items[i]); + /* items[i+1] is not a gc ptr */ + } +} + static void minor_collect(struct tx_descriptor *d) { dprintf(("minor collection [%p to %p]\n", @@ -451,8 +464,7 @@ mark_young_roots(d); - visit_if_young(d->thread_local_obj_ref); - visit_if_young(&d->old_thread_local_obj); + mark_extra_stuff(d); mark_stolen_young_stubs(d); From noreply at buildbot.pypy.org Thu Aug 15 18:27:34 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Aug 2013 18:27:34 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix embarrassing bug Message-ID: <20130815162734.7B6511C04A5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r479:c3bb5c223595 Date: 2013-08-15 18:19 +0200 http://bitbucket.org/pypy/stmgc/changeset/c3bb5c223595/ Log: fix embarrassing bug diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1339,9 +1339,13 @@ and then free B, which will not be used any more. 
*/ size_t size = stmgc_size(B); assert(B->h_tid & GCFLAG_BACKUP_COPY); + /* if h_original was 0, it must stay that way and not point + to itself. (B->h_original may point to P) */ + revision_t h_original = P->h_original; memcpy(((char *)P) + offsetof(struct stm_object_s, h_revision), ((char *)B) + offsetof(struct stm_object_s, h_revision), size - offsetof(struct stm_object_s, h_revision)); + P->h_original = h_original; assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); stmgcpage_free(B); dprintf(("abort: free backup at %p\n", B)); diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -91,6 +91,8 @@ return (revision_t)p; } + assert(p->h_original != (revision_t)p); + dprintf(("stm_id(%p) has orig fst: %p\n", p, (gcptr)p->h_original)); return p->h_original; diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -175,6 +175,7 @@ stm_copy_to_old_id_copy(obj, id_obj); fresh_old_copy = id_obj; + fresh_old_copy->h_original = 0; obj->h_tid &= ~GCFLAG_HAS_ID; } else { diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -156,3 +156,17 @@ 0, 0, 0, 0, 0, 0] + +def test_bug(): + p1 = nalloc(HDR) + pid = lib.stm_id(p1) + lib.stm_push_root(p1) + minor_collect() + p1o = lib.stm_pop_root() + + assert p1o == ffi.cast("gcptr", pid) + assert follow_original(p1o) == ffi.NULL + + + + From noreply at buildbot.pypy.org Thu Aug 15 18:27:35 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 Aug 2013 18:27:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: other test Message-ID: <20130815162735.9D7251C1055@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r480:e1a459d18ba1 Date: 2013-08-15 18:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/e1a459d18ba1/ Log: other test diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -167,6 +167,16 @@ assert p1o == ffi.cast("gcptr", pid) assert follow_original(p1o) == ffi.NULL - +def test_bug2(): + p = oalloc(HDR+WORD) + + def cb(c): + if c == 0: + pw = lib.stm_write_barrier(p) + abort_and_retry() + lib.stm_push_root(p) + perform_transaction(cb) + p = lib.stm_pop_root() + assert follow_original(p) == ffi.NULL From noreply at buildbot.pypy.org Thu Aug 15 19:08:08 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 15 Aug 2013 19:08:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix PyPy issue 1544 Message-ID: <20130815170808.C31801C04A5@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66164:ce1c1b2ad027 Date: 2013-08-15 19:07 +0200 http://bitbucket.org/pypy/pypy/changeset/ce1c1b2ad027/ Log: Fix PyPy issue 1544 diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -201,7 +201,7 @@ if w_z is not None: # __complex__() must return a complex or (float,int,long) object # (XXX should not use isinstance here) - if not strict_typing and (space.isinstance_w(w_z, space.w_int) or + if not strict_typing and (space.isinstance_w(w_z, space.w_int) or space.isinstance_w(w_z, space.w_long) or space.isinstance_w(w_z, space.w_float)): return (space.float_w(w_z), 0.0) @@ -214,8 +214,10 @@ # # no '__complex__' method, so we assume it is a float, # unless it is an instance of some subclass of complex. 
- if isinstance(w_complex, W_ComplexObject): - return (w_complex.realval, w_complex.imagval) + if space.is_true(space.isinstance(w_complex, space.gettypefor(W_ComplexObject))): + real = space.float(space.getattr(w_complex, space.wrap("real"))) + imag = space.float(space.getattr(w_complex, space.wrap("imag"))) + return (space.float_w(real), space.float_w(imag)) # # Check that it is not a string (on which space.float() would succeed). if (space.isinstance_w(w_complex, space.w_str) or From noreply at buildbot.pypy.org Thu Aug 15 20:40:21 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 15 Aug 2013 20:40:21 +0200 (CEST) Subject: [pypy-commit] pypy default: space.isinstance -> space.isinstance_w Message-ID: <20130815184021.85D4D1C00EC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r66165:05961722cb69 Date: 2013-08-15 11:39 -0700 http://bitbucket.org/pypy/pypy/changeset/05961722cb69/ Log: space.isinstance -> space.isinstance_w diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -437,14 +437,14 @@ return self.getrepr(self.space, info) def getdisplayname(self): + space = self.space w_name = self.w_name if w_name is None: return '?' - elif self.space.is_true(self.space.isinstance(w_name, - self.space.w_str)): - return "'%s'" % self.space.str_w(w_name) + elif space.isinstance_w(w_name, space.w_str): + return "'%s'" % space.str_w(w_name) else: - return self.space.str_w(self.space.repr(w_name)) + return space.str_w(space.repr(w_name)) def file_writelines(self, w_lines): """writelines(sequence_of_strings) -> None. Write the strings to the file. diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -214,7 +214,7 @@ # # no '__complex__' method, so we assume it is a float, # unless it is an instance of some subclass of complex. 
- if space.is_true(space.isinstance(w_complex, space.gettypefor(W_ComplexObject))): + if space.isinstance_w(w_complex, space.gettypefor(W_ComplexObject)): real = space.float(space.getattr(w_complex, space.wrap("real"))) imag = space.float(space.getattr(w_complex, space.wrap("imag"))) return (space.float_w(real), space.float_w(imag)) From noreply at buildbot.pypy.org Thu Aug 15 21:06:10 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 15 Aug 2013 21:06:10 +0200 (CEST) Subject: [pypy-commit] pypy default: 37b092c3f176 killed truncate_addr Message-ID: <20130815190610.A82BC1C138A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r66166:1a05c73021bd Date: 2013-08-15 12:01 -0700 http://bitbucket.org/pypy/pypy/changeset/1a05c73021bd/ Log: 37b092c3f176 killed truncate_addr diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -382,7 +382,7 @@ from rpython.jit.backend.tool.viewcode import World world = World() for entry in extract_category(log, 'jit-backend-dump'): - world.parse(entry.splitlines(True), truncate_addr=False) + world.parse(entry.splitlines(True)) dumps = {} for r in world.ranges: if r.addr in addrs and addrs[r.addr]: From noreply at buildbot.pypy.org Thu Aug 15 21:06:12 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 15 Aug 2013 21:06:12 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130815190612.46AD71C138A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r66167:8dc61b6df4f4 Date: 2013-08-15 12:05 -0700 http://bitbucket.org/pypy/pypy/changeset/8dc61b6df4f4/ Log: merge default diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -83,7 +83,7 @@ the selection of scientific software) will also work for a build with the builtin backend. -.. _`download`: http://cern.ch/wlav/reflex-2013-04-23.tar.bz2 +.. _`download`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 .. 
_`ROOT`: http://root.cern.ch/ Besides Reflex, you probably need a version of `gccxml`_ installed, which is @@ -98,8 +98,8 @@ To install the standalone version of Reflex, after download:: - $ tar jxf reflex-2013-04-23.tar.bz2 - $ cd reflex-2013-04-23 + $ tar jxf reflex-2013-08-14.tar.bz2 + $ cd reflex-2013-08-14 $ ./build/autogen $ ./configure $ make && make install diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -114,13 +114,12 @@ except BaseException as e: try: stderr = sys.stderr - except AttributeError: - pass # too bad - else: print('Error calling sys.excepthook:', file=stderr) originalexcepthook(type(e), e, e.__traceback__) print(file=stderr) print('Original exception was:', file=stderr) + except: + pass # too bad # we only get here if sys.excepthook didn't do its job originalexcepthook(etype, evalue, etraceback) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -856,6 +856,69 @@ assert l assert l[0] is None or len(l[0]) == 0 + def test_assign_object_with_special_methods(self): + from array import array + + class Num(object): + def __float__(self): + return 5.25 + + def __int__(self): + return 7 + + class NotNum(object): + pass + + class Silly(object): + def __float__(self): + return None + + def __int__(self): + return None + + class OldNum: + def __float__(self): + return 6.25 + + def __int__(self): + return 8 + + class OldNotNum: + pass + + class OldSilly: + def __float__(self): + return None + + def __int__(self): + return None + + for tc in 'bBhHiIlL': + a = array(tc, [0]) + raises(TypeError, a.__setitem__, 0, 1.0) + a[0] = 1 + a[0] = Num() + assert a[0] == 7 + raises(TypeError, a.__setitem__, NotNum()) + a[0] = OldNum() + assert a[0] == 8 + raises(TypeError, a.__setitem__, OldNotNum()) + raises(TypeError, a.__setitem__, Silly()) + raises(TypeError, a.__setitem__, OldSilly()) + + for tc in 'fd': + a = array(tc, [0]) + a[0] = 1.0 + a[0] = 1 + a[0] = Num() + assert a[0] == 5.25 + raises(TypeError, a.__setitem__, NotNum()) + a[0] = OldNum() + assert a[0] == 6.25 + raises(TypeError, a.__setitem__, OldNotNum()) + raises(TypeError, a.__setitem__, Silly()) + raises(TypeError, a.__setitem__, OldSilly()) + def test_bytearray(self): a = self.array('u', 'hi') b = self.array('u') diff --git a/pypy/module/cppyy/genreflex-methptrgetter.patch b/pypy/module/cppyy/genreflex-methptrgetter.patch --- a/pypy/module/cppyy/genreflex-methptrgetter.patch +++ b/pypy/module/cppyy/genreflex-methptrgetter.patch @@ -10,7 +10,7 @@ # The next is to avoid a known problem with gccxml that it generates a # references to id equal '_0' which is not defined anywhere self.xref['_0'] = {'elem':'Unknown', 'attrs':{'id':'_0','name':''}, 'subelems':[]} -@@ -1306,6 +1307,8 @@ +@@ -1328,6 +1329,8 @@ bases = self.getBases( attrs['id'] ) if inner and attrs.has_key('demangled') and self.isUnnamedType(attrs['demangled']) : cls = attrs['demangled'] @@ -19,7 +19,7 @@ clt = '' else: cls = self.genTypeName(attrs['id'],const=True,colon=True) -@@ -1343,7 +1346,7 @@ +@@ -1365,7 +1368,7 @@ # Inner class/struct/union/enum. 
for m in memList : member = self.xref[m] @@ -28,7 +28,7 @@ and member['attrs'].get('access') in ('private','protected') \ and not self.isUnnamedType(member['attrs'].get('demangled')): cmem = self.genTypeName(member['attrs']['id'],const=True,colon=True) -@@ -1981,8 +1984,15 @@ +@@ -2003,8 +2006,15 @@ else : params = '0' s = ' .AddFunctionMember(%s, Reflex::Literal("%s"), %s%s, 0, %s, %s)' % (self.genTypeID(id), name, type, id, params, mod) s += self.genCommentProperty(attrs) @@ -44,7 +44,7 @@ def genMCODef(self, type, name, attrs, args): id = attrs['id'] cl = self.genTypeName(attrs['context'],colon=True) -@@ -2049,8 +2059,44 @@ +@@ -2071,8 +2081,44 @@ if returns == 'void' : body += ' }\n' else : body += ' }\n' body += '}\n' @@ -105,17 +105,16 @@ -h, --help Print this help\n """ -@@ -127,7 +131,8 @@ - opts, args = getopt.getopt(options, 'ho:s:c:I:U:D:PC', \ +@@ -128,7 +132,7 @@ ['help','debug=', 'output=','selection_file=','pool','dataonly','interpreteronly','deep','gccxmlpath=', 'capabilities=','rootmap=','rootmap-lib=','comments','iocomments','no_membertypedefs', -- 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=']) -+ 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', -+ 'with-methptrgetter']) + 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', +- 'library=']) ++ 'library=', 'with-methptrgetter']) except getopt.GetoptError, e: print "--->> genreflex: ERROR:",e self.usage(2) -@@ -186,6 +191,8 @@ +@@ -187,6 +191,8 @@ self.rootmap = a if o in ('--rootmap-lib',): self.rootmaplib = a diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -685,3 +685,8 @@ msg=error_message) sys.stderr.write('.') sys.stderr.write('\n') + + def test_complexbox_to_pycomplex(self): + from numpypy import complex128 + x = complex128(3.4j) + assert complex(x) == 3.4j diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -194,7 +194,7 @@ if w_z is not None: # __complex__() must return a complex or (float,int,long) object # (XXX should not use isinstance here) - if not strict_typing and (space.isinstance_w(w_z, space.w_int) or + if not strict_typing and (space.isinstance_w(w_z, space.w_int) or space.isinstance_w(w_z, space.w_float)): return (space.float_w(w_z), 0.0) elif isinstance(w_z, W_ComplexObject): @@ -206,8 +206,10 @@ # # no '__complex__' method, so we assume it is a float, # unless it is an instance of some subclass of complex. - if isinstance(w_complex, W_ComplexObject): - return (w_complex.realval, w_complex.imagval) + if space.isinstance_w(w_complex, space.gettypefor(W_ComplexObject)): + real = space.float(space.getattr(w_complex, space.wrap("real"))) + imag = space.float(space.getattr(w_complex, space.wrap("imag"))) + return (space.float_w(real), space.float_w(imag)) # # Check that it is not a string (on which space.float() would succeed). 
if (space.isinstance_w(w_complex, space.w_str) or diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -382,7 +382,7 @@ from rpython.jit.backend.tool.viewcode import World world = World() for entry in extract_category(log, 'jit-backend-dump'): - world.parse(entry.splitlines(True), truncate_addr=False) + world.parse(entry.splitlines(True)) dumps = {} for r in world.ranges: if r.addr in addrs and addrs[r.addr]: From noreply at buildbot.pypy.org Thu Aug 15 23:57:02 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 15 Aug 2013 23:57:02 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add docstrings for bytearray. Message-ID: <20130815215702.D2DC81C3624@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66168:dfad685e1e9c Date: 2013-08-15 23:35 +0200 http://bitbucket.org/pypy/pypy/changeset/dfad685e1e9c/ Log: Add docstrings for bytearray. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -383,87 +383,570 @@ return -1 +class BytearrayDocstrings: + """bytearray(iterable_of_ints) -> bytearray + bytearray(string, encoding[, errors]) -> bytearray + bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray + bytearray(memory_view) -> bytearray + + Construct an mutable bytearray object from: + - an iterable yielding integers in range(256) + - a text string encoded using the specified encoding + - a bytes or a bytearray object + - any object implementing the buffer API. + + bytearray(int) -> bytearray. + + Construct a zero-initialized bytearray of the given length. + + """ + + def __add__(): + """x.__add__(y) <==> x+y""" + + def __alloc__(): + """B.__alloc__() -> int + + Return the number of bytes actually allocated. + """ + + def __contains__(): + """x.__contains__(y) <==> y in x""" + + def __delitem__(): + """x.__delitem__(y) <==> del x[y]""" + + def __eq__(): + """x.__eq__(y) <==> x==y""" + + def __ge__(): + """x.__ge__(y) <==> x>=y""" + + def __getattribute__(): + """x.__getattribute__('name') <==> x.name""" + + def __getitem__(): + """x.__getitem__(y) <==> x[y]""" + + def __gt__(): + """x.__gt__(y) <==> x>y""" + + def __iadd__(): + """x.__iadd__(y) <==> x+=y""" + + def __imul__(): + """x.__imul__(y) <==> x*=y""" + + def __init__(): + """x.__init__(...) initializes x; see help(type(x)) for signature""" + + def __iter__(): + """x.__iter__() <==> iter(x)""" + + def __le__(): + """x.__le__(y) <==> x<=y""" + + def __len__(): + """x.__len__() <==> len(x)""" + + def __lt__(): + """x.__lt__(y) <==> x x*n""" + + def __ne__(): + """x.__ne__(y) <==> x!=y""" + + def __reduce__(): + """Return state information for pickling.""" + + def __repr__(): + """x.__repr__() <==> repr(x)""" + + def __rmul__(): + """x.__rmul__(n) <==> n*x""" + + def __setitem__(): + """x.__setitem__(i, y) <==> x[i]=y""" + + def __sizeof__(): + """B.__sizeof__() -> int + + Returns the size of B in memory, in bytes + """ + + def __str__(): + """x.__str__() <==> str(x)""" + + def append(): + """B.append(int) -> None + + Append a single item to the end of B. + """ + + def capitalize(): + """B.capitalize() -> copy of B + + Return a copy of B with only its first character capitalized (ASCII) + and the rest lower-cased. + """ + + def center(): + """B.center(width[, fillchar]) -> copy of B + + Return B centered in a string of length width. 
Padding is + done using the specified fill character (default is a space). + """ + + def count(): + """B.count(sub[, start[, end]]) -> int + + Return the number of non-overlapping occurrences of subsection sub in + bytes B[start:end]. Optional arguments start and end are interpreted + as in slice notation. + """ + + def decode(): + """B.decode(encoding=None, errors='strict') -> unicode + + Decode B using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + + def endswith(): + """B.endswith(suffix[, start[, end]]) -> bool + + Return True if B ends with the specified suffix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + suffix can also be a tuple of strings to try. + """ + + def expandtabs(): + """B.expandtabs([tabsize]) -> copy of B + + Return a copy of B where all tab characters are expanded using spaces. + If tabsize is not given, a tab size of 8 characters is assumed. + """ + + def extend(): + """B.extend(iterable_of_ints) -> None + + Append all the elements from the iterator or sequence to the + end of B. + """ + + def find(): + """B.find(sub[, start[, end]]) -> int + + Return the lowest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def fromhex(): + """bytearray.fromhex(string) -> bytearray (static method) + + Create a bytearray object from a string of hexadecimal numbers. + Spaces between two numbers are accepted. + Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef'). + """ + + def index(): + """B.index(sub[, start[, end]]) -> int + + Like B.find() but raise ValueError when the subsection is not found. + """ + + def insert(): + """B.insert(index, int) -> None + + Insert a single item into the bytearray before the given index. + """ + + def isalnum(): + """B.isalnum() -> bool + + Return True if all characters in B are alphanumeric + and there is at least one character in B, False otherwise. + """ + + def isalpha(): + """B.isalpha() -> bool + + Return True if all characters in B are alphabetic + and there is at least one character in B, False otherwise. + """ + + def isdigit(): + """B.isdigit() -> bool + + Return True if all characters in B are digits + and there is at least one character in B, False otherwise. + """ + + def islower(): + """B.islower() -> bool + + Return True if all cased characters in B are lowercase and there is + at least one cased character in B, False otherwise. + """ + + def isspace(): + """B.isspace() -> bool + + Return True if all characters in B are whitespace + and there is at least one character in B, False otherwise. + """ + + def istitle(): + """B.istitle() -> bool + + Return True if B is a titlecased string and there is at least one + character in B, i.e. uppercase characters may only follow uncased + characters and lowercase characters only cased ones. Return False + otherwise. + """ + + def isupper(): + """B.isupper() -> bool + + Return True if all cased characters in B are uppercase and there is + at least one cased character in B, False otherwise. 
+ """ + + def join(): + """B.join(iterable_of_bytes) -> bytearray + + Concatenate any number of str/bytearray objects, with B + in between each pair, and return the result as a new bytearray. + """ + + def ljust(): + """B.ljust(width[, fillchar]) -> copy of B + + Return B left justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def lower(): + """B.lower() -> copy of B + + Return a copy of B with all ASCII characters converted to lowercase. + """ + + def lstrip(): + """B.lstrip([bytes]) -> bytearray + + Strip leading bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip leading ASCII whitespace. + """ + + def partition(): + """B.partition(sep) -> (head, sep, tail) + + Search for the separator sep in B, and return the part before it, + the separator itself, and the part after it. If the separator is not + found, returns B and two empty bytearray objects. + """ + + def pop(): + """B.pop([index]) -> int + + Remove and return a single item from B. If no index + argument is given, will pop the last value. + """ + + def remove(): + """B.remove(int) -> None + + Remove the first occurrence of a value in B. + """ + + def replace(): + """B.replace(old, new[, count]) -> bytearray + + Return a copy of B with all occurrences of subsection + old replaced by new. If the optional argument count is + given, only the first count occurrences are replaced. + """ + + def reverse(): + """B.reverse() -> None + + Reverse the order of the values in B in place. + """ + + def rfind(): + """B.rfind(sub[, start[, end]]) -> int + + Return the highest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def rindex(): + """B.rindex(sub[, start[, end]]) -> int + + Like B.rfind() but raise ValueError when the subsection is not found. + """ + + def rjust(): + """B.rjust(width[, fillchar]) -> copy of B + + Return B right justified in a string of length width. Padding is + done using the specified fill character (default is a space) + """ + + def rpartition(): + """B.rpartition(sep) -> (head, sep, tail) + + Search for the separator sep in B, starting at the end of B, + and return the part before it, the separator itself, and the + part after it. If the separator is not found, returns two empty + bytearray objects and B. + """ + + def rsplit(): + """B.rsplit(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter, + starting at the end of B and working to the front. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def rstrip(): + """B.rstrip([bytes]) -> bytearray + + Strip trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip trailing ASCII whitespace. + """ + + def split(): + """B.split(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. 
+ """ + + def splitlines(): + """B.splitlines(keepends=False) -> list of lines + + Return a list of the lines in B, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + + def startswith(): + """B.startswith(prefix[, start[, end]]) -> bool + + Return True if B starts with the specified prefix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + prefix can also be a tuple of strings to try. + """ + + def strip(): + """B.strip([bytes]) -> bytearray + + Strip leading and trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip ASCII whitespace. + """ + + def swapcase(): + """B.swapcase() -> copy of B + + Return a copy of B with uppercase ASCII characters converted + to lowercase ASCII and vice versa. + """ + + def title(): + """B.title() -> copy of B + + Return a titlecased version of B, i.e. ASCII words start with uppercase + characters, all remaining cased characters have lowercase. + """ + + def translate(): + """B.translate(table[, deletechars]) -> bytearray + + Return a copy of B, where all characters occurring in the + optional argument deletechars are removed, and the remaining + characters have been mapped through the given translation + table, which must be a bytes object of length 256. + """ + + def upper(): + """B.upper() -> copy of B + + Return a copy of B with all ASCII characters converted to uppercase. + """ + + def zfill(): + """B.zfill(width) -> copy of B + + Pad a numeric string B with zeros on the left, to fill a field + of the specified width. B is never truncated. + """ + + W_BytearrayObject.typedef = StdTypeDef( "bytearray", - __doc__ = '''bytearray() -> an empty bytearray -bytearray(sequence) -> bytearray initialized from sequence\'s items - -If the argument is a bytearray, the return value is the same object.''', + __doc__ = BytearrayDocstrings.__doc__, __new__ = interp2app(W_BytearrayObject.descr_new), __hash__ = None, - __reduce__ = interp2app(W_BytearrayObject.descr_reduce), - fromhex = interp2app(W_BytearrayObject.descr_fromhex, as_classmethod=True), + __reduce__ = interp2app(W_BytearrayObject.descr_reduce, + doc=BytearrayDocstrings.__reduce__.__doc__), + fromhex = interp2app(W_BytearrayObject.descr_fromhex, as_classmethod=True, + doc=BytearrayDocstrings.fromhex.__doc__), - __repr__ = interp2app(W_BytearrayObject.descr_repr), - __str__ = interp2app(W_BytearrayObject.descr_str), + __repr__ = interp2app(W_BytearrayObject.descr_repr, + doc=BytearrayDocstrings.__repr__.__doc__), + __str__ = interp2app(W_BytearrayObject.descr_str, + doc=BytearrayDocstrings.__str__.__doc__), - __eq__ = interp2app(W_BytearrayObject.descr_eq), - __ne__ = interp2app(W_BytearrayObject.descr_ne), - __lt__ = interp2app(W_BytearrayObject.descr_lt), - __le__ = interp2app(W_BytearrayObject.descr_le), - __gt__ = interp2app(W_BytearrayObject.descr_gt), - __ge__ = interp2app(W_BytearrayObject.descr_ge), + __eq__ = interp2app(W_BytearrayObject.descr_eq, + doc=BytearrayDocstrings.__eq__.__doc__), + __ne__ = interp2app(W_BytearrayObject.descr_ne, + doc=BytearrayDocstrings.__ne__.__doc__), + __lt__ = interp2app(W_BytearrayObject.descr_lt, + doc=BytearrayDocstrings.__lt__.__doc__), + __le__ = interp2app(W_BytearrayObject.descr_le, + doc=BytearrayDocstrings.__le__.__doc__), + __gt__ = interp2app(W_BytearrayObject.descr_gt, + doc=BytearrayDocstrings.__gt__.__doc__), + __ge__ = 
interp2app(W_BytearrayObject.descr_ge, + doc=BytearrayDocstrings.__ge__.__doc__), - __len__ = interp2app(W_BytearrayObject.descr_len), - __contains__ = interp2app(W_BytearrayObject.descr_contains), + __len__ = interp2app(W_BytearrayObject.descr_len, + doc=BytearrayDocstrings.__len__.__doc__), + __contains__ = interp2app(W_BytearrayObject.descr_contains, + doc=BytearrayDocstrings.__contains__.__doc__), - __add__ = interp2app(W_BytearrayObject.descr_add), - __mul__ = interp2app(W_BytearrayObject.descr_mul), - __rmul__ = interp2app(W_BytearrayObject.descr_mul), + __add__ = interp2app(W_BytearrayObject.descr_add, + doc=BytearrayDocstrings.__add__.__doc__), + __mul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__mul__.__doc__), + __rmul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__rmul__.__doc__), - __getitem__ = interp2app(W_BytearrayObject.descr_getitem), + __getitem__ = interp2app(W_BytearrayObject.descr_getitem, + doc=BytearrayDocstrings.__getitem__.__doc__), - capitalize = interp2app(W_BytearrayObject.descr_capitalize), - center = interp2app(W_BytearrayObject.descr_center), - count = interp2app(W_BytearrayObject.descr_count), - decode = interp2app(W_BytearrayObject.descr_decode), - expandtabs = interp2app(W_BytearrayObject.descr_expandtabs), - find = interp2app(W_BytearrayObject.descr_find), - rfind = interp2app(W_BytearrayObject.descr_rfind), - index = interp2app(W_BytearrayObject.descr_index), - rindex = interp2app(W_BytearrayObject.descr_rindex), - isalnum = interp2app(W_BytearrayObject.descr_isalnum), - isalpha = interp2app(W_BytearrayObject.descr_isalpha), - isdigit = interp2app(W_BytearrayObject.descr_isdigit), - islower = interp2app(W_BytearrayObject.descr_islower), - isspace = interp2app(W_BytearrayObject.descr_isspace), - istitle = interp2app(W_BytearrayObject.descr_istitle), - isupper = interp2app(W_BytearrayObject.descr_isupper), - join = interp2app(W_BytearrayObject.descr_join), - ljust = interp2app(W_BytearrayObject.descr_ljust), - rjust = interp2app(W_BytearrayObject.descr_rjust), - lower = interp2app(W_BytearrayObject.descr_lower), - partition = interp2app(W_BytearrayObject.descr_partition), - rpartition = interp2app(W_BytearrayObject.descr_rpartition), - replace = interp2app(W_BytearrayObject.descr_replace), - split = interp2app(W_BytearrayObject.descr_split), - rsplit = interp2app(W_BytearrayObject.descr_rsplit), - splitlines = interp2app(W_BytearrayObject.descr_splitlines), - startswith = interp2app(W_BytearrayObject.descr_startswith), - endswith = interp2app(W_BytearrayObject.descr_endswith), - strip = interp2app(W_BytearrayObject.descr_strip), - lstrip = interp2app(W_BytearrayObject.descr_lstrip), - rstrip = interp2app(W_BytearrayObject.descr_rstrip), - swapcase = interp2app(W_BytearrayObject.descr_swapcase), - title = interp2app(W_BytearrayObject.descr_title), - translate = interp2app(W_BytearrayObject.descr_translate), - upper = interp2app(W_BytearrayObject.descr_upper), - zfill = interp2app(W_BytearrayObject.descr_zfill), + capitalize = interp2app(W_BytearrayObject.descr_capitalize, + doc=BytearrayDocstrings.capitalize.__doc__), + center = interp2app(W_BytearrayObject.descr_center, + doc=BytearrayDocstrings.center.__doc__), + count = interp2app(W_BytearrayObject.descr_count, + doc=BytearrayDocstrings.count.__doc__), + decode = interp2app(W_BytearrayObject.descr_decode, + doc=BytearrayDocstrings.decode.__doc__), + expandtabs = interp2app(W_BytearrayObject.descr_expandtabs, + 
doc=BytearrayDocstrings.expandtabs.__doc__), + find = interp2app(W_BytearrayObject.descr_find, + doc=BytearrayDocstrings.find.__doc__), + rfind = interp2app(W_BytearrayObject.descr_rfind, + doc=BytearrayDocstrings.rfind.__doc__), + index = interp2app(W_BytearrayObject.descr_index, + doc=BytearrayDocstrings.index.__doc__), + rindex = interp2app(W_BytearrayObject.descr_rindex, + doc=BytearrayDocstrings.rindex.__doc__), + isalnum = interp2app(W_BytearrayObject.descr_isalnum, + doc=BytearrayDocstrings.isalnum.__doc__), + isalpha = interp2app(W_BytearrayObject.descr_isalpha, + doc=BytearrayDocstrings.isalpha.__doc__), + isdigit = interp2app(W_BytearrayObject.descr_isdigit, + doc=BytearrayDocstrings.isdigit.__doc__), + islower = interp2app(W_BytearrayObject.descr_islower, + doc=BytearrayDocstrings.islower.__doc__), + isspace = interp2app(W_BytearrayObject.descr_isspace, + doc=BytearrayDocstrings.isspace.__doc__), + istitle = interp2app(W_BytearrayObject.descr_istitle, + doc=BytearrayDocstrings.istitle.__doc__), + isupper = interp2app(W_BytearrayObject.descr_isupper, + doc=BytearrayDocstrings.isupper.__doc__), + join = interp2app(W_BytearrayObject.descr_join, + doc=BytearrayDocstrings.join.__doc__), + ljust = interp2app(W_BytearrayObject.descr_ljust, + doc=BytearrayDocstrings.ljust.__doc__), + rjust = interp2app(W_BytearrayObject.descr_rjust, + doc=BytearrayDocstrings.rjust.__doc__), + lower = interp2app(W_BytearrayObject.descr_lower, + doc=BytearrayDocstrings.lower.__doc__), + partition = interp2app(W_BytearrayObject.descr_partition, + doc=BytearrayDocstrings.partition.__doc__), + rpartition = interp2app(W_BytearrayObject.descr_rpartition, + doc=BytearrayDocstrings.rpartition.__doc__), + replace = interp2app(W_BytearrayObject.descr_replace, + doc=BytearrayDocstrings.replace.__doc__), + split = interp2app(W_BytearrayObject.descr_split, + doc=BytearrayDocstrings.split.__doc__), + rsplit = interp2app(W_BytearrayObject.descr_rsplit, + doc=BytearrayDocstrings.rsplit.__doc__), + splitlines = interp2app(W_BytearrayObject.descr_splitlines, + doc=BytearrayDocstrings.splitlines.__doc__), + startswith = interp2app(W_BytearrayObject.descr_startswith, + doc=BytearrayDocstrings.startswith.__doc__), + endswith = interp2app(W_BytearrayObject.descr_endswith, + doc=BytearrayDocstrings.endswith.__doc__), + strip = interp2app(W_BytearrayObject.descr_strip, + doc=BytearrayDocstrings.strip.__doc__), + lstrip = interp2app(W_BytearrayObject.descr_lstrip, + doc=BytearrayDocstrings.lstrip.__doc__), + rstrip = interp2app(W_BytearrayObject.descr_rstrip, + doc=BytearrayDocstrings.rstrip.__doc__), + swapcase = interp2app(W_BytearrayObject.descr_swapcase, + doc=BytearrayDocstrings.swapcase.__doc__), + title = interp2app(W_BytearrayObject.descr_title, + doc=BytearrayDocstrings.title.__doc__), + translate = interp2app(W_BytearrayObject.descr_translate, + doc=BytearrayDocstrings.translate.__doc__), + upper = interp2app(W_BytearrayObject.descr_upper, + doc=BytearrayDocstrings.upper.__doc__), + zfill = interp2app(W_BytearrayObject.descr_zfill, + doc=BytearrayDocstrings.zfill.__doc__), - __init__ = interp2app(W_BytearrayObject.descr_init), + __init__ = interp2app(W_BytearrayObject.descr_init, + doc=BytearrayDocstrings.__init__.__doc__), __buffer__ = interp2app(W_BytearrayObject.descr_buffer), - __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add), - __imul__ = interp2app(W_BytearrayObject.descr_inplace_mul), - __setitem__ = interp2app(W_BytearrayObject.descr_setitem), - __delitem__ = 
interp2app(W_BytearrayObject.descr_delitem), + __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add, + doc=BytearrayDocstrings.__iadd__.__doc__), + __imul__ = interp2app(W_BytearrayObject.descr_inplace_mul, + doc=BytearrayDocstrings.__imul__.__doc__), + __setitem__ = interp2app(W_BytearrayObject.descr_setitem, + doc=BytearrayDocstrings.__setitem__.__doc__), + __delitem__ = interp2app(W_BytearrayObject.descr_delitem, + doc=BytearrayDocstrings.__delitem__.__doc__), - append = interp2app(W_BytearrayObject.descr_append), - extend = interp2app(W_BytearrayObject.descr_extend), - insert = interp2app(W_BytearrayObject.descr_insert), - pop = interp2app(W_BytearrayObject.descr_pop), - remove = interp2app(W_BytearrayObject.descr_remove), - reverse = interp2app(W_BytearrayObject.descr_reverse), + append = interp2app(W_BytearrayObject.descr_append, + doc=BytearrayDocstrings.append.__doc__), + extend = interp2app(W_BytearrayObject.descr_extend, + doc=BytearrayDocstrings.extend.__doc__), + insert = interp2app(W_BytearrayObject.descr_insert, + doc=BytearrayDocstrings.insert.__doc__), + pop = interp2app(W_BytearrayObject.descr_pop, + doc=BytearrayDocstrings.pop.__doc__), + remove = interp2app(W_BytearrayObject.descr_remove, + doc=BytearrayDocstrings.remove.__doc__), + reverse = interp2app(W_BytearrayObject.descr_reverse, + doc=BytearrayDocstrings.reverse.__doc__), ) init_signature = Signature(['source', 'encoding', 'errors'], None, None) From noreply at buildbot.pypy.org Thu Aug 15 23:57:04 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 15 Aug 2013 23:57:04 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add docstrings for str/unicode. Message-ID: <20130815215704.3DA181C3625@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66169:c89210aaa3b7 Date: 2013-08-15 23:54 +0200 http://bitbucket.org/pypy/pypy/changeset/c89210aaa3b7/ Log: Add docstrings for str/unicode. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -299,78 +299,515 @@ else: return W_BytesObject(c) + +class BytesDocstrings: + """str(object='') -> string + + Return a nice string representation of the object. + If the argument is a string, the return value is the same object. + + """ + + def __add__(): + """x.__add__(y) <==> x+y""" + + def __contains__(): + """x.__contains__(y) <==> y in x""" + + def __eq__(): + """x.__eq__(y) <==> x==y""" + + def __format__(): + """S.__format__(format_spec) -> string + + Return a formatted version of S as described by format_spec. + """ + + def __ge__(): + """x.__ge__(y) <==> x>=y""" + + def __getattribute__(): + """x.__getattribute__('name') <==> x.name""" + + def __getitem__(): + """x.__getitem__(y) <==> x[y]""" + + def __getnewargs__(): + """""" + + def __getslice__(): + """x.__getslice__(i, j) <==> x[i:j] + + Use of negative indices is not supported. 
+ """ + + def __gt__(): + """x.__gt__(y) <==> x>y""" + + def __hash__(): + """x.__hash__() <==> hash(x)""" + + def __le__(): + """x.__le__(y) <==> x<=y""" + + def __len__(): + """x.__len__() <==> len(x)""" + + def __lt__(): + """x.__lt__(y) <==> x x%y""" + + def __mul__(): + """x.__mul__(n) <==> x*n""" + + def __ne__(): + """x.__ne__(y) <==> x!=y""" + + def __repr__(): + """x.__repr__() <==> repr(x)""" + + def __rmod__(): + """x.__rmod__(y) <==> y%x""" + + def __rmul__(): + """x.__rmul__(n) <==> n*x""" + + def __sizeof__(): + """S.__sizeof__() -> size of S in memory, in bytes""" + + def __str__(): + """x.__str__() <==> str(x)""" + + def capitalize(): + """S.capitalize() -> string + + Return a capitalized version of S, i.e. make the first character + have upper case and the rest lower case. + """ + + def center(): + """S.center(width[, fillchar]) -> string + + Return S centered in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def count(): + """S.count(sub[, start[, end]]) -> int + + Return the number of non-overlapping occurrences of substring sub in + string S[start:end]. Optional arguments start and end are interpreted + as in slice notation. + """ + + def decode(): + """S.decode(encoding=None, errors='strict') -> object + + Decode S using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + + def encode(): + """S.encode(encoding=None, errors='strict') -> object + + Encode S using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that is able to handle UnicodeEncodeErrors. + """ + + def endswith(): + """S.endswith(suffix[, start[, end]]) -> bool + + Return True if S ends with the specified suffix, False otherwise. + With optional start, test S beginning at that position. + With optional end, stop comparing S at that position. + suffix can also be a tuple of strings to try. + """ + + def expandtabs(): + """S.expandtabs([tabsize]) -> string + + Return a copy of S where all tab characters are expanded using spaces. + If tabsize is not given, a tab size of 8 characters is assumed. + """ + + def find(): + """S.find(sub[, start[, end]]) -> int + + Return the lowest index in S where substring sub is found, + such that sub is contained within S[start:end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def format(): + """S.format(*args, **kwargs) -> string + + Return a formatted version of S, using substitutions from args and kwargs. + The substitutions are identified by braces ('{' and '}'). + """ + + def index(): + """S.index(sub[, start[, end]]) -> int + + Like S.find() but raise ValueError when the substring is not found. + """ + + def isalnum(): + """S.isalnum() -> bool + + Return True if all characters in S are alphanumeric + and there is at least one character in S, False otherwise. 
+ """ + + def isalpha(): + """S.isalpha() -> bool + + Return True if all characters in S are alphabetic + and there is at least one character in S, False otherwise. + """ + + def isdigit(): + """S.isdigit() -> bool + + Return True if all characters in S are digits + and there is at least one character in S, False otherwise. + """ + + def islower(): + """S.islower() -> bool + + Return True if all cased characters in S are lowercase and there is + at least one cased character in S, False otherwise. + """ + + def isspace(): + """S.isspace() -> bool + + Return True if all characters in S are whitespace + and there is at least one character in S, False otherwise. + """ + + def istitle(): + """S.istitle() -> bool + + Return True if S is a titlecased string and there is at least one + character in S, i.e. uppercase characters may only follow uncased + characters and lowercase characters only cased ones. Return False + otherwise. + """ + + def isupper(): + """S.isupper() -> bool + + Return True if all cased characters in S are uppercase and there is + at least one cased character in S, False otherwise. + """ + + def join(): + """S.join(iterable) -> string + + Return a string which is the concatenation of the strings in the + iterable. The separator between elements is S. + """ + + def ljust(): + """S.ljust(width[, fillchar]) -> string + + Return S left-justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def lower(): + """S.lower() -> string + + Return a copy of the string S converted to lowercase. + """ + + def lstrip(): + """S.lstrip([chars]) -> string or unicode + + Return a copy of the string S with leading whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is unicode, S will be converted to unicode before stripping + """ + + def partition(): + """S.partition(sep) -> (head, sep, tail) + + Search for the separator sep in S, and return the part before it, + the separator itself, and the part after it. If the separator is not + found, return S and two empty strings. + """ + + def replace(): + """S.replace(old, new[, count]) -> string + + Return a copy of string S with all occurrences of substring + old replaced by new. If the optional argument count is + given, only the first count occurrences are replaced. + """ + + def rfind(): + """S.rfind(sub[, start[, end]]) -> int + + Return the highest index in S where substring sub is found, + such that sub is contained within S[start:end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def rindex(): + """S.rindex(sub[, start[, end]]) -> int + + Like S.rfind() but raise ValueError when the substring is not found. + """ + + def rjust(): + """S.rjust(width[, fillchar]) -> string + + Return S right-justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def rpartition(): + """S.rpartition(sep) -> (head, sep, tail) + + Search for the separator sep in S, starting at the end of S, and return + the part before it, the separator itself, and the part after it. If the + separator is not found, return two empty strings and S. + """ + + def rsplit(): + """S.rsplit(sep=None, maxsplit=-1) -> list of strings + + Return a list of the words in the string S, using sep as the + delimiter string, starting at the end of the string and working + to the front. If maxsplit is given, at most maxsplit splits are + done. 
If sep is not specified or is None, any whitespace string + is a separator. + """ + + def rstrip(): + """S.rstrip([chars]) -> string or unicode + + Return a copy of the string S with trailing whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is unicode, S will be converted to unicode before stripping + """ + + def split(): + """S.split(sep=None, maxsplit=-1) -> list of strings + + Return a list of the words in the string S, using sep as the + delimiter string. If maxsplit is given, at most maxsplit + splits are done. If sep is not specified or is None, any + whitespace string is a separator and empty strings are removed + from the result. + """ + + def splitlines(): + """S.splitlines(keepends=False) -> list of strings + + Return a list of the lines in S, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + + def startswith(): + """S.startswith(prefix[, start[, end]]) -> bool + + Return True if S starts with the specified prefix, False otherwise. + With optional start, test S beginning at that position. + With optional end, stop comparing S at that position. + prefix can also be a tuple of strings to try. + """ + + def strip(): + """S.strip([chars]) -> string or unicode + + Return a copy of the string S with leading and trailing + whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is unicode, S will be converted to unicode before stripping + """ + + def swapcase(): + """S.swapcase() -> string + + Return a copy of the string S with uppercase characters + converted to lowercase and vice versa. + """ + + def title(): + """S.title() -> string + + Return a titlecased version of S, i.e. words start with uppercase + characters, all remaining cased characters have lowercase. + """ + + def translate(): + """S.translate(table[, deletechars]) -> string + + Return a copy of the string S, where all characters occurring + in the optional argument deletechars are removed, and the + remaining characters have been mapped through the given + translation table, which must be a string of length 256 or None. + If the table argument is None, no translation is applied and + the operation simply removes the characters in deletechars. + """ + + def upper(): + """S.upper() -> string + + Return a copy of the string S converted to uppercase. + """ + + def zfill(): + """S.zfill(width) -> string + + Pad a numeric string S with zeros on the left, to fill a field + of the specified width. The string S is never truncated. + """ + + W_BytesObject.typedef = StdTypeDef( "str", basestring_typedef, __new__ = interp2app(W_BytesObject.descr_new), - __doc__ = '''str(object) -> string + __doc__ = BytesDocstrings.__doc__, -Return a nice string representation of the object. 
-If the argument is a string, the return value is the same object.''', + __repr__ = interp2app(W_BytesObject.descr_repr, + doc=BytesDocstrings.__repr__.__doc__), + __str__ = interp2app(W_BytesObject.descr_str, + doc=BytesDocstrings.__str__.__doc__), + __hash__ = interp2app(W_BytesObject.descr_hash, + doc=BytesDocstrings.__hash__.__doc__), - __repr__ = interp2app(W_BytesObject.descr_repr), - __str__ = interp2app(W_BytesObject.descr_str), - __hash__ = interp2app(W_BytesObject.descr_hash), + __eq__ = interp2app(W_BytesObject.descr_eq, + doc=BytesDocstrings.__eq__.__doc__), + __ne__ = interp2app(W_BytesObject.descr_ne, + doc=BytesDocstrings.__ne__.__doc__), + __lt__ = interp2app(W_BytesObject.descr_lt, + doc=BytesDocstrings.__lt__.__doc__), + __le__ = interp2app(W_BytesObject.descr_le, + doc=BytesDocstrings.__le__.__doc__), + __gt__ = interp2app(W_BytesObject.descr_gt, + doc=BytesDocstrings.__gt__.__doc__), + __ge__ = interp2app(W_BytesObject.descr_ge, + doc=BytesDocstrings.__ge__.__doc__), - __eq__ = interp2app(W_BytesObject.descr_eq), - __ne__ = interp2app(W_BytesObject.descr_ne), - __lt__ = interp2app(W_BytesObject.descr_lt), - __le__ = interp2app(W_BytesObject.descr_le), - __gt__ = interp2app(W_BytesObject.descr_gt), - __ge__ = interp2app(W_BytesObject.descr_ge), + __len__ = interp2app(W_BytesObject.descr_len, + doc=BytesDocstrings.__len__.__doc__), + __contains__ = interp2app(W_BytesObject.descr_contains, + doc=BytesDocstrings.__contains__.__doc__), - __len__ = interp2app(W_BytesObject.descr_len), - __contains__ = interp2app(W_BytesObject.descr_contains), + __add__ = interp2app(W_BytesObject.descr_add, + doc=BytesDocstrings.__add__.__doc__), + __mul__ = interp2app(W_BytesObject.descr_mul, + doc=BytesDocstrings.__mul__.__doc__), + __rmul__ = interp2app(W_BytesObject.descr_mul, + doc=BytesDocstrings.__rmul__.__doc__), - __add__ = interp2app(W_BytesObject.descr_add), - __mul__ = interp2app(W_BytesObject.descr_mul), - __rmul__ = interp2app(W_BytesObject.descr_mul), + __getitem__ = interp2app(W_BytesObject.descr_getitem, + doc=BytesDocstrings.__getitem__.__doc__), + __getslice__ = interp2app(W_BytesObject.descr_getslice, + doc=BytesDocstrings.__getslice__.__doc__), - __getitem__ = interp2app(W_BytesObject.descr_getitem), - __getslice__ = interp2app(W_BytesObject.descr_getslice), + capitalize = interp2app(W_BytesObject.descr_capitalize, + doc=BytesDocstrings.capitalize.__doc__), + center = interp2app(W_BytesObject.descr_center, + doc=BytesDocstrings.center.__doc__), + count = interp2app(W_BytesObject.descr_count, + doc=BytesDocstrings.count.__doc__), + decode = interp2app(W_BytesObject.descr_decode, + doc=BytesDocstrings.decode.__doc__), + encode = interp2app(W_BytesObject.descr_encode, + doc=BytesDocstrings.encode.__doc__), + expandtabs = interp2app(W_BytesObject.descr_expandtabs, + doc=BytesDocstrings.expandtabs.__doc__), + find = interp2app(W_BytesObject.descr_find, + doc=BytesDocstrings.find.__doc__), + rfind = interp2app(W_BytesObject.descr_rfind, + doc=BytesDocstrings.rfind.__doc__), + index = interp2app(W_BytesObject.descr_index, + doc=BytesDocstrings.index.__doc__), + rindex = interp2app(W_BytesObject.descr_rindex, + doc=BytesDocstrings.rindex.__doc__), + isalnum = interp2app(W_BytesObject.descr_isalnum, + doc=BytesDocstrings.isalnum.__doc__), + isalpha = interp2app(W_BytesObject.descr_isalpha, + doc=BytesDocstrings.isalpha.__doc__), + isdigit = interp2app(W_BytesObject.descr_isdigit, + doc=BytesDocstrings.isdigit.__doc__), + islower = interp2app(W_BytesObject.descr_islower, + 
doc=BytesDocstrings.islower.__doc__), + isspace = interp2app(W_BytesObject.descr_isspace, + doc=BytesDocstrings.isspace.__doc__), + istitle = interp2app(W_BytesObject.descr_istitle, + doc=BytesDocstrings.istitle.__doc__), + isupper = interp2app(W_BytesObject.descr_isupper, + doc=BytesDocstrings.isupper.__doc__), + join = interp2app(W_BytesObject.descr_join, + doc=BytesDocstrings.join.__doc__), + ljust = interp2app(W_BytesObject.descr_ljust, + doc=BytesDocstrings.ljust.__doc__), + rjust = interp2app(W_BytesObject.descr_rjust, + doc=BytesDocstrings.rjust.__doc__), + lower = interp2app(W_BytesObject.descr_lower, + doc=BytesDocstrings.lower.__doc__), + partition = interp2app(W_BytesObject.descr_partition, + doc=BytesDocstrings.partition.__doc__), + rpartition = interp2app(W_BytesObject.descr_rpartition, + doc=BytesDocstrings.rpartition.__doc__), + replace = interp2app(W_BytesObject.descr_replace, + doc=BytesDocstrings.replace.__doc__), + split = interp2app(W_BytesObject.descr_split, + doc=BytesDocstrings.split.__doc__), + rsplit = interp2app(W_BytesObject.descr_rsplit, + doc=BytesDocstrings.rsplit.__doc__), + splitlines = interp2app(W_BytesObject.descr_splitlines, + doc=BytesDocstrings.splitlines.__doc__), + startswith = interp2app(W_BytesObject.descr_startswith, + doc=BytesDocstrings.startswith.__doc__), + endswith = interp2app(W_BytesObject.descr_endswith, + doc=BytesDocstrings.endswith.__doc__), + strip = interp2app(W_BytesObject.descr_strip, + doc=BytesDocstrings.strip.__doc__), + lstrip = interp2app(W_BytesObject.descr_lstrip, + doc=BytesDocstrings.lstrip.__doc__), + rstrip = interp2app(W_BytesObject.descr_rstrip, + doc=BytesDocstrings.rstrip.__doc__), + swapcase = interp2app(W_BytesObject.descr_swapcase, + doc=BytesDocstrings.swapcase.__doc__), + title = interp2app(W_BytesObject.descr_title, + doc=BytesDocstrings.title.__doc__), + translate = interp2app(W_BytesObject.descr_translate, + doc=BytesDocstrings.translate.__doc__), + upper = interp2app(W_BytesObject.descr_upper, + doc=BytesDocstrings.upper.__doc__), + zfill = interp2app(W_BytesObject.descr_zfill, + doc=BytesDocstrings.zfill.__doc__), - capitalize = interp2app(W_BytesObject.descr_capitalize), - center = interp2app(W_BytesObject.descr_center), - count = interp2app(W_BytesObject.descr_count), - decode = interp2app(W_BytesObject.descr_decode), - encode = interp2app(W_BytesObject.descr_encode), - expandtabs = interp2app(W_BytesObject.descr_expandtabs), - find = interp2app(W_BytesObject.descr_find), - rfind = interp2app(W_BytesObject.descr_rfind), - index = interp2app(W_BytesObject.descr_index), - rindex = interp2app(W_BytesObject.descr_rindex), - isalnum = interp2app(W_BytesObject.descr_isalnum), - isalpha = interp2app(W_BytesObject.descr_isalpha), - isdigit = interp2app(W_BytesObject.descr_isdigit), - islower = interp2app(W_BytesObject.descr_islower), - isspace = interp2app(W_BytesObject.descr_isspace), - istitle = interp2app(W_BytesObject.descr_istitle), - isupper = interp2app(W_BytesObject.descr_isupper), - join = interp2app(W_BytesObject.descr_join), - ljust = interp2app(W_BytesObject.descr_ljust), - rjust = interp2app(W_BytesObject.descr_rjust), - lower = interp2app(W_BytesObject.descr_lower), - partition = interp2app(W_BytesObject.descr_partition), - rpartition = interp2app(W_BytesObject.descr_rpartition), - replace = interp2app(W_BytesObject.descr_replace), - split = interp2app(W_BytesObject.descr_split), - rsplit = interp2app(W_BytesObject.descr_rsplit), - splitlines = interp2app(W_BytesObject.descr_splitlines), - startswith 
= interp2app(W_BytesObject.descr_startswith), - endswith = interp2app(W_BytesObject.descr_endswith), - strip = interp2app(W_BytesObject.descr_strip), - lstrip = interp2app(W_BytesObject.descr_lstrip), - rstrip = interp2app(W_BytesObject.descr_rstrip), - swapcase = interp2app(W_BytesObject.descr_swapcase), - title = interp2app(W_BytesObject.descr_title), - translate = interp2app(W_BytesObject.descr_translate), - upper = interp2app(W_BytesObject.descr_upper), - zfill = interp2app(W_BytesObject.descr_zfill), - - format = interp2app(W_BytesObject.descr_format), - __format__ = interp2app(W_BytesObject.descr__format__), - __mod__ = interp2app(W_BytesObject.descr_mod), + format = interp2app(W_BytesObject.descr_format, + doc=BytesDocstrings.format.__doc__), + __format__ = interp2app(W_BytesObject.descr__format__, + doc=BytesDocstrings.__format__.__doc__), + __mod__ = interp2app(W_BytesObject.descr_mod, + doc=BytesDocstrings.__mod__.__doc__), __buffer__ = interp2app(W_BytesObject.descr_buffer), - __getnewargs__ = interp2app(W_BytesObject.descr_getnewargs), + __getnewargs__ = interp2app(W_BytesObject.descr_getnewargs, + doc=BytesDocstrings.__getnewargs__.__doc__), _formatter_parser = interp2app(W_BytesObject.descr_formatter_parser), _formatter_field_name_split = interp2app(W_BytesObject.descr_formatter_field_name_split), @@ -378,7 +815,6 @@ def string_escape_encode(s, quote): - buf = StringBuilder(len(s) + 2) buf.append(quote) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -411,82 +411,532 @@ return unicode_from_encoded_object(space, w_str, "ascii", "strict") -# ____________________________________________________________ +class UnicodeDocstrings: + """unicode(object='') -> unicode object + unicode(string[, encoding[, errors]]) -> unicode object + + Create a new Unicode object from the given encoded string. + encoding defaults to the current default string encoding. + errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'. + + """ + + def __add__(): + """x.__add__(y) <==> x+y""" + + def __contains__(): + """x.__contains__(y) <==> y in x""" + + def __eq__(): + """x.__eq__(y) <==> x==y""" + + def __format__(): + """S.__format__(format_spec) -> unicode + + Return a formatted version of S as described by format_spec. + """ + + def __ge__(): + """x.__ge__(y) <==> x>=y""" + + def __getattribute__(): + """x.__getattribute__('name') <==> x.name""" + + def __getitem__(): + """x.__getitem__(y) <==> x[y]""" + + def __getnewargs__(): + """""" + + def __getslice__(): + """x.__getslice__(i, j) <==> x[i:j] + + Use of negative indices is not supported. + """ + + def __gt__(): + """x.__gt__(y) <==> x>y""" + + def __hash__(): + """x.__hash__() <==> hash(x)""" + + def __le__(): + """x.__le__(y) <==> x<=y""" + + def __len__(): + """x.__len__() <==> len(x)""" + + def __lt__(): + """x.__lt__(y) <==> x x%y""" + + def __mul__(): + """x.__mul__(n) <==> x*n""" + + def __ne__(): + """x.__ne__(y) <==> x!=y""" + + def __repr__(): + """x.__repr__() <==> repr(x)""" + + def __rmod__(): + """x.__rmod__(y) <==> y%x""" + + def __rmul__(): + """x.__rmul__(n) <==> n*x""" + + def __sizeof__(): + """S.__sizeof__() -> size of S in memory, in bytes""" + + def __str__(): + """x.__str__() <==> str(x)""" + + def capitalize(): + """S.capitalize() -> unicode + + Return a capitalized version of S, i.e. make the first character + have upper case and the rest lower case. 
+ """ + + def center(): + """S.center(width[, fillchar]) -> unicode + + Return S centered in a Unicode string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def count(): + """S.count(sub[, start[, end]]) -> int + + Return the number of non-overlapping occurrences of substring sub in + Unicode string S[start:end]. Optional arguments start and end are + interpreted as in slice notation. + """ + + def decode(): + """S.decode(encoding=None, errors='strict') -> string or unicode + + Decode S using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + + def encode(): + """S.encode(encoding=None, errors='strict') -> string or unicode + + Encode S using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors. + """ + + def endswith(): + """S.endswith(suffix[, start[, end]]) -> bool + + Return True if S ends with the specified suffix, False otherwise. + With optional start, test S beginning at that position. + With optional end, stop comparing S at that position. + suffix can also be a tuple of strings to try. + """ + + def expandtabs(): + """S.expandtabs([tabsize]) -> unicode + + Return a copy of S where all tab characters are expanded using spaces. + If tabsize is not given, a tab size of 8 characters is assumed. + """ + + def find(): + """S.find(sub[, start[, end]]) -> int + + Return the lowest index in S where substring sub is found, + such that sub is contained within S[start:end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def format(): + """S.format(*args, **kwargs) -> unicode + + Return a formatted version of S, using substitutions from args and kwargs. + The substitutions are identified by braces ('{' and '}'). + """ + + def index(): + """S.index(sub[, start[, end]]) -> int + + Like S.find() but raise ValueError when the substring is not found. + """ + + def isalnum(): + """S.isalnum() -> bool + + Return True if all characters in S are alphanumeric + and there is at least one character in S, False otherwise. + """ + + def isalpha(): + """S.isalpha() -> bool + + Return True if all characters in S are alphabetic + and there is at least one character in S, False otherwise. + """ + + def isdecimal(): + """S.isdecimal() -> bool + + Return True if there are only decimal characters in S, + False otherwise. + """ + + def isdigit(): + """S.isdigit() -> bool + + Return True if all characters in S are digits + and there is at least one character in S, False otherwise. + """ + + def islower(): + """S.islower() -> bool + + Return True if all cased characters in S are lowercase and there is + at least one cased character in S, False otherwise. + """ + + def isnumeric(): + """S.isnumeric() -> bool + + Return True if there are only numeric characters in S, + False otherwise. 
+ """ + + def isspace(): + """S.isspace() -> bool + + Return True if all characters in S are whitespace + and there is at least one character in S, False otherwise. + """ + + def istitle(): + """S.istitle() -> bool + + Return True if S is a titlecased string and there is at least one + character in S, i.e. upper- and titlecase characters may only + follow uncased characters and lowercase characters only cased ones. + Return False otherwise. + """ + + def isupper(): + """S.isupper() -> bool + + Return True if all cased characters in S are uppercase and there is + at least one cased character in S, False otherwise. + """ + + def join(): + """S.join(iterable) -> unicode + + Return a string which is the concatenation of the strings in the + iterable. The separator between elements is S. + """ + + def ljust(): + """S.ljust(width[, fillchar]) -> int + + Return S left-justified in a Unicode string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def lower(): + """S.lower() -> unicode + + Return a copy of the string S converted to lowercase. + """ + + def lstrip(): + """S.lstrip([chars]) -> unicode + + Return a copy of the string S with leading whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is a str, it will be converted to unicode before stripping + """ + + def partition(): + """S.partition(sep) -> (head, sep, tail) + + Search for the separator sep in S, and return the part before it, + the separator itself, and the part after it. If the separator is not + found, return S and two empty strings. + """ + + def replace(): + """S.replace(old, new[, count]) -> unicode + + Return a copy of S with all occurrences of substring + old replaced by new. If the optional argument count is + given, only the first count occurrences are replaced. + """ + + def rfind(): + """S.rfind(sub[, start[, end]]) -> int + + Return the highest index in S where substring sub is found, + such that sub is contained within S[start:end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def rindex(): + """S.rindex(sub[, start[, end]]) -> int + + Like S.rfind() but raise ValueError when the substring is not found. + """ + + def rjust(): + """S.rjust(width[, fillchar]) -> unicode + + Return S right-justified in a Unicode string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def rpartition(): + """S.rpartition(sep) -> (head, sep, tail) + + Search for the separator sep in S, starting at the end of S, and return + the part before it, the separator itself, and the part after it. If the + separator is not found, return two empty strings and S. + """ + + def rsplit(): + """S.rsplit(sep=None, maxsplit=-1) -> list of strings + + Return a list of the words in S, using sep as the + delimiter string, starting at the end of the string and + working to the front. If maxsplit is given, at most maxsplit + splits are done. If sep is not specified, any whitespace string + is a separator. + """ + + def rstrip(): + """S.rstrip([chars]) -> unicode + + Return a copy of the string S with trailing whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is a str, it will be converted to unicode before stripping + """ + + def split(): + """S.split(sep=None, maxsplit=-1) -> list of strings + + Return a list of the words in S, using sep as the + delimiter string. 
If maxsplit is given, at most maxsplit + splits are done. If sep is not specified or is None, any + whitespace string is a separator and empty strings are + removed from the result. + """ + + def splitlines(): + """S.splitlines(keepends=False) -> list of strings + + Return a list of the lines in S, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + + def startswith(): + """S.startswith(prefix[, start[, end]]) -> bool + + Return True if S starts with the specified prefix, False otherwise. + With optional start, test S beginning at that position. + With optional end, stop comparing S at that position. + prefix can also be a tuple of strings to try. + """ + + def strip(): + """S.strip([chars]) -> unicode + + Return a copy of the string S with leading and trailing + whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is a str, it will be converted to unicode before stripping + """ + + def swapcase(): + """S.swapcase() -> unicode + + Return a copy of S with uppercase characters converted to lowercase + and vice versa. + """ + + def title(): + """S.title() -> unicode + + Return a titlecased version of S, i.e. words start with title case + characters, all remaining cased characters have lower case. + """ + + def translate(): + """S.translate(table) -> unicode + + Return a copy of the string S, where all characters have been mapped + through the given translation table, which must be a mapping of + Unicode ordinals to Unicode ordinals, Unicode strings or None. + Unmapped characters are left untouched. Characters mapped to None + are deleted. + """ + + def upper(): + """S.upper() -> unicode + + Return a copy of S converted to uppercase. + """ + + def zfill(): + """S.zfill(width) -> unicode + + Pad a numeric string S with zeros on the left, to fill a field + of the specified width. The string S is never truncated. + """ + W_UnicodeObject.typedef = StdTypeDef( "unicode", basestring_typedef, __new__ = interp2app(W_UnicodeObject.descr_new), - __doc__ = '''unicode(string [, encoding[, errors]]) -> object + __doc__ = UnicodeDocstrings.__doc__, -Create a new Unicode object from the given encoded string. -encoding defaults to the current default string encoding. 
-errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.''', + __repr__ = interp2app(W_UnicodeObject.descr_repr, + doc=UnicodeDocstrings.__repr__.__doc__), + __str__ = interp2app(W_UnicodeObject.descr_str, + doc=UnicodeDocstrings.__str__.__doc__), + __hash__ = interp2app(W_UnicodeObject.descr_hash, + doc=UnicodeDocstrings.__hash__.__doc__), - __repr__ = interp2app(W_UnicodeObject.descr_repr), - __str__ = interp2app(W_UnicodeObject.descr_str), - __hash__ = interp2app(W_UnicodeObject.descr_hash), + __eq__ = interp2app(W_UnicodeObject.descr_eq, + doc=UnicodeDocstrings.__eq__.__doc__), + __ne__ = interp2app(W_UnicodeObject.descr_ne, + doc=UnicodeDocstrings.__ne__.__doc__), + __lt__ = interp2app(W_UnicodeObject.descr_lt, + doc=UnicodeDocstrings.__lt__.__doc__), + __le__ = interp2app(W_UnicodeObject.descr_le, + doc=UnicodeDocstrings.__le__.__doc__), + __gt__ = interp2app(W_UnicodeObject.descr_gt, + doc=UnicodeDocstrings.__gt__.__doc__), + __ge__ = interp2app(W_UnicodeObject.descr_ge, + doc=UnicodeDocstrings.__ge__.__doc__), - __eq__ = interp2app(W_UnicodeObject.descr_eq), - __ne__ = interp2app(W_UnicodeObject.descr_ne), - __lt__ = interp2app(W_UnicodeObject.descr_lt), - __le__ = interp2app(W_UnicodeObject.descr_le), - __gt__ = interp2app(W_UnicodeObject.descr_gt), - __ge__ = interp2app(W_UnicodeObject.descr_ge), + __len__ = interp2app(W_UnicodeObject.descr_len, + doc=UnicodeDocstrings.__len__.__doc__), + __contains__ = interp2app(W_UnicodeObject.descr_contains, + doc=UnicodeDocstrings.__contains__.__doc__), - __len__ = interp2app(W_UnicodeObject.descr_len), - __contains__ = interp2app(W_UnicodeObject.descr_contains), + __add__ = interp2app(W_UnicodeObject.descr_add, + doc=UnicodeDocstrings.__add__.__doc__), + __mul__ = interp2app(W_UnicodeObject.descr_mul, + doc=UnicodeDocstrings.__mul__.__doc__), + __rmul__ = interp2app(W_UnicodeObject.descr_mul, + doc=UnicodeDocstrings.__rmul__.__doc__), - __add__ = interp2app(W_UnicodeObject.descr_add), - __mul__ = interp2app(W_UnicodeObject.descr_mul), - __rmul__ = interp2app(W_UnicodeObject.descr_mul), + __getitem__ = interp2app(W_UnicodeObject.descr_getitem, + doc=UnicodeDocstrings.__getitem__.__doc__), + __getslice__ = interp2app(W_UnicodeObject.descr_getslice, + doc=UnicodeDocstrings.__getslice__.__doc__), - __getitem__ = interp2app(W_UnicodeObject.descr_getitem), - __getslice__ = interp2app(W_UnicodeObject.descr_getslice), + capitalize = interp2app(W_UnicodeObject.descr_capitalize, + doc=UnicodeDocstrings.capitalize.__doc__), + center = interp2app(W_UnicodeObject.descr_center, + doc=UnicodeDocstrings.center.__doc__), + count = interp2app(W_UnicodeObject.descr_count, + doc=UnicodeDocstrings.count.__doc__), + decode = interp2app(W_UnicodeObject.descr_decode, + doc=UnicodeDocstrings.decode.__doc__), + encode = interp2app(W_UnicodeObject.descr_encode, + doc=UnicodeDocstrings.encode.__doc__), + expandtabs = interp2app(W_UnicodeObject.descr_expandtabs, + doc=UnicodeDocstrings.expandtabs.__doc__), + find = interp2app(W_UnicodeObject.descr_find, + doc=UnicodeDocstrings.find.__doc__), + rfind = interp2app(W_UnicodeObject.descr_rfind, + doc=UnicodeDocstrings.rfind.__doc__), + index = interp2app(W_UnicodeObject.descr_index, + doc=UnicodeDocstrings.index.__doc__), + rindex = interp2app(W_UnicodeObject.descr_rindex, + doc=UnicodeDocstrings.rindex.__doc__), + isalnum = interp2app(W_UnicodeObject.descr_isalnum, + doc=UnicodeDocstrings.isalnum.__doc__), + isalpha = interp2app(W_UnicodeObject.descr_isalpha, + doc=UnicodeDocstrings.isalpha.__doc__), + 
isdecimal = interp2app(W_UnicodeObject.descr_isdecimal, + doc=UnicodeDocstrings.isdecimal.__doc__), + isdigit = interp2app(W_UnicodeObject.descr_isdigit, + doc=UnicodeDocstrings.isdigit.__doc__), + islower = interp2app(W_UnicodeObject.descr_islower, + doc=UnicodeDocstrings.islower.__doc__), + isnumeric = interp2app(W_UnicodeObject.descr_isnumeric, + doc=UnicodeDocstrings.isnumeric.__doc__), + isspace = interp2app(W_UnicodeObject.descr_isspace, + doc=UnicodeDocstrings.isspace.__doc__), + istitle = interp2app(W_UnicodeObject.descr_istitle, + doc=UnicodeDocstrings.istitle.__doc__), + isupper = interp2app(W_UnicodeObject.descr_isupper, + doc=UnicodeDocstrings.isupper.__doc__), + join = interp2app(W_UnicodeObject.descr_join, + doc=UnicodeDocstrings.join.__doc__), + ljust = interp2app(W_UnicodeObject.descr_ljust, + doc=UnicodeDocstrings.ljust.__doc__), + rjust = interp2app(W_UnicodeObject.descr_rjust, + doc=UnicodeDocstrings.rjust.__doc__), + lower = interp2app(W_UnicodeObject.descr_lower, + doc=UnicodeDocstrings.lower.__doc__), + partition = interp2app(W_UnicodeObject.descr_partition, + doc=UnicodeDocstrings.partition.__doc__), + rpartition = interp2app(W_UnicodeObject.descr_rpartition, + doc=UnicodeDocstrings.rpartition.__doc__), + replace = interp2app(W_UnicodeObject.descr_replace, + doc=UnicodeDocstrings.replace.__doc__), + split = interp2app(W_UnicodeObject.descr_split, + doc=UnicodeDocstrings.split.__doc__), + rsplit = interp2app(W_UnicodeObject.descr_rsplit, + doc=UnicodeDocstrings.rsplit.__doc__), + splitlines = interp2app(W_UnicodeObject.descr_splitlines, + doc=UnicodeDocstrings.splitlines.__doc__), + startswith = interp2app(W_UnicodeObject.descr_startswith, + doc=UnicodeDocstrings.startswith.__doc__), + endswith = interp2app(W_UnicodeObject.descr_endswith, + doc=UnicodeDocstrings.endswith.__doc__), + strip = interp2app(W_UnicodeObject.descr_strip, + doc=UnicodeDocstrings.strip.__doc__), + lstrip = interp2app(W_UnicodeObject.descr_lstrip, + doc=UnicodeDocstrings.lstrip.__doc__), + rstrip = interp2app(W_UnicodeObject.descr_rstrip, + doc=UnicodeDocstrings.rstrip.__doc__), + swapcase = interp2app(W_UnicodeObject.descr_swapcase, + doc=UnicodeDocstrings.swapcase.__doc__), + title = interp2app(W_UnicodeObject.descr_title, + doc=UnicodeDocstrings.title.__doc__), + translate = interp2app(W_UnicodeObject.descr_translate, + doc=UnicodeDocstrings.translate.__doc__), + upper = interp2app(W_UnicodeObject.descr_upper, + doc=UnicodeDocstrings.upper.__doc__), + zfill = interp2app(W_UnicodeObject.descr_zfill, + doc=UnicodeDocstrings.zfill.__doc__), - capitalize = interp2app(W_UnicodeObject.descr_capitalize), - center = interp2app(W_UnicodeObject.descr_center), - count = interp2app(W_UnicodeObject.descr_count), - decode = interp2app(W_UnicodeObject.descr_decode), - encode = interp2app(W_UnicodeObject.descr_encode), - expandtabs = interp2app(W_UnicodeObject.descr_expandtabs), - find = interp2app(W_UnicodeObject.descr_find), - rfind = interp2app(W_UnicodeObject.descr_rfind), - index = interp2app(W_UnicodeObject.descr_index), - rindex = interp2app(W_UnicodeObject.descr_rindex), - isalnum = interp2app(W_UnicodeObject.descr_isalnum), - isalpha = interp2app(W_UnicodeObject.descr_isalpha), - isdecimal = interp2app(W_UnicodeObject.descr_isdecimal), - isdigit = interp2app(W_UnicodeObject.descr_isdigit), - islower = interp2app(W_UnicodeObject.descr_islower), - isnumeric = interp2app(W_UnicodeObject.descr_isnumeric), - isspace = interp2app(W_UnicodeObject.descr_isspace), - istitle = 
interp2app(W_UnicodeObject.descr_istitle), - isupper = interp2app(W_UnicodeObject.descr_isupper), - join = interp2app(W_UnicodeObject.descr_join), - ljust = interp2app(W_UnicodeObject.descr_ljust), - rjust = interp2app(W_UnicodeObject.descr_rjust), - lower = interp2app(W_UnicodeObject.descr_lower), - partition = interp2app(W_UnicodeObject.descr_partition), - rpartition = interp2app(W_UnicodeObject.descr_rpartition), - replace = interp2app(W_UnicodeObject.descr_replace), - split = interp2app(W_UnicodeObject.descr_split), - rsplit = interp2app(W_UnicodeObject.descr_rsplit), - splitlines = interp2app(W_UnicodeObject.descr_splitlines), - startswith = interp2app(W_UnicodeObject.descr_startswith), - endswith = interp2app(W_UnicodeObject.descr_endswith), - strip = interp2app(W_UnicodeObject.descr_strip), - lstrip = interp2app(W_UnicodeObject.descr_lstrip), - rstrip = interp2app(W_UnicodeObject.descr_rstrip), - swapcase = interp2app(W_UnicodeObject.descr_swapcase), - title = interp2app(W_UnicodeObject.descr_title), - translate = interp2app(W_UnicodeObject.descr_translate), - upper = interp2app(W_UnicodeObject.descr_upper), - zfill = interp2app(W_UnicodeObject.descr_zfill), - - format = interp2app(W_UnicodeObject.descr_format), - __format__ = interp2app(W_UnicodeObject.descr__format__), - __mod__ = interp2app(W_UnicodeObject.descr_mod), - __getnewargs__ = interp2app(W_UnicodeObject.descr_getnewargs), + format = interp2app(W_UnicodeObject.descr_format, + doc=UnicodeDocstrings.format.__doc__), + __format__ = interp2app(W_UnicodeObject.descr__format__, + doc=UnicodeDocstrings.__format__.__doc__), + __mod__ = interp2app(W_UnicodeObject.descr_mod, + doc=UnicodeDocstrings.__mod__.__doc__), + __getnewargs__ = interp2app(W_UnicodeObject.descr_getnewargs, + doc=UnicodeDocstrings.__getnewargs__.__doc__), _formatter_parser = interp2app(W_UnicodeObject.descr_formatter_parser), _formatter_field_name_split = interp2app(W_UnicodeObject.descr_formatter_field_name_split), From noreply at buildbot.pypy.org Fri Aug 16 02:08:53 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 16 Aug 2013 02:08:53 +0200 (CEST) Subject: [pypy-commit] pypy default: wrapint -> space.newint Message-ID: <20130816000854.024821C0170@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r66170:b9e203336e31 Date: 2013-08-15 17:07 -0700 http://bitbucket.org/pypy/pypy/changeset/b9e203336e31/ Log: wrapint -> space.newint diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -7,7 +7,6 @@ from pypy.objspace.std.bytearraytype import ( getbytevalue, makebytearraydata_w, new_bytearray) from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.noneobject import W_NoneObject @@ -76,7 +75,7 @@ def len__Bytearray(space, w_bytearray): result = len(w_bytearray.data) - return wrapint(space, result) + return space.newint(result) def ord__Bytearray(space, w_bytearray): if len(w_bytearray.data) != 1: diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat -from pypy.objspace.std.inttype import wrapint, W_AbstractIntObject 
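The hunks of this changeset all have the same shape: a call to the wrapint(space, x) helper imported from inttype becomes a call to the space.newint(x) method, which the patch treats as interchangeable, and the now-unused import is dropped. A toy sketch of the call-site pattern, with a made-up object space standing in for the real one:

    class ToyIntBox(object):
        def __init__(self, intval):
            self.intval = intval

    class ToySpace(object):
        def newint(self, x):
            # the real method returns a wrapped W_IntObject; this is a stand-in
            return ToyIntBox(x)

    def descr_len(space, items):
        # before the patch: return wrapint(space, len(items))
        return space.newint(len(items))

    assert descr_len(ToySpace(), [1, 2, 3]).intval == 3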
+from pypy.objspace.std.inttype import W_AbstractIntObject from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.noneobject import W_NoneObject @@ -55,7 +55,7 @@ if space.is_w(space.type(self), space.w_int): return self a = self.intval - return wrapint(space, a) + return space.newint(a) registerimplementation(W_IntObject) @@ -104,7 +104,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer addition")) - return wrapint(space, z) + return space.newint(z) def sub__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -114,7 +114,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer substraction")) - return wrapint(space, z) + return space.newint(z) def mul__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -124,7 +124,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer multiplication")) - return wrapint(space, z) + return space.newint(z) def floordiv__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -137,7 +137,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer division")) - return wrapint(space, z) + return space.newint(z) div__Int_Int = floordiv__Int_Int def truediv__Int_Int(space, w_int1, w_int2): @@ -158,7 +158,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer modulo")) - return wrapint(space, z) + return space.newint(z) def divmod__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -231,7 +231,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer negation")) - return wrapint(space, x) + return space.newint(x) get_negint = neg__Int @@ -247,7 +247,7 @@ def invert__Int(space, w_int1): x = w_int1.intval a = ~x - return wrapint(space, a) + return space.newint(a) def lshift__Int_Int(space, w_int1, w_int2): a = w_int1.intval @@ -258,7 +258,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer left shift")) - return wrapint(space, c) + return space.newint(c) if b < 0: raise OperationError(space.w_ValueError, space.wrap("negative shift count")) @@ -284,25 +284,25 @@ a = 0 else: a = a >> b - return wrapint(space, a) + return space.newint(a) def and__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a & b - return wrapint(space, res) + return space.newint(res) def xor__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a ^ b - return wrapint(space, res) + return space.newint(res) def or__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a | b - return wrapint(space, res) + return space.newint(res) def pos__Int(self, space): return self.int(space) @@ -323,7 +323,7 @@ return space.wrap(hex(w_int1.intval)) def getnewargs__Int(space, w_int1): - return space.newtuple([wrapint(space, w_int1.intval)]) + return space.newtuple([space.newint(w_int1.intval)]) register_all(vars()) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -19,7 +19,6 @@ from pypy.objspace.std import slicetype from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.iterobject import (W_FastListIterObject, 
W_ReverseSeqIterObject) from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice @@ -427,7 +426,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): return W_FastListIterObject(self) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -4,7 +4,6 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std import newformat, slicetype from pypy.objspace.std.formatting import mod_format -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.noneobject import W_NoneObject @@ -589,7 +588,7 @@ def str_count__String_String_ANY_ANY(space, w_self, w_arg, w_start, w_end): u_self, u_start, u_end = _convert_idx_params(space, w_self, w_start, w_end) - return wrapint(space, u_self.count(w_arg._value, u_start, u_end)) + return space.newint(u_self.count(w_arg._value, u_start, u_end)) def str_endswith__String_String_ANY_ANY(space, w_self, w_suffix, w_start, w_end): (u_self, start, end) = _convert_idx_params(space, w_self, w_start, @@ -709,7 +708,7 @@ def hash__String(space, w_str): s = w_str._value x = compute_hash(s) - return wrapint(space, x) + return space.newint(x) def lt__String_String(space, w_str1, w_str2): s1 = w_str1._value diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -7,7 +7,6 @@ from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import slicetype -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate @@ -56,7 +55,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): from pypy.objspace.std import iterobject From noreply at buildbot.pypy.org Fri Aug 16 09:56:22 2013 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 16 Aug 2013 09:56:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Call __int__ or __float__ on item assignment in some cases. Fixes test_assign_object_with_special_methods from 6e2656749ce4 Message-ID: <20130816075622.7F4CB1C0E1B@cobra.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r66171:2abb5e5f6bdf Date: 2013-08-16 09:55 +0200 http://bitbucket.org/pypy/pypy/changeset/2abb5e5f6bdf/ Log: Call __int__ or __float__ on item assignment in some cases. 
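In app-level terms, the change described in this log means that assigning an arbitrary object to an item of an integer-typed array falls back to the object's __int__ method, and to __float__ for the float typecodes, as on CPython. A usage example (the Scaled class is invented for illustration):

    import array

    class Scaled(object):
        def __init__(self, value):
            self.value = value
        def __int__(self):
            return int(self.value)
        def __float__(self):
            return float(self.value)

    a = array.array('i', [0])
    a[0] = Scaled(7.9)          # integer typecodes convert via __int__
    assert a[0] == 7

    d = array.array('d', [0.0])
    d[0] = Scaled(7.9)          # float typecodes convert via __float__
    assert d[0] == float(7.9)

Note that, per the special case kept in the patch, a plain float assigned to an integer-typed array still raises TypeError, matching CPython.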
Fixes test_assign_object_with_special_methods from 6e2656749ce4 diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,7 +11,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi - +from pypy.objspace.std.floatobject import W_FloatObject @unwrap_spec(typecode=str) def w_array(space, w_cls, typecode, __args__): @@ -532,7 +532,7 @@ class TypeCode(object): - def __init__(self, itemtype, unwrap, canoverflow=False, signed=False): + def __init__(self, itemtype, unwrap, canoverflow=False, signed=False, method='__int__'): self.itemtype = itemtype self.bytes = rffi.sizeof(itemtype) self.arraytype = lltype.Array(itemtype, hints={'nolength': True}) @@ -540,6 +540,7 @@ self.signed = signed self.canoverflow = canoverflow self.w_class = None + self.method = method if self.canoverflow: assert self.bytes <= rffi.sizeof(rffi.ULONG) @@ -554,8 +555,8 @@ return True types = { - 'c': TypeCode(lltype.Char, 'str_w'), - 'u': TypeCode(lltype.UniChar, 'unicode_w'), + 'c': TypeCode(lltype.Char, 'str_w', method=''), + 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), 'b': TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True), 'B': TypeCode(rffi.UCHAR, 'int_w', True), 'h': TypeCode(rffi.SHORT, 'int_w', True, True), @@ -567,8 +568,8 @@ # rbigint.touint() which # corresponds to the # C-type unsigned long - 'f': TypeCode(lltype.SingleFloat, 'float_w'), - 'd': TypeCode(lltype.Float, 'float_w'), + 'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'), + 'd': TypeCode(lltype.Float, 'float_w', method='__float__'), } for k, v in types.items(): v.typecode = k @@ -613,7 +614,19 @@ def item_w(self, w_item): space = self.space unwrap = getattr(space, mytype.unwrap) - item = unwrap(w_item) + try: + item = unwrap(w_item) + except OperationError, e: + if isinstance(w_item, W_FloatObject): # Odd special case from cpython + raise + if mytype.method != '' and e.match(space, space.w_TypeError): + try: + item = unwrap(space.call_method(w_item, mytype.method)) + except OperationError: + msg = 'array item must be ' + mytype.unwrap[:-2] + raise OperationError(space.w_TypeError, space.wrap(msg)) + else: + raise if mytype.unwrap == 'bigint_w': try: item = item.touint() diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -937,6 +937,13 @@ raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) + a = array('c', 'hi') + a[0] = 'b' + assert a[0] == 'b' + + a = array('u', u'hi') + a[0] = u'b' + assert a[0] == u'b' class TestCPythonsOwnArray(BaseArrayTests): From noreply at buildbot.pypy.org Fri Aug 16 09:56:24 2013 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 16 Aug 2013 09:56:24 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130816075624.2BA981C0E1B@cobra.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r66172:f4f5fae1001d Date: 2013-08-16 09:55 +0200 http://bitbucket.org/pypy/pypy/changeset/f4f5fae1001d/ Log: merge diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -437,14 +437,14 @@ return self.getrepr(self.space, info) def getdisplayname(self): + space = self.space w_name = self.w_name if w_name is 
None: return '?' - elif self.space.is_true(self.space.isinstance(w_name, - self.space.w_str)): - return "'%s'" % self.space.str_w(w_name) + elif space.isinstance_w(w_name, space.w_str): + return "'%s'" % space.str_w(w_name) else: - return self.space.str_w(self.space.repr(w_name)) + return space.str_w(space.repr(w_name)) def file_writelines(self, w_lines): """writelines(sequence_of_strings) -> None. Write the strings to the file. diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -685,3 +685,8 @@ msg=error_message) sys.stderr.write('.') sys.stderr.write('\n') + + def test_complexbox_to_pycomplex(self): + from numpypy import complex128 + x = complex128(3.4j) + assert complex(x) == 3.4j diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -7,7 +7,6 @@ from pypy.objspace.std.bytearraytype import ( getbytevalue, makebytearraydata_w, new_bytearray) from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.noneobject import W_NoneObject @@ -76,7 +75,7 @@ def len__Bytearray(space, w_bytearray): result = len(w_bytearray.data) - return wrapint(space, result) + return space.newint(result) def ord__Bytearray(space, w_bytearray): if len(w_bytearray.data) != 1: diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -201,7 +201,7 @@ if w_z is not None: # __complex__() must return a complex or (float,int,long) object # (XXX should not use isinstance here) - if not strict_typing and (space.isinstance_w(w_z, space.w_int) or + if not strict_typing and (space.isinstance_w(w_z, space.w_int) or space.isinstance_w(w_z, space.w_long) or space.isinstance_w(w_z, space.w_float)): return (space.float_w(w_z), 0.0) @@ -214,8 +214,10 @@ # # no '__complex__' method, so we assume it is a float, # unless it is an instance of some subclass of complex. - if isinstance(w_complex, W_ComplexObject): - return (w_complex.realval, w_complex.imagval) + if space.isinstance_w(w_complex, space.gettypefor(W_ComplexObject)): + real = space.float(space.getattr(w_complex, space.wrap("real"))) + imag = space.float(space.getattr(w_complex, space.wrap("imag"))) + return (space.float_w(real), space.float_w(imag)) # # Check that it is not a string (on which space.float() would succeed). 
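The complextype.py hunk above switches from reading the interp-level fields of a W_ComplexObject to reading the app-level .real and .imag attributes, so any instance of complex, including subclasses such as a numpy complex scalar, unpacks correctly; that is what the new test_complexbox_to_pycomplex checks. A rough pure-Python restatement of the core rule (a sketch only, the real helper also handles __complex__, strings and the strictness flag):

    def unpack_complex(value):
        if isinstance(value, complex):            # covers subclasses too
            return float(value.real), float(value.imag)
        if isinstance(value, (int, float)):       # plus long on Python 2
            return float(value), 0.0
        raise TypeError("expected a number")

    assert unpack_complex(3.4j) == (0.0, 3.4)
    assert unpack_complex(2) == (2.0, 0.0)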
if (space.isinstance_w(w_complex, space.w_str) or diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat -from pypy.objspace.std.inttype import wrapint, W_AbstractIntObject +from pypy.objspace.std.inttype import W_AbstractIntObject from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.noneobject import W_NoneObject @@ -55,7 +55,7 @@ if space.is_w(space.type(self), space.w_int): return self a = self.intval - return wrapint(space, a) + return space.newint(a) registerimplementation(W_IntObject) @@ -104,7 +104,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer addition")) - return wrapint(space, z) + return space.newint(z) def sub__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -114,7 +114,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer substraction")) - return wrapint(space, z) + return space.newint(z) def mul__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -124,7 +124,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer multiplication")) - return wrapint(space, z) + return space.newint(z) def floordiv__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -137,7 +137,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer division")) - return wrapint(space, z) + return space.newint(z) div__Int_Int = floordiv__Int_Int def truediv__Int_Int(space, w_int1, w_int2): @@ -158,7 +158,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer modulo")) - return wrapint(space, z) + return space.newint(z) def divmod__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -231,7 +231,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer negation")) - return wrapint(space, x) + return space.newint(x) get_negint = neg__Int @@ -247,7 +247,7 @@ def invert__Int(space, w_int1): x = w_int1.intval a = ~x - return wrapint(space, a) + return space.newint(a) def lshift__Int_Int(space, w_int1, w_int2): a = w_int1.intval @@ -258,7 +258,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer left shift")) - return wrapint(space, c) + return space.newint(c) if b < 0: raise OperationError(space.w_ValueError, space.wrap("negative shift count")) @@ -284,25 +284,25 @@ a = 0 else: a = a >> b - return wrapint(space, a) + return space.newint(a) def and__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a & b - return wrapint(space, res) + return space.newint(res) def xor__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a ^ b - return wrapint(space, res) + return space.newint(res) def or__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a | b - return wrapint(space, res) + return space.newint(res) def pos__Int(self, space): return self.int(space) @@ -323,7 +323,7 @@ return space.wrap(hex(w_int1.intval)) def getnewargs__Int(space, w_int1): - return space.newtuple([wrapint(space, w_int1.intval)]) + return space.newtuple([space.newint(w_int1.intval)]) register_all(vars()) diff --git a/pypy/objspace/std/listobject.py 
b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -19,7 +19,6 @@ from pypy.objspace.std import slicetype from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.iterobject import (W_FastListIterObject, W_ReverseSeqIterObject) from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice @@ -427,7 +426,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): return W_FastListIterObject(self) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -4,7 +4,6 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std import newformat, slicetype from pypy.objspace.std.formatting import mod_format -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.noneobject import W_NoneObject @@ -589,7 +588,7 @@ def str_count__String_String_ANY_ANY(space, w_self, w_arg, w_start, w_end): u_self, u_start, u_end = _convert_idx_params(space, w_self, w_start, w_end) - return wrapint(space, u_self.count(w_arg._value, u_start, u_end)) + return space.newint(u_self.count(w_arg._value, u_start, u_end)) def str_endswith__String_String_ANY_ANY(space, w_self, w_suffix, w_start, w_end): (u_self, start, end) = _convert_idx_params(space, w_self, w_start, @@ -709,7 +708,7 @@ def hash__String(space, w_str): s = w_str._value x = compute_hash(s) - return wrapint(space, x) + return space.newint(x) def lt__String_String(space, w_str1, w_str2): s1 = w_str1._value diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -7,7 +7,6 @@ from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import slicetype -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate @@ -56,7 +55,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): from pypy.objspace.std import iterobject diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -382,7 +382,7 @@ from rpython.jit.backend.tool.viewcode import World world = World() for entry in extract_category(log, 'jit-backend-dump'): - world.parse(entry.splitlines(True), truncate_addr=False) + world.parse(entry.splitlines(True)) dumps = {} for r in world.ranges: if r.addr in addrs and addrs[r.addr]: From noreply at buildbot.pypy.org Fri Aug 16 16:58:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Aug 2013 16:58:26 +0200 (CEST) Subject: [pypy-commit] stmgc default: Probably doesn't change anything, but you never know Message-ID: <20130816145826.03A541C0E1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r481:d58f5381cada Date: 2013-08-16 16:58 +0200 
http://bitbucket.org/pypy/stmgc/changeset/d58f5381cada/ Log: Probably doesn't change anything, but you never know diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -204,17 +204,18 @@ : (obj)) #define stm_repeat_read_barrier(obj) \ - (UNLIKELY((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED)) ? \ + (UNLIKELY(((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | \ + GCFLAG_MOVED)) != 0) ? \ stm_RepeatReadBarrier(obj) \ : (obj)) #define stm_immut_read_barrier(obj) \ - (UNLIKELY((obj)->h_tid & GCFLAG_STUB) ? \ + (UNLIKELY(((obj)->h_tid & GCFLAG_STUB) != 0) ? \ stm_ImmutReadBarrier(obj) \ : (obj)) #define stm_repeat_write_barrier(obj) \ - (UNLIKELY((obj)->h_tid & GCFLAG_WRITE_BARRIER) ? \ + (UNLIKELY(((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0) ? \ stm_RepeatWriteBarrier(obj) \ : (obj)) From noreply at buildbot.pypy.org Sat Aug 17 00:46:07 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Sat, 17 Aug 2013 00:46:07 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4-poll-fix: fixed support for select.poll in stdlib 2.7.4 Message-ID: <20130816224607.09E3E1C0E1B@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: stdlib-2.7.4-poll-fix Changeset: r66173:1922d4a83a50 Date: 2013-08-14 23:16 -0300 http://bitbucket.org/pypy/pypy/changeset/1922d4a83a50/ Log: fixed support for select.poll in stdlib 2.7.4 diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -22,6 +22,9 @@ @unwrap_spec(events=int) def register(self, space, w_fd, events=defaultevents): + if events > 32767: + msg = "signed short integer is greater than maximum" + raise OperationError(space.w_OverflowError, space.wrap(msg)) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events @@ -55,6 +58,10 @@ raise OperationError(space.w_ValueError, space.wrap("math range error")) + if timeout > 2147483647: + msg = "Python int too large to convert to C int" + raise OperationError(space.w_OverflowError, space.wrap(msg)) + try: retval = rpoll.poll(self.fddict, timeout) except rpoll.PollError, e: diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -213,6 +213,20 @@ readend.close() writeend.close() + def test_poll_int_overflow(self): + import select + + pollster = select.poll() + pollster.register(1) + + raises(OverflowError, pollster.poll, 1L << 64) + + pollster = select.poll() + raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 + raises(OverflowError, pollster.register, 0, 65535) # USHRT_MAX + 1 + raises(OverflowError, pollster.poll, 2147483648) # INT_MAX + 1 + raises(OverflowError, pollster.poll, 4294967296) # UINT_MAX + 1 + class AppTestSelectWithPipes(_AppTestSelect): "Use a pipe to get pairs of file descriptors" From noreply at buildbot.pypy.org Sat Aug 17 00:46:08 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 17 Aug 2013 00:46:08 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Merged in andrewsmedina/numpypy/stdlib-2.7.4-poll-fix (pull request #182) Message-ID: <20130816224608.609411C304F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.4 Changeset: r66174:300bcb7c3a02 Date: 2013-08-16 15:45 -0700 http://bitbucket.org/pypy/pypy/changeset/300bcb7c3a02/ Log: Merged in andrewsmedina/numpypy/stdlib-2.7.4-poll-fix (pull request #182) fixed support for select.poll in stdlib 2.7.4 diff --git 
a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -22,6 +22,9 @@ @unwrap_spec(events=int) def register(self, space, w_fd, events=defaultevents): + if events > 32767: + msg = "signed short integer is greater than maximum" + raise OperationError(space.w_OverflowError, space.wrap(msg)) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events @@ -55,6 +58,10 @@ raise OperationError(space.w_ValueError, space.wrap("math range error")) + if timeout > 2147483647: + msg = "Python int too large to convert to C int" + raise OperationError(space.w_OverflowError, space.wrap(msg)) + try: retval = rpoll.poll(self.fddict, timeout) except rpoll.PollError, e: diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -213,6 +213,20 @@ readend.close() writeend.close() + def test_poll_int_overflow(self): + import select + + pollster = select.poll() + pollster.register(1) + + raises(OverflowError, pollster.poll, 1L << 64) + + pollster = select.poll() + raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 + raises(OverflowError, pollster.register, 0, 65535) # USHRT_MAX + 1 + raises(OverflowError, pollster.poll, 2147483648) # INT_MAX + 1 + raises(OverflowError, pollster.poll, 4294967296) # UINT_MAX + 1 + class AppTestSelectWithPipes(_AppTestSelect): "Use a pipe to get pairs of file descriptors" From noreply at buildbot.pypy.org Sat Aug 17 01:14:37 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 17 Aug 2013 01:14:37 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: followup 1922d4a83a50: check underflow too Message-ID: <20130816231437.CB6941C002A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.4 Changeset: r66175:7595a86ea213 Date: 2013-08-16 16:14 -0700 http://bitbucket.org/pypy/pypy/changeset/7595a86ea213/ Log: followup 1922d4a83a50: check underflow too diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -22,9 +22,10 @@ @unwrap_spec(events=int) def register(self, space, w_fd, events=defaultevents): - if events > 32767: - msg = "signed short integer is greater than maximum" - raise OperationError(space.w_OverflowError, space.wrap(msg)) + if not -32767 - 1 <= events <= 32767: + m = ("signed short integer is " + + "greater than maximum" if events > 0 else "less than minimum") + raise OperationError(space.w_OverflowError, space.wrap(m)) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events @@ -58,7 +59,7 @@ raise OperationError(space.w_ValueError, space.wrap("math range error")) - if timeout > 2147483647: + if not -2147483647 - 1 <= timeout <= 2147483647: msg = "Python int too large to convert to C int" raise OperationError(space.w_OverflowError, space.wrap(msg)) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -223,8 +223,10 @@ pollster = select.poll() raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 + raises(OverflowError, pollster.register, 0, -32768 - 1) raises(OverflowError, pollster.register, 0, 65535) # USHRT_MAX + 1 raises(OverflowError, pollster.poll, 2147483648) # INT_MAX + 1 + raises(OverflowError, pollster.poll, -2147483648 - 1) 
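The new checks range-check the register() event mask against a C signed short and the poll() timeout against a C signed int, raising OverflowError for values that do not fit. Roughly, on a platform that provides select.poll, the patched behaviour looks like this (illustrative only):

    import select

    p = select.poll()
    p.register(0, select.POLLIN)      # fits in a signed short: fine
    try:
        p.register(0, 1 << 15)        # 32768 == SHRT_MAX + 1
    except OverflowError:
        pass
    try:
        p.poll(1 << 31)               # 2147483648 == INT_MAX + 1
    except OverflowError:
        pass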
raises(OverflowError, pollster.poll, 4294967296) # UINT_MAX + 1 From noreply at buildbot.pypy.org Sat Aug 17 10:45:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Aug 2013 10:45:55 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Test failing because of the XXX in writebarrier.py Message-ID: <20130817084555.A5CCE1C3677@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66176:38cddaeaa1c3 Date: 2013-08-17 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/38cddaeaa1c3/ Log: Test failing because of the XXX in writebarrier.py diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -280,6 +280,50 @@ assert res == 815 assert self.barriers == ['a2r', 'a2i'] + def test_no_subclasses_2(self): + class Y(object): + pass + def handle(y): + y.ybar += 1 + def make_y(i): + y = Y(); y.foo = 42; y.ybar = i + return y + def f1(i): + y = make_y(i) + external_any_gcobj() + prev = y.ybar # a2r + handle(y) # inside handle(): a2r, r2w + return prev + y.ybar # q2r + + res = self.interpret(f1, [10]) + assert res == 21 + assert self.barriers == ['a2r', 'a2r', 'r2w', 'q2r'] + + def test_subclassing_2(self): + class X: + __slots__ = ['foo'] + class Y(X): + pass + class Z(X): + pass + def handle(y): + y.ybar += 1 + def f1(i): + if i > 5: + y = Y(); y.foo = 42; y.ybar = i + x = y + else: + x = Z(); x.foo = 815; x.zbar = 'A' + y = Y(); y.foo = -13; y.ybar = i + external_any_gcobj() + prev = x.foo # a2r + handle(y) # inside handle(): a2r, r2w + return prev + x.foo # q2r + + res = self.interpret(f1, [10]) + assert res == 84 + assert self.barriers == ['a2r', 'a2r', 'r2w', 'q2r'] + def test_write_barrier_repeated(self): class X: pass From noreply at buildbot.pypy.org Sat Aug 17 10:45:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Aug 2013 10:45:57 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Fix the test Message-ID: <20130817084557.073A81C3677@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66177:cb4cc9552a90 Date: 2013-08-17 10:43 +0200 http://bitbucket.org/pypy/pypy/changeset/cb4cc9552a90/ Log: Fix the test diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -199,12 +199,19 @@ category[v] = 'Q' else: # the same, but only on objects of the right types - types = set([entry[1] for entry in effectinfo]) + # -- we need to consider 'types' or any base type + types = set() + for entry in effectinfo: + TYPE = entry[1].TO + while TYPE is not None: + types.add(TYPE) + if not isinstance(TYPE, lltype.Struct): + break + _, TYPE = TYPE._first_struct() for v in category.keys(): - if v.concretetype in types and category[v] == 'R': + if (v.concretetype.TO in types and + category[v] == 'R'): category[v] = 'Q' - # XXX this is likely not general enough: we need - # to consider 'types' or any base type if op.opname in MALLOCS: category[op.result] = 'W' From noreply at buildbot.pypy.org Sat Aug 17 11:03:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Aug 2013 11:03:54 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Add a test that actually shows that GCREFs work, even if slightly Message-ID: <20130817090354.55F631C3679@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo 
Branch: stmgc-static-barrier Changeset: r66178:274f8205ec11 Date: 2013-08-17 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/274f8205ec11/ Log: Add a test that actually shows that GCREFs work, even if slightly inefficiently --- which is probably fine. diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -1,5 +1,5 @@ from rpython.rlib.rstm import register_invoke_around_extcall -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.stm.test.transform_support import BaseTestTransform @@ -324,6 +324,32 @@ assert res == 84 assert self.barriers == ['a2r', 'a2r', 'r2w', 'q2r'] + def test_subclassing_gcref(self): + Y = lltype.GcStruct('Y', ('foo', lltype.Signed), + ('ybar', lltype.Signed)) + YPTR = lltype.Ptr(Y) + # + def handle(y): + y.ybar += 1 + def f1(i): + if i > 5: + y = lltype.malloc(Y); y.foo = 52 - i; y.ybar = i + x = lltype.cast_opaque_ptr(llmemory.GCREF, y) + else: + y = lltype.nullptr(Y) + x = lltype.cast_opaque_ptr(llmemory.GCREF, y) + external_any_gcobj() + prev = lltype.cast_opaque_ptr(YPTR, x).foo # a2r + handle(y) # inside handle(): a2r, r2w + return prev + lltype.cast_opaque_ptr(YPTR, x).ybar # q2r? + + res = self.interpret(f1, [10]) + assert res == 42 + 11 + assert self.barriers == ['a2r', 'a2r', 'r2w', 'a2r'] + # Ideally we should get [... 'q2r'] but getting 'a2r' is not wrong + # either. This is because from a GCREF the only thing we can do is + # cast_opaque_ptr, which is not special-cased in writebarrier.py. + def test_write_barrier_repeated(self): class X: pass diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -130,6 +130,14 @@ return LLFrame.op_cast_pointer(self, RESTYPE, obj) op_cast_pointer.need_result_type = True + def op_cast_opaque_ptr(self, RESTYPE, obj): + if obj._TYPE.TO._gckind == 'gc': + cat = self.check_category(obj, None) + p = lltype.cast_opaque_ptr(RESTYPE, obj) + return _stmptr(p, cat) + return LLFrame.op_cast_opaque_ptr(self, RESTYPE, obj) + op_cast_opaque_ptr.need_result_type = True + def op_malloc(self, obj, flags): assert flags['flavor'] == 'gc' # convert all existing pointers W -> V From noreply at buildbot.pypy.org Sat Aug 17 12:06:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Aug 2013 12:06:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Change COND_CALL_GC_WB to not take as argument the new pointer value, Message-ID: <20130817100608.45D641C1356@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66179:d3f189146d50 Date: 2013-08-17 11:55 +0200 http://bitbucket.org/pypy/pypy/changeset/d3f189146d50/ Log: Change COND_CALL_GC_WB to not take as argument the new pointer value, but only the target object. The backends don't use this anyway. It lets us optimize more in rewrite.py: no need for several write barriers if there are several setfields to the same object. diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -26,10 +26,11 @@ - Add COND_CALLs to the write barrier before SETFIELD_GC and SETARRAYITEM_GC operations. 
- recent_mallocs contains a dictionary of variable -> None. If a variable - is in the dictionary, next setfields can be called without a write barrier, - because the variable got allocated after the last potentially collecting - resop + 'write_barrier_applied' contains a dictionary of variable -> None. + If a variable is in the dictionary, next setfields can be called without + a write barrier. The idea is that an object that was freshly allocated + or already write_barrier'd don't need another write_barrier if there + was no potentially collecting resop inbetween. """ _previous_size = -1 @@ -42,7 +43,7 @@ self.cpu = cpu self.newops = [] self.known_lengths = {} - self.recent_mallocs = {} + self.write_barrier_applied = {} def rewrite(self, operations): # we can only remember one malloc since the next malloc can possibly @@ -221,18 +222,18 @@ def emitting_an_operation_that_can_collect(self): # must be called whenever we emit an operation that can collect: # forgets the previous MALLOC_NURSERY, if any; and empty the - # set 'recent_mallocs', so that future SETFIELDs will generate + # set 'write_barrier_applied', so that future SETFIELDs will generate # a write barrier as usual. self._op_malloc_nursery = None - self.recent_mallocs.clear() + self.write_barrier_applied.clear() def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) self.newops.append(op) - # mark 'v_result' as freshly malloced - self.recent_mallocs[v_result] = None + # mark 'v_result' as freshly malloced, so not needing a write barrier + self.write_barrier_applied[v_result] = None def gen_malloc_fixedsize(self, size, typeid, v_result): """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). @@ -315,7 +316,7 @@ [ConstInt(kind), ConstInt(itemsize), v_length], v_result, descr=arraydescr) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_malloc_nursery_varsize_frame(self, sizebox, v_result): @@ -327,7 +328,7 @@ v_result) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. 
@@ -360,7 +361,7 @@ self.newops.append(op) self._previous_size = size self._v_last_malloced_nursery = v_result - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_initialize_tid(self, v_newgcobj, tid): @@ -382,45 +383,42 @@ def handle_write_barrier_setfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) self.newops.append(op) def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier_array(op.getarg(0), - op.getarg(1), v) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.gen_write_barrier_array(val, op.getarg(1)) + #op = op.copy_and_change(rop.SETARRAYITEM_RAW) self.newops.append(op) - def gen_write_barrier(self, v_base, v_value): + def gen_write_barrier(self, v_base): write_barrier_descr = self.gc_ll_descr.write_barrier_descr - args = [v_base, v_value] + args = [v_base] self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, descr=write_barrier_descr)) + self.write_barrier_applied[v_base] = None - def gen_write_barrier_array(self, v_base, v_index, v_value): + def gen_write_barrier_array(self, v_base, v_index): write_barrier_descr = self.gc_ll_descr.write_barrier_descr if write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too @@ -430,13 +428,15 @@ length = self.known_lengths.get(v_base, LARGE) if length >= LARGE: # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] + args = [v_base, v_index] self.newops.append( ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, descr=write_barrier_descr)) + # a WB_ARRAY is not enough to prevent any future write + # barriers, so don't add to 'write_barrier_applied'! 
return # fall-back case: produce a write_barrier - self.gen_write_barrier(v_base, v_value) + self.gen_write_barrier(v_base) def round_up_for_allocation(self, size): if not self.gc_ll_descr.round_up: diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -561,8 +561,8 @@ jump() """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setfield_raw(p1, p2, descr=tzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, p2, descr=tzdescr) jump() """) @@ -575,8 +575,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -595,8 +595,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 129, descr=clendescr) call(123456) - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -616,8 +616,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 130, descr=clendescr) call(123456) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -628,8 +628,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -647,8 +647,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 5, descr=clendescr) label(p1, i2, p3) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -666,8 +666,8 @@ jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) jump(p1, p2) """, interiorzdescr=interiorzdescr) @@ -733,8 +733,8 @@ p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) setfield_gc(p1, i0, descr=strlendescr) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) jump() """) @@ -750,11 +750,25 @@ p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) label(p0, p1) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) jump() """) + def test_multiple_writes(self): + self.check_rewrite(""" + [p0, p1, p2] + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """, """ + [p0, p1, p2] + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """) + def test_rewrite_call_assembler(self): self.check_rewrite(""" [i0, f0] diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -502,8 +502,8 @@ 'SETFIELD_RAW/2d', 'STRSETITEM/3', 'UNICODESETITEM/3', - 
'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) - 'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.) + 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) + 'COND_CALL_GC_WB_ARRAY/2d', # [objptr, arrayindex] (write barr. for array) 'DEBUG_MERGE_POINT/*', # debugging only 'JIT_DEBUG/*', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend From noreply at buildbot.pypy.org Sat Aug 17 12:06:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 17 Aug 2013 12:06:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix some more tests Message-ID: <20130817100609.986B11C1356@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66180:3e3517a45321 Date: 2013-08-17 12:02 +0200 http://bitbucket.org/pypy/pypy/changeset/3e3517a45321/ Log: Fix some more tests diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -202,13 +202,11 @@ rewriter = gc.GcRewriterAssembler(gc_ll_descr, None) newops = rewriter.newops v_base = BoxPtr() - v_value = BoxPtr() - rewriter.gen_write_barrier(v_base, v_value) + rewriter.gen_write_barrier(v_base) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB assert newops[0].getarg(0) == v_base - assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() assert is_valid_int(wbdescr.jit_wb_if_flag) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2140,11 +2140,9 @@ s = lltype.malloc(S) s.tid = value sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - t = lltype.malloc(S) - tgcref = lltype.cast_opaque_ptr(llmemory.GCREF, t) del record[:] self.execute_operation(rop.COND_CALL_GC_WB, - [BoxPtr(sgcref), ConstPtr(tgcref)], + [BoxPtr(sgcref)], 'void', descr=WriteBarrierDescr()) if cond: assert record == [rffi.cast(lltype.Signed, sgcref)] @@ -2179,7 +2177,7 @@ sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) del record[:] self.execute_operation(rop.COND_CALL_GC_WB_ARRAY, - [BoxPtr(sgcref), ConstInt(123), BoxPtr(sgcref)], + [BoxPtr(sgcref), ConstInt(123)], 'void', descr=WriteBarrierDescr()) if cond: assert record == [rffi.cast(lltype.Signed, sgcref)] @@ -2244,7 +2242,7 @@ del record[:] box_index = BoxIndexCls((9<<7) + 17) self.execute_operation(rop.COND_CALL_GC_WB_ARRAY, - [BoxPtr(sgcref), box_index, BoxPtr(sgcref)], + [BoxPtr(sgcref), box_index], 'void', descr=WriteBarrierDescr()) if cond in [0, 1]: assert record == [rffi.cast(lltype.Signed, s.data)] From noreply at buildbot.pypy.org Sat Aug 17 16:47:58 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Aug 2013 16:47:58 +0200 (CEST) Subject: [pypy-commit] pypy optmodel-refactor: start refactoring optimizeopt Message-ID: <20130817144758.DC4BA1C10DD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optmodel-refactor Changeset: r66181:775cba7b3be1 Date: 2013-08-15 00:02 +0200 http://bitbucket.org/pypy/pypy/changeset/775cba7b3be1/ Log: start refactoring optimizeopt diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -510,5 +510,5 @@ dispatch_opt = make_dispatcher_method(OptHeap, 
'optimize_', - default=OptHeap.emit_operation) + emit_op=True) OptHeap.propagate_forward = dispatch_opt diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -19,10 +19,6 @@ def propagate_forward(self, op): dispatch_opt(self, op) - def opt_default(self, op): - assert not op.is_ovf() - self.emit_operation(op) - def propagate_bounds_backward(self, box): # FIXME: This takes care of the instruction where box is the reuslt # but the bounds produced by all instructions where box is @@ -473,6 +469,5 @@ propagate_bounds_INT_MUL_OVF = propagate_bounds_INT_MUL -dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_', - default=OptIntBounds.opt_default) +dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_', emit_op=True) dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_') diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -372,6 +372,7 @@ for i in range(1, len(optimizations)): optimizations[i - 1].next_optimization = optimizations[i] optimizations[-1].next_optimization = self + for o in optimizations: o.optimizer = self o.last_emitted_operation = None @@ -514,6 +515,22 @@ self.clear_newoperations() for op in self.loop.operations: self.first_optimization.propagate_forward(op) + # for opt in self.optimizations: + # func = getattr(opt, 'optimize_' + op.getopname().upper(), None) + # if func is not None: + # op = func(op) + # if op is None: + # break + # op = opt.emitted_operation(op) + # if op is None: + # break + #else: + # self._emit_operation(op) + #for opt in self.optimizations: + # func = getattr(opt, 'postprocess_' + op.getopname().upper(), + # None) + # if func: + # func(op) self.loop.operations = self.get_newoperations() self.loop.quasi_immutable_deps = self.quasi_immutable_deps # accumulate counters @@ -623,9 +640,6 @@ args[n + 1] = op.getdescr() return args - def optimize_default(self, op): - self.emit_operation(op) - def constant_fold(self, op): argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] @@ -647,21 +661,21 @@ if indexvalue.is_constant(): arrayvalue = self.getvalue(op.getarg(0)) arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint()) - self.optimize_default(op) + self.emit_operation(op) def optimize_STRGETITEM(self, op): indexvalue = self.getvalue(op.getarg(1)) if indexvalue.is_constant(): arrayvalue = self.getvalue(op.getarg(0)) arrayvalue.make_len_gt(MODE_STR, op.getdescr(), indexvalue.box.getint()) - self.optimize_default(op) + self.emit_operation(op) def optimize_UNICODEGETITEM(self, op): indexvalue = self.getvalue(op.getarg(1)) if indexvalue.is_constant(): arrayvalue = self.getvalue(op.getarg(0)) arrayvalue.make_len_gt(MODE_UNICODE, op.getdescr(), indexvalue.box.getint()) - self.optimize_default(op) + self.emit_operation(op) # These are typically removed already by OptRewrite, but it can be # dissabled and unrolling emits some SAME_AS ops to setup the @@ -673,8 +687,7 @@ value = self.getvalue(op.getarg(0)) self.optimizer.opaque_pointers[value] = True -dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', - default=Optimizer.optimize_default) +dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', emit_op=True) diff --git 
a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -52,7 +52,7 @@ # otherwise, the operation remains self.emit_operation(op) if op.returns_bool_result(): - self.optimizer.bool_boxes[self.getvalue(op.result)] = None + self.optimizer.bool_boxes[self.getvalue(op.result)] = None if nextop: self.emit_operation(nextop) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -526,5 +526,5 @@ self.make_equal_to(op.result, self.getvalue(op.getarg(0))) dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', - default=OptRewrite.emit_operation) + emit_op=True) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -10,6 +10,7 @@ def emit_operation(self, op): if op.is_guard(): + # XXX WTF is that? if self.optimizer.pendingfields is None: self.optimizer.pendingfields = [] Optimization.emit_operation(self, op) @@ -45,7 +46,7 @@ return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) self.last_label_descr = op.getdescr() self.emit_operation(op) - + def optimize_JUMP(self, op): if not self.unroll: descr = op.getdescr() @@ -61,6 +62,5 @@ op.setdescr(descr.target_tokens[0]) self.emit_operation(op) -dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', - default=OptSimplify.emit_operation) +dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', emit_op=True) OptSimplify.propagate_forward = dispatch_opt diff --git a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -490,7 +490,7 @@ self.make_equal_to(op.result, self.getvalue(newop.result)) dispatch_opt = make_dispatcher_method(OptRenameStrlen, 'optimize_', - default=OptRenameStrlen.emit_operation) + emit_op=True) class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel): diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -26,7 +26,8 @@ result.append((value, opclass, getattr(Class, name_prefix + name))) return unrolling_iterable(result) -def make_dispatcher_method(Class, name_prefix, op_prefix=None, default=None): +def make_dispatcher_method(Class, name_prefix, op_prefix=None, emit_op=False, + default=None): ops = _findall(Class, name_prefix, op_prefix) def dispatch(self, op, *args): if we_are_translated(): @@ -37,12 +38,16 @@ return func(self, op, *args) if default: return default(self, op, *args) + elif emit_op: + return self.emit_operation(op, *args) else: func = getattr(Class, name_prefix + op.getopname().upper(), None) if func is not None: return func(self, op, *args) if default: return default(self, op, *args) + elif emit_op: + return self.emit_operation(op, *args) dispatch.func_name = "dispatch_" + name_prefix return dispatch diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- 
a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -826,7 +826,6 @@ self.emit_operation(op) -dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_', - default=OptVirtualize.emit_operation) +dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_', emit_op=True) OptVirtualize.propagate_forward = dispatch_opt diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -752,8 +752,7 @@ def propagate_forward(self, op): dispatch_opt(self, op) -dispatch_opt = make_dispatcher_method(OptString, 'optimize_', - default=OptString.emit_operation) +dispatch_opt = make_dispatcher_method(OptString, 'optimize_', emit_op=True) def _findall_call_oopspec(): prefix = 'opt_call_stroruni_' From noreply at buildbot.pypy.org Sat Aug 17 16:48:00 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Aug 2013 16:48:00 +0200 (CEST) Subject: [pypy-commit] pypy rewritten-loop-logging: Enable rewritten loop to be dumped Message-ID: <20130817144800.672B71C10DD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewritten-loop-logging Changeset: r66182:b2ed08e955d9 Date: 2013-08-17 16:47 +0200 http://bitbucket.org/pypy/pypy/changeset/b2ed08e955d9/ Log: Enable rewritten loop to be dumped diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -571,7 +571,8 @@ self.mc.BL(self.stack_check_slowpath, c=c.HI) # call if ip > lr # cpu interface - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, + log): clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt clt._debug_nbargs = len(inputargs) @@ -644,8 +645,8 @@ frame_depth = max(frame_depth, target_frame_depth) return frame_depth - def assemble_bridge(self, faildescr, inputargs, operations, - original_loop_token, log): + def assemble_bridge(self, logger, faildescr, inputargs, operations, + original_loop_token, log): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -56,17 +56,18 @@ def finish_once(self): self.assembler.finish_once() - def compile_loop(self, inputargs, operations, looptoken, + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, name=''): - return self.assembler.assemble_loop(name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(logger, name, inputargs, operations, + looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(faildescr, inputargs, operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(logger, faildescr, inputargs, + operations, + original_loop_token, log=log) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem diff --git a/rpython/jit/backend/llgraph/runner.py 
b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -183,7 +183,8 @@ self.stats = stats or MiniStats() self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, + name=''): clt = model.CompiledLoopToken(self, looptoken.number) looptoken.compiled_loop_token = clt lltrace = LLTrace(inputargs, operations) @@ -191,7 +192,7 @@ clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -51,7 +51,8 @@ """ return False - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, + log=True, name=''): """Assemble the given loop. Should create and attach a fresh CompiledLoopToken to looptoken.compiled_loop_token and stick extra attributes @@ -67,7 +68,7 @@ """ raise NotImplementedError - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): """Assemble the bridge. The FailDescr is the descr of the original guard that failed. diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -49,7 +49,7 @@ valueboxes, descr) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) args = [] for box in inputargs: if isinstance(box, BoxInt): @@ -127,7 +127,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) res = self.cpu.get_int_value(deadframe, 0) @@ -145,7 +145,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, longlong.getfloatstorage(2.8)) fail = self.cpu.get_latest_descr(deadframe) @@ -170,7 +170,7 @@ inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -195,7 +195,7 @@ inputargs = [i3] operations[4].setfailargs([None, None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 44) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -221,7 +221,7 @@ operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, 
inputargs, operations, looptoken) if hasattr(looptoken, '_x86_ops_offset'): del looptoken._x86_ops_offset # else it's kept alive del i0, i1, i2 @@ -249,7 +249,7 @@ ] inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -260,7 +260,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -291,7 +291,7 @@ ] inputargs = [i3] operations[4].setfailargs([None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -302,7 +302,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -320,7 +320,7 @@ ] inputargs = [i0] operations[0].setfailargs([i0]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1list = [BoxInt() for i in range(150)] bridge = [] @@ -334,7 +334,7 @@ descr=BasicFinalDescr(4))) bridge[-2].setfailargs(i1list) - self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i0], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) fail = self.cpu.get_latest_descr(deadframe) @@ -358,7 +358,7 @@ operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] - self.cpu.compile_loop([i0], operations, looptoken) + self.cpu.compile_loop(None, [i0], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 99) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -369,7 +369,7 @@ operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -380,7 +380,7 @@ operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -391,7 +391,7 @@ operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] - self.cpu.compile_loop([f0], operations, looptoken) + self.cpu.compile_loop(None, [f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) deadframe = self.cpu.execute_token(looptoken, value) fail = self.cpu.get_latest_descr(deadframe) @@ -403,7 +403,7 @@ operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -429,7 +429,7 @@ ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] operations[-2].setfailargs([t, z]) - cpu.compile_loop([x, y], operations, looptoken) + cpu.compile_loop(None, [x, y], operations, looptoken) 
deadframe = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_int_value(deadframe, 0) == 0 assert self.cpu.get_int_value(deadframe, 1) == 55 @@ -488,7 +488,7 @@ ops[1].setfailargs([v_res]) # looptoken = JitCellToken() - self.cpu.compile_loop([v1, v2], ops, looptoken) + self.cpu.compile_loop(None, [v1, v2], ops, looptoken) for x, y, z in testcases: deadframe = self.cpu.execute_token(looptoken, x, y) fail = self.cpu.get_latest_descr(deadframe) @@ -1238,7 +1238,7 @@ print inputargs for op in operations: print op - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # deadframe = self.cpu.execute_token(looptoken, *values) fail = self.cpu.get_latest_descr(deadframe) @@ -1305,7 +1305,7 @@ operations[3].setfailargs(inputargs[:]) operations[3].setdescr(faildescr) # - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # values = [] S = lltype.GcStruct('S') @@ -1366,7 +1366,7 @@ operations[-3].setfailargs(fboxes) operations[-2].setfailargs(fboxes) looptoken = JitCellToken() - self.cpu.compile_loop(fboxes, operations, looptoken) + self.cpu.compile_loop(None, fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() @@ -1375,7 +1375,7 @@ ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] - self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, fboxes2, bridge, looptoken) args = [] for i in range(len(fboxes)): @@ -1407,7 +1407,7 @@ finish()""" loop = parse(loopops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) args = [1] args.append(longlong.getfloatstorage(132.25)) args.append(longlong.getfloatstorage(0.75)) @@ -1428,7 +1428,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] bridgeops[-2].setfailargs(fboxes[:]) - self.cpu.compile_bridge(loop.operations[-2].getdescr(), fboxes, + self.cpu.compile_bridge(None, loop.operations[-2].getdescr(), fboxes, bridgeops, looptoken) args = [1, longlong.getfloatstorage(132.25), @@ -1463,7 +1463,7 @@ ] operations[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for value in [-42, 0, 1, 10]: deadframe = self.cpu.execute_token(looptoken, value) @@ -1508,7 +1508,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for test1 in [-65, -42, -11, 0, 1, 10]: if test1 == -42 or combinaison[0] == 'b': @@ -1560,7 +1560,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for test1 in [65, 42, 11, 0, 1]: if test1 == 42 or combinaison[0] == 'b': @@ -1616,7 +1616,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # nan = 1e200 * 1e200 nan /= nan @@ -1675,7 +1675,7 @@ descr=faildescr)) looptoken = JitCellToken() # - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # args = [] for box in inputargs: @@ -1748,7 +1748,7 @@ 
looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) - self.cpu.compile_loop(unique_testcase_list, operations, + self.cpu.compile_loop(None, unique_testcase_list, operations, looptoken) args = [box.getfloatstorage() for box in unique_testcase_list] @@ -2065,7 +2065,7 @@ exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_ref_value(deadframe, 0) == xptr excvalue = self.cpu.grab_exc_value(deadframe) @@ -2088,7 +2088,7 @@ exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_int_value(deadframe, 0) == 1 excvalue = self.cpu.grab_exc_value(deadframe) @@ -2105,7 +2105,7 @@ ''' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_int_value(deadframe, 0) == 1 excvalue = self.cpu.grab_exc_value(deadframe) @@ -2286,7 +2286,7 @@ 'func_ptr': func_ptr, 'calldescr': calldescr}) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) f1 = longlong.getfloatstorage(1.2) f2 = longlong.getfloatstorage(3.4) frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, f1, f2) @@ -2331,7 +2331,7 @@ ] ops[2].setfailargs([i1, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2377,7 +2377,7 @@ ] ops[2].setfailargs([i1, i2, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2425,7 +2425,7 @@ ] ops[2].setfailargs([i1, f2, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2467,7 +2467,7 @@ ] ops[1].setfailargs([i1, i2]) looptoken = JitCellToken() - self.cpu.compile_loop([i1], ops, looptoken) + self.cpu.compile_loop(None, [i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, ord('G')) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2525,7 +2525,7 @@ ] ops[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1, i2, i3], ops, looptoken) args = [rffi.cast(lltype.Signed, raw), 2, 4, @@ -2582,7 +2582,7 @@ ResOperation(rop.FINISH, [i3], None, descr=BasicFinalDescr(0)) ] looptoken = JitCellToken() - self.cpu.compile_loop([i1, i2], ops, looptoken) + self.cpu.compile_loop(None, [i1, 
i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') args = [buflen, rffi.cast(lltype.Signed, buffer)] @@ -2652,7 +2652,7 @@ ] ops[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([], ops, looptoken) + self.cpu.compile_loop(None, [], ops, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) @@ -2792,7 +2792,7 @@ ops.insert(-1, ResOperation(rop.SAME_AS, [b1], b1.clonebox())) looptoken = JitCellToken() - self.cpu.compile_loop(argboxes, ops, looptoken) + self.cpu.compile_loop(None, argboxes, ops, looptoken) # seen = [] deadframe = self.cpu.execute_token(looptoken, *argvalues_normal) @@ -2817,7 +2817,7 @@ ] ops[0].setfailargs([i1]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2844,7 +2844,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFinalDescr(3)) ] ops[0].setfailargs([]) - self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) + self.cpu.compile_bridge(None, faildescr, [i2], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2877,7 +2877,7 @@ ] ops[0].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i0], ops, looptoken) + self.cpu.compile_loop(None, [i0], ops, looptoken) # mark as failing self.cpu.invalidate_loop(looptoken) # attach a bridge @@ -2885,7 +2885,7 @@ ops2 = [ ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), ] - self.cpu.compile_bridge(faildescr, [], ops2, looptoken) + self.cpu.compile_bridge(None, faildescr, [], ops2, looptoken) # run: must not be caught in an infinite loop deadframe = self.cpu.execute_token(looptoken, 16) fail = self.cpu.get_latest_descr(deadframe) @@ -3093,7 +3093,7 @@ looptoken.outermost_jitdriver_sd = FakeJitDriverSD() finish_descr = loop.operations[-1].getdescr() self.cpu.done_with_this_frame_descr_int = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( @@ -3111,7 +3111,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_int_value(deadframe, 0) == 13 @@ -3121,7 +3121,7 @@ del called[:] self.cpu.done_with_this_frame_descr_int = finish_descr othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_int_value(deadframe, 0) == 97 @@ -3159,7 +3159,7 @@ loop = parse(ops) looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( @@ -3173,7 +3173,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = 
JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) deadframe = self.cpu.execute_token(othertoken, sys.maxint - 1) assert self.cpu.get_int_value(deadframe, 0) == 3 @@ -3211,7 +3211,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.done_with_this_frame_descr_float = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(2.3)] deadframe = self.cpu.execute_token(looptoken, *args) @@ -3225,7 +3225,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(3.2)] deadframe = self.cpu.execute_token(othertoken, *args) @@ -3237,7 +3237,7 @@ del called[:] self.cpu.done_with_this_frame_descr_float = finish_descr othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(4.2)] deadframe = self.cpu.execute_token(othertoken, *args) @@ -3300,7 +3300,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.done_with_this_frame_descr_float = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) finish_descr = loop.operations[-1].getdescr() args = [longlong.getfloatstorage(1.25), longlong.getfloatstorage(2.35)] @@ -3317,7 +3317,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken args = [longlong.getfloatstorage(1.25), @@ -3336,7 +3336,7 @@ loop2 = parse(ops) looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() - self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) finish_descr2 = loop2.operations[-1].getdescr() # install it @@ -3696,7 +3696,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # overflowing value: deadframe = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) fail = self.cpu.get_latest_descr(deadframe) @@ -3749,7 +3749,7 @@ operations[3].setfailargs([i1]) operations[6].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -3761,7 +3761,7 @@ ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] - self.cpu.compile_bridge(faildescr, inputargs2, operations2, looptoken) + self.cpu.compile_bridge(None, faildescr, inputargs2, operations2, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ 
-3778,7 +3778,7 @@ descr = BasicFinalDescr() loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) for inp, outp in [(2,2), (-3, 0)]: deadframe = self.cpu.execute_token(looptoken, inp) assert outp == self.cpu.get_int_value(deadframe, 0) @@ -3807,8 +3807,8 @@ bridge = parse(bridge_ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.assembler.set_debug(False) - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - bridge_info = self.cpu.compile_bridge(faildescr, bridge.inputargs, + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) + bridge_info = self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) self.cpu.assembler.set_debug(True) # always on untranslated @@ -3852,7 +3852,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFinalDescr(1234)), ] operations[1].setfailargs([i0]) - self.cpu.compile_loop(inputargs, operations, looptoken1) + self.cpu.compile_loop(None, inputargs, operations, looptoken1) def func(a, b, c, d, e, f, g, h, i): assert a + 2 == b @@ -3906,14 +3906,14 @@ ResOperation(rop.JUMP, [i19], None, descr=targettoken1), ] operations2[-2].setfailargs([]) - self.cpu.compile_bridge(faildescr1, inputargs, operations2, looptoken1) + self.cpu.compile_bridge(None, faildescr1, inputargs, operations2, looptoken1) looptoken2 = JitCellToken() inputargs = [BoxInt()] operations3 = [ ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), ] - self.cpu.compile_loop(inputargs, operations3, looptoken2) + self.cpu.compile_loop(None, inputargs, operations3, looptoken2) deadframe = self.cpu.execute_token(looptoken2, -9) fail = self.cpu.get_latest_descr(deadframe) @@ -3930,11 +3930,11 @@ operations[0].setfailargs([]) looptoken = JitCellToken() inputargs = [t_box] - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) operations = [ ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(99)) ] - self.cpu.compile_bridge(faildescr, [], operations, looptoken) + self.cpu.compile_bridge(None, faildescr, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, null_box.getref_base()) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 99 @@ -3962,7 +3962,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_int_value(deadframe, 0) @@ -3992,7 +3992,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_float_value(deadframe, 0) @@ -4022,7 +4022,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_int_value(deadframe, 0) @@ -4054,7 
+4054,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, value) result = rawstorage.raw_storage_getitem(T, p, 16) @@ -4086,7 +4086,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, longlong.getfloatstorage(value)) @@ -4120,7 +4120,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, longlong.singlefloat2int(value)) @@ -4155,7 +4155,7 @@ ] ops[2].setfailargs([i2]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 @@ -4189,7 +4189,7 @@ finish(i1, descr=finaldescr) """, namespace={'finaldescr': finaldescr, 'calldescr2': calldescr2, 'guarddescr': guarddescr, 'func2_ptr': func2_ptr}) - self.cpu.compile_bridge(faildescr, bridge.inputargs, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) cpu = self.cpu @@ -4222,7 +4222,7 @@ guard_true(i0, descr=faildescr) [i1, i2, px] finish(i2, descr=finaldescr2) """, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) frame = self.cpu.execute_token(looptoken, 0, 0, 3) assert self.cpu.get_latest_descr(frame) is guarddescr from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU @@ -4271,7 +4271,7 @@ 'faildescr2': BasicFailDescr(1), 'xtp': xtp }) - self.cpu.compile_bridge(faildescr, bridge.inputargs, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) raise LLException(xtp, xptr) @@ -4292,7 +4292,7 @@ 'faildescr': faildescr, 'finaldescr2': BasicFinalDescr(1)}) - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) frame = self.cpu.execute_token(looptoken, 1, 2, 3) descr = self.cpu.get_latest_descr(frame) assert descr.identifier == 42 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -434,7 +434,8 @@ else: self.wb_slowpath[withcards + 2 * withfloats] = rawstart - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, + log): '''adds the following attributes to looptoken: _ll_function_addr (address of the generated func, as an int) _ll_loop_code (debug: addr of the start of the ResOps) @@ -467,8 +468,8 @@ # self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - operations = regalloc.prepare_loop(inputargs, operations, looptoken, - clt.allgcrefs) + operations = regalloc.prepare_loop(inputargs, 
operations, + looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -498,6 +499,9 @@ looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset looptoken._ll_function_addr = rawstart + if logger: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) self.fixup_target_tokens(rawstart) self.teardown() @@ -509,7 +513,7 @@ return AsmInfo(ops_offset, rawstart + looppos, size_excluding_failure_stuff - looppos) - def assemble_bridge(self, faildescr, inputargs, operations, + def assemble_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log): if not we_are_translated(): # Arguments should be unique @@ -544,6 +548,9 @@ ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + if logger: + logger.log_bridge(inputargs, operations, "rewritten", + ops_offset=ops_offset) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -88,15 +88,17 @@ lines = machine_code_dump(data, addr, self.backend_name, label_list) print ''.join(lines) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): - return self.assembler.assemble_loop(name, inputargs, operations, + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, + name=''): + return self.assembler.assemble_loop(logger, name, inputargs, operations, looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(faildescr, inputargs, operations, + return self.assembler.assemble_bridge(logger, faildescr, inputargs, + operations, original_loop_token, log=log) def clear_latest_values(self, count): diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -287,7 +287,7 @@ ] ops[-2].setfailargs([i1]) looptoken = JitCellToken() - self.cpu.compile_loop([b], ops, looptoken) + self.cpu.compile_loop(None, [b], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_int_value(deadframe, 0) if guard == rop.GUARD_FALSE: @@ -333,7 +333,7 @@ ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, ops, looptoken) + self.cpu.compile_loop(None, inputargs, ops, looptoken) inputvalues = [box.value for box in inputargs] deadframe = self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_int_value(deadframe, 0) @@ -377,7 +377,7 @@ ] inputargs = [i0] operations[-2].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" assert loopaddress <= looptoken._ll_loop_code @@ -393,7 +393,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + 
self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) name, address, size = agent.functions[1] assert name == "Bridge # 0: bye (loop counter 1)" # Would be exactly ==, but there are some guard failure recovery @@ -422,7 +422,7 @@ ] inputargs = [i0] debug._log = dlog = debug.DebugLog() - info = self.cpu.compile_loop(inputargs, operations, looptoken) + info = self.cpu.compile_loop(None, inputargs, operations, looptoken) ops_offset = info.ops_offset debug._log = None # @@ -508,7 +508,7 @@ ops[5].setfailargs([]) ops[7].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i1, i2], ops, looptoken) + self.cpu.compile_loop(None, [i1, i2], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 123450, 123408) fail = self.cpu.get_latest_descr(deadframe) @@ -549,7 +549,7 @@ try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() - self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.compile_loop(None, ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -302,14 +302,16 @@ log=True, name=''): metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', name=name) - return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, + return metainterp_sd.cpu.compile_loop(metainterp_sd.logger_ops, + inputargs, operations, looptoken, log=log, name=name) def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True): metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling") assert isinstance(faildescr, AbstractFailDescr) - return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, + return metainterp_sd.cpu.compile_bridge(metainterp_sd.logger_ops, + faildescr, inputargs, operations, original_loop_token, log=log) def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -17,6 +17,10 @@ debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") + elif type == "rewritten": + debug_start("jit-log-rewritten-loop") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-rewritten-loop") elif number == -2: debug_start("jit-log-compiling-loop") logops = self._log_operations(inputargs, operations, ops_offset) @@ -35,6 +39,10 @@ debug_start("jit-log-noopt-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") + elif extra == "rewritten": + debug_start("jit-log-rewritten-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-rewritten-bridge") elif extra == "compiling": debug_start("jit-log-compiling-bridge") logops = self._log_operations(inputargs, operations, ops_offset) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -52,7 +52,7 @@ # otherwise, the operation remains self.emit_operation(op) if op.returns_bool_result(): - self.optimizer.bool_boxes[self.getvalue(op.result)] = 
None + self.optimizer.bool_boxes[self.getvalue(op.result)] = None if nextop: self.emit_operation(nextop) From noreply at buildbot.pypy.org Sat Aug 17 17:02:03 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 17 Aug 2013 17:02:03 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix strbufobject. Message-ID: <20130817150203.288C41C10DD@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66183:ef7c9156078a Date: 2013-08-17 16:59 +0200 http://bitbucket.org/pypy/pypy/changeset/ef7c9156078a/ Log: Fix strbufobject. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -3,7 +3,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault, interpindirect2app from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format @@ -43,8 +43,388 @@ return space.unicode_w(unicode_from_string(space, self)) return space.unicode_w(decode_object(space, self, encoding, errors)) + def descr_add(self, space, w_other): + """x.__add__(y) <==> x+y""" -class W_BytesObject(W_AbstractBytesObject, StringMethods): + def descr_contains(self, space, w_sub): + """x.__contains__(y) <==> y in x""" + + def descr_eq(self, space, w_other): + """x.__eq__(y) <==> x==y""" + + def descr__format__(self, space, w_format_spec): + """S.__format__(format_spec) -> string + + Return a formatted version of S as described by format_spec. + """ + + def descr_ge(self, space, w_other): + """x.__ge__(y) <==> x>=y""" + + def descr_getitem(self, space, w_index): + """x.__getitem__(y) <==> x[y]""" + + def descr_getnewargs(self, space): + """""" + + def descr_getslice(self, space, w_start, w_stop): + """x.__getslice__(i, j) <==> x[i:j] + + Use of negative indices is not supported. + """ + + def descr_gt(self, space, w_other): + """x.__gt__(y) <==> x>y""" + + def descr_hash(self, space): + """x.__hash__() <==> hash(x)""" + + def descr_le(self, space, w_other): + """x.__le__(y) <==> x<=y""" + + def descr_len(self, space): + """x.__len__() <==> len(x)""" + + def descr_lt(self, space, w_other): + """x.__lt__(y) <==> x x%y""" + + def descr_mul(self, space, w_times): + """x.__mul__(n) <==> x*n""" + + def descr_ne(self, space, w_other): + """x.__ne__(y) <==> x!=y""" + + def descr_repr(self, space): + """x.__repr__() <==> repr(x)""" + + def descr_rmod(self, space, w_values): + """x.__rmod__(y) <==> y%x""" + + def descr_rmul(self, space, w_times): + """x.__rmul__(n) <==> n*x""" + + def descr_str(self, space): + """x.__str__() <==> str(x)""" + + def descr_capitalize(self, space): + """S.capitalize() -> string + + Return a capitalized version of S, i.e. make the first character + have upper case and the rest lower case. + """ + + @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) + def descr_center(self, space, width, w_fillchar): + """S.center(width[, fillchar]) -> string + + Return S centered in a string of length width. Padding is + done using the specified fill character (default is a space). 
+ """ + + def descr_count(self, space, w_sub, w_start=None, w_end=None): + """S.count(sub[, start[, end]]) -> int + + Return the number of non-overlapping occurrences of substring sub in + string S[start:end]. Optional arguments start and end are interpreted + as in slice notation. + """ + + def descr_decode(self, space, w_encoding=None, w_errors=None): + """S.decode(encoding=None, errors='strict') -> object + + Decode S using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + + def descr_encode(self, space, w_encoding=None, w_errors=None): + """S.encode(encoding=None, errors='strict') -> object + + Encode S using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that is able to handle UnicodeEncodeErrors. + """ + + def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): + """S.endswith(suffix[, start[, end]]) -> bool + + Return True if S ends with the specified suffix, False otherwise. + With optional start, test S beginning at that position. + With optional end, stop comparing S at that position. + suffix can also be a tuple of strings to try. + """ + + @unwrap_spec(tabsize=int) + def descr_expandtabs(self, space, tabsize=8): + """S.expandtabs([tabsize]) -> string + + Return a copy of S where all tab characters are expanded using spaces. + If tabsize is not given, a tab size of 8 characters is assumed. + """ + + def descr_find(self, space, w_sub, w_start=None, w_end=None): + """S.find(sub[, start[, end]]) -> int + + Return the lowest index in S where substring sub is found, + such that sub is contained within S[start:end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def descr_format(self, space, __args__): + """S.format(*args, **kwargs) -> string + + Return a formatted version of S, using substitutions from args and kwargs. + The substitutions are identified by braces ('{' and '}'). + """ + + def descr_index(self, space, w_sub, w_start=None, w_end=None): + """S.index(sub[, start[, end]]) -> int + + Like S.find() but raise ValueError when the substring is not found. + """ + + def descr_isalnum(self, space): + """S.isalnum() -> bool + + Return True if all characters in S are alphanumeric + and there is at least one character in S, False otherwise. + """ + + def descr_isalpha(self, space): + """S.isalpha() -> bool + + Return True if all characters in S are alphabetic + and there is at least one character in S, False otherwise. + """ + + def descr_isdigit(self, space): + """S.isdigit() -> bool + + Return True if all characters in S are digits + and there is at least one character in S, False otherwise. + """ + + def descr_islower(self, space): + """S.islower() -> bool + + Return True if all cased characters in S are lowercase and there is + at least one cased character in S, False otherwise. 
+ """ + + def descr_isspace(self, space): + """S.isspace() -> bool + + Return True if all characters in S are whitespace + and there is at least one character in S, False otherwise. + """ + + def descr_istitle(self, space): + """S.istitle() -> bool + + Return True if S is a titlecased string and there is at least one + character in S, i.e. uppercase characters may only follow uncased + characters and lowercase characters only cased ones. Return False + otherwise. + """ + + def descr_isupper(self, space): + """S.isupper() -> bool + + Return True if all cased characters in S are uppercase and there is + at least one cased character in S, False otherwise. + """ + + def descr_join(self, space, w_list): + """S.join(iterable) -> string + + Return a string which is the concatenation of the strings in the + iterable. The separator between elements is S. + """ + + @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) + def descr_ljust(self, space, width, w_fillchar): + """S.ljust(width[, fillchar]) -> string + + Return S left-justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def descr_lower(self, space): + """S.lower() -> string + + Return a copy of the string S converted to lowercase. + """ + + def descr_lstrip(self, space, w_chars=None): + """S.lstrip([chars]) -> string or unicode + + Return a copy of the string S with leading whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is unicode, S will be converted to unicode before stripping + """ + + def descr_partition(self, space, w_sub): + """S.partition(sep) -> (head, sep, tail) + + Search for the separator sep in S, and return the part before it, + the separator itself, and the part after it. If the separator is not + found, return S and two empty strings. + """ + + @unwrap_spec(count=int) + def descr_replace(self, space, w_old, w_new, count=-1): + """S.replace(old, new[, count]) -> string + + Return a copy of string S with all occurrences of substring + old replaced by new. If the optional argument count is + given, only the first count occurrences are replaced. + """ + + def descr_rfind(self, space, w_sub, w_start=None, w_end=None): + """S.rfind(sub[, start[, end]]) -> int + + Return the highest index in S where substring sub is found, + such that sub is contained within S[start:end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def descr_rindex(self, space, w_sub, w_start=None, w_end=None): + """S.rindex(sub[, start[, end]]) -> int + + Like S.rfind() but raise ValueError when the substring is not found. + """ + + @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) + def descr_rjust(self, space, width, w_fillchar): + """S.rjust(width[, fillchar]) -> string + + Return S right-justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def descr_rpartition(self, space, w_sub): + """S.rpartition(sep) -> (head, sep, tail) + + Search for the separator sep in S, starting at the end of S, and return + the part before it, the separator itself, and the part after it. If the + separator is not found, return two empty strings and S. 
+ """ + + @unwrap_spec(maxsplit=int) + def descr_rsplit(self, space, w_sep=None, maxsplit=-1): + """S.rsplit(sep=None, maxsplit=-1) -> list of strings + + Return a list of the words in the string S, using sep as the + delimiter string, starting at the end of the string and working + to the front. If maxsplit is given, at most maxsplit splits are + done. If sep is not specified or is None, any whitespace string + is a separator. + """ + + def descr_rstrip(self, space, w_chars=None): + """S.rstrip([chars]) -> string or unicode + + Return a copy of the string S with trailing whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is unicode, S will be converted to unicode before stripping + """ + + @unwrap_spec(maxsplit=int) + def descr_split(self, space, w_sep=None, maxsplit=-1): + """S.split(sep=None, maxsplit=-1) -> list of strings + + Return a list of the words in the string S, using sep as the + delimiter string. If maxsplit is given, at most maxsplit + splits are done. If sep is not specified or is None, any + whitespace string is a separator and empty strings are removed + from the result. + """ + + @unwrap_spec(keepends=bool) + def descr_splitlines(self, space, keepends=False): + """S.splitlines(keepends=False) -> list of strings + + Return a list of the lines in S, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + + def descr_startswith(self, space, w_prefix, w_start=None, w_end=None): + """S.startswith(prefix[, start[, end]]) -> bool + + Return True if S starts with the specified prefix, False otherwise. + With optional start, test S beginning at that position. + With optional end, stop comparing S at that position. + prefix can also be a tuple of strings to try. + """ + + def descr_strip(self, space, w_chars=None): + """S.strip([chars]) -> string or unicode + + Return a copy of the string S with leading and trailing + whitespace removed. + If chars is given and not None, remove characters in chars instead. + If chars is unicode, S will be converted to unicode before stripping + """ + + def descr_swapcase(self, space): + """S.swapcase() -> string + + Return a copy of the string S with uppercase characters + converted to lowercase and vice versa. + """ + + def descr_title(self, space): + """S.title() -> string + + Return a titlecased version of S, i.e. words start with uppercase + characters, all remaining cased characters have lowercase. + """ + + @unwrap_spec(w_deletechars=WrappedDefault('')) + def descr_translate(self, space, w_table, w_deletechars): + """S.translate(table[, deletechars]) -> string + + Return a copy of the string S, where all characters occurring + in the optional argument deletechars are removed, and the + remaining characters have been mapped through the given + translation table, which must be a string of length 256 or None. + If the table argument is None, no translation is applied and + the operation simply removes the characters in deletechars. + """ + + def descr_upper(self, space): + """S.upper() -> string + + Return a copy of the string S converted to uppercase. + """ + + @unwrap_spec(width=int) + def descr_zfill(self, space, width): + """S.zfill(width) -> string + + Pad a numeric string S with zeros on the left, to fill a field + of the specified width. The string S is never truncated. 
+ """ + + def descr_buffer(self, space): + pass + + +class W_BytesObject(StringMethods, W_AbstractBytesObject): _immutable_fields_ = ['_value'] def __init__(self, str): @@ -195,6 +575,12 @@ from .bytearrayobject import W_BytearrayObject, _make_data self_as_bytearray = W_BytearrayObject(_make_data(self._value)) return space.add(self_as_bytearray, w_other) + if space.config.objspace.std.withstrbuf: + from pypy.objspace.std.strbufobject import W_StringBufferObject + builder = StringBuilder() + builder.append(self._value) + builder.append(w_other._value) + return W_StringBufferObject(builder) return StringMethods.descr_add(self, space, w_other) def _startswith(self, space, value, w_prefix, start, end): @@ -300,514 +686,79 @@ return W_BytesObject(c) -class BytesDocstrings: - """str(object='') -> string +W_BytesObject.typedef = StdTypeDef( + "str", basestring_typedef, + __new__ = interp2app(W_BytesObject.descr_new), + __doc__ = """str(object='') -> string Return a nice string representation of the object. If the argument is a string, the return value is the same object. + """, - """ + __repr__ = interpindirect2app(W_AbstractBytesObject.descr_repr), + __str__ = interpindirect2app(W_AbstractBytesObject.descr_str), + __hash__ = interpindirect2app(W_AbstractBytesObject.descr_hash), - def __add__(): - """x.__add__(y) <==> x+y""" + __eq__ = interpindirect2app(W_AbstractBytesObject.descr_eq), + __ne__ = interpindirect2app(W_AbstractBytesObject.descr_ne), + __lt__ = interpindirect2app(W_AbstractBytesObject.descr_lt), + __le__ = interpindirect2app(W_AbstractBytesObject.descr_le), + __gt__ = interpindirect2app(W_AbstractBytesObject.descr_gt), + __ge__ = interpindirect2app(W_AbstractBytesObject.descr_ge), - def __contains__(): - """x.__contains__(y) <==> y in x""" + __len__ = interpindirect2app(W_AbstractBytesObject.descr_len), + __contains__ = interpindirect2app(W_AbstractBytesObject.descr_contains), - def __eq__(): - """x.__eq__(y) <==> x==y""" + __add__ = interpindirect2app(W_AbstractBytesObject.descr_add), + __mul__ = interpindirect2app(W_AbstractBytesObject.descr_mul), + __rmul__ = interpindirect2app(W_AbstractBytesObject.descr_rmul), - def __format__(): - """S.__format__(format_spec) -> string + __getitem__ = interpindirect2app(W_AbstractBytesObject.descr_getitem), + __getslice__ = interpindirect2app(W_AbstractBytesObject.descr_getslice), - Return a formatted version of S as described by format_spec. 
- """ + capitalize = interpindirect2app(W_AbstractBytesObject.descr_capitalize), + center = interpindirect2app(W_AbstractBytesObject.descr_center), + count = interpindirect2app(W_AbstractBytesObject.descr_count), + decode = interpindirect2app(W_AbstractBytesObject.descr_decode), + encode = interpindirect2app(W_AbstractBytesObject.descr_encode), + expandtabs = interpindirect2app(W_AbstractBytesObject.descr_expandtabs), + find = interpindirect2app(W_AbstractBytesObject.descr_find), + rfind = interpindirect2app(W_AbstractBytesObject.descr_rfind), + index = interpindirect2app(W_AbstractBytesObject.descr_index), + rindex = interpindirect2app(W_AbstractBytesObject.descr_rindex), + isalnum = interpindirect2app(W_AbstractBytesObject.descr_isalnum), + isalpha = interpindirect2app(W_AbstractBytesObject.descr_isalpha), + isdigit = interpindirect2app(W_AbstractBytesObject.descr_isdigit), + islower = interpindirect2app(W_AbstractBytesObject.descr_islower), + isspace = interpindirect2app(W_AbstractBytesObject.descr_isspace), + istitle = interpindirect2app(W_AbstractBytesObject.descr_istitle), + isupper = interpindirect2app(W_AbstractBytesObject.descr_isupper), + join = interpindirect2app(W_AbstractBytesObject.descr_join), + ljust = interpindirect2app(W_AbstractBytesObject.descr_ljust), + rjust = interpindirect2app(W_AbstractBytesObject.descr_rjust), + lower = interpindirect2app(W_AbstractBytesObject.descr_lower), + partition = interpindirect2app(W_AbstractBytesObject.descr_partition), + rpartition = interpindirect2app(W_AbstractBytesObject.descr_rpartition), + replace = interpindirect2app(W_AbstractBytesObject.descr_replace), + split = interpindirect2app(W_AbstractBytesObject.descr_split), + rsplit = interpindirect2app(W_AbstractBytesObject.descr_rsplit), + splitlines = interpindirect2app(W_AbstractBytesObject.descr_splitlines), + startswith = interpindirect2app(W_AbstractBytesObject.descr_startswith), + endswith = interpindirect2app(W_AbstractBytesObject.descr_endswith), + strip = interpindirect2app(W_AbstractBytesObject.descr_strip), + lstrip = interpindirect2app(W_AbstractBytesObject.descr_lstrip), + rstrip = interpindirect2app(W_AbstractBytesObject.descr_rstrip), + swapcase = interpindirect2app(W_AbstractBytesObject.descr_swapcase), + title = interpindirect2app(W_AbstractBytesObject.descr_title), + translate = interpindirect2app(W_AbstractBytesObject.descr_translate), + upper = interpindirect2app(W_AbstractBytesObject.descr_upper), + zfill = interpindirect2app(W_AbstractBytesObject.descr_zfill), - def __ge__(): - """x.__ge__(y) <==> x>=y""" - - def __getattribute__(): - """x.__getattribute__('name') <==> x.name""" - - def __getitem__(): - """x.__getitem__(y) <==> x[y]""" - - def __getnewargs__(): - """""" - - def __getslice__(): - """x.__getslice__(i, j) <==> x[i:j] - - Use of negative indices is not supported. 
- """ - - def __gt__(): - """x.__gt__(y) <==> x>y""" - - def __hash__(): - """x.__hash__() <==> hash(x)""" - - def __le__(): - """x.__le__(y) <==> x<=y""" - - def __len__(): - """x.__len__() <==> len(x)""" - - def __lt__(): - """x.__lt__(y) <==> x x%y""" - - def __mul__(): - """x.__mul__(n) <==> x*n""" - - def __ne__(): - """x.__ne__(y) <==> x!=y""" - - def __repr__(): - """x.__repr__() <==> repr(x)""" - - def __rmod__(): - """x.__rmod__(y) <==> y%x""" - - def __rmul__(): - """x.__rmul__(n) <==> n*x""" - - def __sizeof__(): - """S.__sizeof__() -> size of S in memory, in bytes""" - - def __str__(): - """x.__str__() <==> str(x)""" - - def capitalize(): - """S.capitalize() -> string - - Return a capitalized version of S, i.e. make the first character - have upper case and the rest lower case. - """ - - def center(): - """S.center(width[, fillchar]) -> string - - Return S centered in a string of length width. Padding is - done using the specified fill character (default is a space). - """ - - def count(): - """S.count(sub[, start[, end]]) -> int - - Return the number of non-overlapping occurrences of substring sub in - string S[start:end]. Optional arguments start and end are interpreted - as in slice notation. - """ - - def decode(): - """S.decode(encoding=None, errors='strict') -> object - - Decode S using the codec registered for encoding. encoding defaults - to the default encoding. errors may be given to set a different error - handling scheme. Default is 'strict' meaning that encoding errors raise - a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' - as well as any other name registered with codecs.register_error that is - able to handle UnicodeDecodeErrors. - """ - - def encode(): - """S.encode(encoding=None, errors='strict') -> object - - Encode S using the codec registered for encoding. encoding defaults - to the default encoding. errors may be given to set a different error - handling scheme. Default is 'strict' meaning that encoding errors raise - a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and - 'xmlcharrefreplace' as well as any other name registered with - codecs.register_error that is able to handle UnicodeEncodeErrors. - """ - - def endswith(): - """S.endswith(suffix[, start[, end]]) -> bool - - Return True if S ends with the specified suffix, False otherwise. - With optional start, test S beginning at that position. - With optional end, stop comparing S at that position. - suffix can also be a tuple of strings to try. - """ - - def expandtabs(): - """S.expandtabs([tabsize]) -> string - - Return a copy of S where all tab characters are expanded using spaces. - If tabsize is not given, a tab size of 8 characters is assumed. - """ - - def find(): - """S.find(sub[, start[, end]]) -> int - - Return the lowest index in S where substring sub is found, - such that sub is contained within S[start:end]. Optional - arguments start and end are interpreted as in slice notation. - - Return -1 on failure. - """ - - def format(): - """S.format(*args, **kwargs) -> string - - Return a formatted version of S, using substitutions from args and kwargs. - The substitutions are identified by braces ('{' and '}'). - """ - - def index(): - """S.index(sub[, start[, end]]) -> int - - Like S.find() but raise ValueError when the substring is not found. - """ - - def isalnum(): - """S.isalnum() -> bool - - Return True if all characters in S are alphanumeric - and there is at least one character in S, False otherwise. 
- """ - - def isalpha(): - """S.isalpha() -> bool - - Return True if all characters in S are alphabetic - and there is at least one character in S, False otherwise. - """ - - def isdigit(): - """S.isdigit() -> bool - - Return True if all characters in S are digits - and there is at least one character in S, False otherwise. - """ - - def islower(): - """S.islower() -> bool - - Return True if all cased characters in S are lowercase and there is - at least one cased character in S, False otherwise. - """ - - def isspace(): - """S.isspace() -> bool - - Return True if all characters in S are whitespace - and there is at least one character in S, False otherwise. - """ - - def istitle(): - """S.istitle() -> bool - - Return True if S is a titlecased string and there is at least one - character in S, i.e. uppercase characters may only follow uncased - characters and lowercase characters only cased ones. Return False - otherwise. - """ - - def isupper(): - """S.isupper() -> bool - - Return True if all cased characters in S are uppercase and there is - at least one cased character in S, False otherwise. - """ - - def join(): - """S.join(iterable) -> string - - Return a string which is the concatenation of the strings in the - iterable. The separator between elements is S. - """ - - def ljust(): - """S.ljust(width[, fillchar]) -> string - - Return S left-justified in a string of length width. Padding is - done using the specified fill character (default is a space). - """ - - def lower(): - """S.lower() -> string - - Return a copy of the string S converted to lowercase. - """ - - def lstrip(): - """S.lstrip([chars]) -> string or unicode - - Return a copy of the string S with leading whitespace removed. - If chars is given and not None, remove characters in chars instead. - If chars is unicode, S will be converted to unicode before stripping - """ - - def partition(): - """S.partition(sep) -> (head, sep, tail) - - Search for the separator sep in S, and return the part before it, - the separator itself, and the part after it. If the separator is not - found, return S and two empty strings. - """ - - def replace(): - """S.replace(old, new[, count]) -> string - - Return a copy of string S with all occurrences of substring - old replaced by new. If the optional argument count is - given, only the first count occurrences are replaced. - """ - - def rfind(): - """S.rfind(sub[, start[, end]]) -> int - - Return the highest index in S where substring sub is found, - such that sub is contained within S[start:end]. Optional - arguments start and end are interpreted as in slice notation. - - Return -1 on failure. - """ - - def rindex(): - """S.rindex(sub[, start[, end]]) -> int - - Like S.rfind() but raise ValueError when the substring is not found. - """ - - def rjust(): - """S.rjust(width[, fillchar]) -> string - - Return S right-justified in a string of length width. Padding is - done using the specified fill character (default is a space). - """ - - def rpartition(): - """S.rpartition(sep) -> (head, sep, tail) - - Search for the separator sep in S, starting at the end of S, and return - the part before it, the separator itself, and the part after it. If the - separator is not found, return two empty strings and S. - """ - - def rsplit(): - """S.rsplit(sep=None, maxsplit=-1) -> list of strings - - Return a list of the words in the string S, using sep as the - delimiter string, starting at the end of the string and working - to the front. If maxsplit is given, at most maxsplit splits are - done. 
If sep is not specified or is None, any whitespace string - is a separator. - """ - - def rstrip(): - """S.rstrip([chars]) -> string or unicode - - Return a copy of the string S with trailing whitespace removed. - If chars is given and not None, remove characters in chars instead. - If chars is unicode, S will be converted to unicode before stripping - """ - - def split(): - """S.split(sep=None, maxsplit=-1) -> list of strings - - Return a list of the words in the string S, using sep as the - delimiter string. If maxsplit is given, at most maxsplit - splits are done. If sep is not specified or is None, any - whitespace string is a separator and empty strings are removed - from the result. - """ - - def splitlines(): - """S.splitlines(keepends=False) -> list of strings - - Return a list of the lines in S, breaking at line boundaries. - Line breaks are not included in the resulting list unless keepends - is given and true. - """ - - def startswith(): - """S.startswith(prefix[, start[, end]]) -> bool - - Return True if S starts with the specified prefix, False otherwise. - With optional start, test S beginning at that position. - With optional end, stop comparing S at that position. - prefix can also be a tuple of strings to try. - """ - - def strip(): - """S.strip([chars]) -> string or unicode - - Return a copy of the string S with leading and trailing - whitespace removed. - If chars is given and not None, remove characters in chars instead. - If chars is unicode, S will be converted to unicode before stripping - """ - - def swapcase(): - """S.swapcase() -> string - - Return a copy of the string S with uppercase characters - converted to lowercase and vice versa. - """ - - def title(): - """S.title() -> string - - Return a titlecased version of S, i.e. words start with uppercase - characters, all remaining cased characters have lowercase. - """ - - def translate(): - """S.translate(table[, deletechars]) -> string - - Return a copy of the string S, where all characters occurring - in the optional argument deletechars are removed, and the - remaining characters have been mapped through the given - translation table, which must be a string of length 256 or None. - If the table argument is None, no translation is applied and - the operation simply removes the characters in deletechars. - """ - - def upper(): - """S.upper() -> string - - Return a copy of the string S converted to uppercase. - """ - - def zfill(): - """S.zfill(width) -> string - - Pad a numeric string S with zeros on the left, to fill a field - of the specified width. The string S is never truncated. 
- """ - - -W_BytesObject.typedef = StdTypeDef( - "str", basestring_typedef, - __new__ = interp2app(W_BytesObject.descr_new), - __doc__ = BytesDocstrings.__doc__, - - __repr__ = interp2app(W_BytesObject.descr_repr, - doc=BytesDocstrings.__repr__.__doc__), - __str__ = interp2app(W_BytesObject.descr_str, - doc=BytesDocstrings.__str__.__doc__), - __hash__ = interp2app(W_BytesObject.descr_hash, - doc=BytesDocstrings.__hash__.__doc__), - - __eq__ = interp2app(W_BytesObject.descr_eq, - doc=BytesDocstrings.__eq__.__doc__), - __ne__ = interp2app(W_BytesObject.descr_ne, - doc=BytesDocstrings.__ne__.__doc__), - __lt__ = interp2app(W_BytesObject.descr_lt, - doc=BytesDocstrings.__lt__.__doc__), - __le__ = interp2app(W_BytesObject.descr_le, - doc=BytesDocstrings.__le__.__doc__), - __gt__ = interp2app(W_BytesObject.descr_gt, - doc=BytesDocstrings.__gt__.__doc__), - __ge__ = interp2app(W_BytesObject.descr_ge, - doc=BytesDocstrings.__ge__.__doc__), - - __len__ = interp2app(W_BytesObject.descr_len, - doc=BytesDocstrings.__len__.__doc__), - __contains__ = interp2app(W_BytesObject.descr_contains, - doc=BytesDocstrings.__contains__.__doc__), - - __add__ = interp2app(W_BytesObject.descr_add, - doc=BytesDocstrings.__add__.__doc__), - __mul__ = interp2app(W_BytesObject.descr_mul, - doc=BytesDocstrings.__mul__.__doc__), - __rmul__ = interp2app(W_BytesObject.descr_mul, - doc=BytesDocstrings.__rmul__.__doc__), - - __getitem__ = interp2app(W_BytesObject.descr_getitem, - doc=BytesDocstrings.__getitem__.__doc__), - __getslice__ = interp2app(W_BytesObject.descr_getslice, - doc=BytesDocstrings.__getslice__.__doc__), - - capitalize = interp2app(W_BytesObject.descr_capitalize, - doc=BytesDocstrings.capitalize.__doc__), - center = interp2app(W_BytesObject.descr_center, - doc=BytesDocstrings.center.__doc__), - count = interp2app(W_BytesObject.descr_count, - doc=BytesDocstrings.count.__doc__), - decode = interp2app(W_BytesObject.descr_decode, - doc=BytesDocstrings.decode.__doc__), - encode = interp2app(W_BytesObject.descr_encode, - doc=BytesDocstrings.encode.__doc__), - expandtabs = interp2app(W_BytesObject.descr_expandtabs, - doc=BytesDocstrings.expandtabs.__doc__), - find = interp2app(W_BytesObject.descr_find, - doc=BytesDocstrings.find.__doc__), - rfind = interp2app(W_BytesObject.descr_rfind, - doc=BytesDocstrings.rfind.__doc__), - index = interp2app(W_BytesObject.descr_index, - doc=BytesDocstrings.index.__doc__), - rindex = interp2app(W_BytesObject.descr_rindex, - doc=BytesDocstrings.rindex.__doc__), - isalnum = interp2app(W_BytesObject.descr_isalnum, - doc=BytesDocstrings.isalnum.__doc__), - isalpha = interp2app(W_BytesObject.descr_isalpha, - doc=BytesDocstrings.isalpha.__doc__), - isdigit = interp2app(W_BytesObject.descr_isdigit, - doc=BytesDocstrings.isdigit.__doc__), - islower = interp2app(W_BytesObject.descr_islower, - doc=BytesDocstrings.islower.__doc__), - isspace = interp2app(W_BytesObject.descr_isspace, - doc=BytesDocstrings.isspace.__doc__), - istitle = interp2app(W_BytesObject.descr_istitle, - doc=BytesDocstrings.istitle.__doc__), - isupper = interp2app(W_BytesObject.descr_isupper, - doc=BytesDocstrings.isupper.__doc__), - join = interp2app(W_BytesObject.descr_join, - doc=BytesDocstrings.join.__doc__), - ljust = interp2app(W_BytesObject.descr_ljust, - doc=BytesDocstrings.ljust.__doc__), - rjust = interp2app(W_BytesObject.descr_rjust, - doc=BytesDocstrings.rjust.__doc__), - lower = interp2app(W_BytesObject.descr_lower, - doc=BytesDocstrings.lower.__doc__), - partition = interp2app(W_BytesObject.descr_partition, 
- doc=BytesDocstrings.partition.__doc__), - rpartition = interp2app(W_BytesObject.descr_rpartition, - doc=BytesDocstrings.rpartition.__doc__), - replace = interp2app(W_BytesObject.descr_replace, - doc=BytesDocstrings.replace.__doc__), - split = interp2app(W_BytesObject.descr_split, - doc=BytesDocstrings.split.__doc__), - rsplit = interp2app(W_BytesObject.descr_rsplit, - doc=BytesDocstrings.rsplit.__doc__), - splitlines = interp2app(W_BytesObject.descr_splitlines, - doc=BytesDocstrings.splitlines.__doc__), - startswith = interp2app(W_BytesObject.descr_startswith, - doc=BytesDocstrings.startswith.__doc__), - endswith = interp2app(W_BytesObject.descr_endswith, - doc=BytesDocstrings.endswith.__doc__), - strip = interp2app(W_BytesObject.descr_strip, - doc=BytesDocstrings.strip.__doc__), - lstrip = interp2app(W_BytesObject.descr_lstrip, - doc=BytesDocstrings.lstrip.__doc__), - rstrip = interp2app(W_BytesObject.descr_rstrip, - doc=BytesDocstrings.rstrip.__doc__), - swapcase = interp2app(W_BytesObject.descr_swapcase, - doc=BytesDocstrings.swapcase.__doc__), - title = interp2app(W_BytesObject.descr_title, - doc=BytesDocstrings.title.__doc__), - translate = interp2app(W_BytesObject.descr_translate, - doc=BytesDocstrings.translate.__doc__), - upper = interp2app(W_BytesObject.descr_upper, - doc=BytesDocstrings.upper.__doc__), - zfill = interp2app(W_BytesObject.descr_zfill, - doc=BytesDocstrings.zfill.__doc__), - - format = interp2app(W_BytesObject.descr_format, - doc=BytesDocstrings.format.__doc__), - __format__ = interp2app(W_BytesObject.descr__format__, - doc=BytesDocstrings.__format__.__doc__), - __mod__ = interp2app(W_BytesObject.descr_mod, - doc=BytesDocstrings.__mod__.__doc__), - __buffer__ = interp2app(W_BytesObject.descr_buffer), - __getnewargs__ = interp2app(W_BytesObject.descr_getnewargs, - doc=BytesDocstrings.__getnewargs__.__doc__), + format = interpindirect2app(W_BytesObject.descr_format), + __format__ = interpindirect2app(W_BytesObject.descr__format__), + __mod__ = interpindirect2app(W_BytesObject.descr_mod), + __buffer__ = interpindirect2app(W_AbstractBytesObject.descr_buffer), + __getnewargs__ = interpindirect2app(W_AbstractBytesObject.descr_getnewargs), _formatter_parser = interp2app(W_BytesObject.descr_formatter_parser), _formatter_field_name_split = interp2app(W_BytesObject.descr_formatter_field_name_split), diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -1,6 +1,12 @@ +import inspect + +import py + +from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject +from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.rlib.rstring import StringBuilder -from pypy.interpreter.buffer import Buffer class W_StringBufferObject(W_AbstractBytesObject): w_str = None @@ -30,33 +36,54 @@ def str_w(self, space): return self.force() + def descr_len(self, space): + return space.wrap(self.length) + + def descr_add(self, space, w_other): + if self.builder.getlength() != self.length: + builder = StringBuilder() + builder.append(self.force()) + else: + builder = self.builder + if isinstance(w_other, W_StringBufferObject): + builder.append(w_other.force()) + else: + builder.append(w_other._value) + return W_StringBufferObject(builder) + + def descr_str(self, space): + # you cannot get subclasses of W_StringBufferObject here + assert 
type(self) is W_StringBufferObject + return self + + +delegation_dict = {} +for key, value in W_BytesObject.typedef.rawdict.iteritems(): + if not isinstance(value, interp2app): + continue + if key in ('__len__', '__add__', '__str__'): + continue + + func = value._code._bltin + args = inspect.getargs(func.func_code) + if args.varargs or args.keywords: + raise TypeError("Varargs and keywords not supported in unwrap_spec") + argspec = ', '.join([arg for arg in args.args[1:]]) + func_code = py.code.Source(""" + def f(self, %(args)s): + self.force() + return self.w_str.%(func_name)s(%(args)s) + """ % {'args': argspec, 'func_name': func.func_name}) + d = {} + exec func_code.compile() in d + f = d['f'] + f.func_defaults = func.func_defaults + f.__module__ = func.__module__ + # necessary for unique identifiers for pickling + f.func_name = func.func_name + unwrap_spec_ = getattr(func, 'unwrap_spec', None) + if unwrap_spec_ is not None: + f = unwrap_spec(**unwrap_spec_)(f) + setattr(W_StringBufferObject, func.func_name, f) + W_StringBufferObject.typedef = W_BytesObject.typedef - -# ____________________________________________________________ - -def joined2(str1, str2): - builder = StringBuilder() - builder.append(str1) - builder.append(str2) - return W_StringBufferObject(builder) - -# ____________________________________________________________ - -def len__StringBuffer(space, w_self): - return space.wrap(w_self.length) - -def add__StringBuffer_Bytes(space, w_self, w_other): - if w_self.builder.getlength() != w_self.length: - builder = StringBuilder() - builder.append(w_self.force()) - else: - builder = w_self.builder - builder.append(w_other._value) - return W_StringBufferObject(builder) - -def str__StringBuffer(space, w_self): - # you cannot get subclasses of W_StringBufferObject here - assert type(w_self) is W_StringBufferObject - return w_self - -from pypy.objspace.std import bytesobject From noreply at buildbot.pypy.org Sat Aug 17 17:08:32 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 17 Aug 2013 17:08:32 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130817150832.CCAE61C3629@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66184:ee8c9667c732 Date: 2013-08-17 17:05 +0200 http://bitbucket.org/pypy/pypy/changeset/ee8c9667c732/ Log: hg merge default diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -83,7 +83,7 @@ the selection of scientific software) will also work for a build with the builtin backend. -.. _`download`: http://cern.ch/wlav/reflex-2013-04-23.tar.bz2 +.. _`download`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 .. 
_`ROOT`: http://root.cern.ch/ Besides Reflex, you probably need a version of `gccxml`_ installed, which is @@ -98,8 +98,8 @@ To install the standalone version of Reflex, after download:: - $ tar jxf reflex-2013-04-23.tar.bz2 - $ cd reflex-2013-04-23 + $ tar jxf reflex-2013-08-14.tar.bz2 + $ cd reflex-2013-08-14 $ ./build/autogen $ ./configure $ make && make install diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -119,13 +119,12 @@ except: try: stderr = sys.stderr - except AttributeError: - pass # too bad - else: print >> stderr, 'Error calling sys.excepthook:' originalexcepthook(*sys.exc_info()) print >> stderr print >> stderr, 'Original exception was:' + except: + pass # too bad # we only get here if sys.excepthook didn't do its job originalexcepthook(etype, evalue, etraceback) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -28,16 +28,17 @@ import __pypy__, thread, signal, time, sys def subthread(): + print('subthread started') try: with __pypy__.thread.signals_enabled: thread.interrupt_main() for i in range(10): - print 'x' + print('x') time.sleep(0.1) except BaseException, e: interrupted.append(e) finally: - print 'subthread stops, interrupted=%r' % (interrupted,) + print('subthread stops, interrupted=%r' % (interrupted,)) done.append(None) # This is normally called by app_main.py @@ -53,13 +54,13 @@ try: done = [] interrupted = [] - print '--- start ---' + print('--- start ---') thread.start_new_thread(subthread, ()) for j in range(10): if len(done): break - print '.' + print('.') time.sleep(0.1) - print 'main thread loop done' + print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 assert 'KeyboardInterrupt' in interrupted[0].__class__.__name__ @@ -80,7 +81,7 @@ def threadfunction(): pid = fork() if pid == 0: - print 'in child' + print('in child') # signal() only works from the 'main' thread signal.signal(signal.SIGUSR1, signal.SIG_IGN) os._exit(42) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -437,14 +437,14 @@ return self.getrepr(self.space, info) def getdisplayname(self): + space = self.space w_name = self.w_name if w_name is None: return '?' - elif self.space.is_true(self.space.isinstance(w_name, - self.space.w_str)): - return "'%s'" % self.space.str_w(w_name) + elif space.isinstance_w(w_name, space.w_str): + return "'%s'" % space.str_w(w_name) else: - return self.space.str_w(self.space.repr(w_name)) + return space.str_w(space.repr(w_name)) def file_writelines(self, w_lines): """writelines(sequence_of_strings) -> None. Write the strings to the file. 
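The array diff just below makes item assignment fall back to an object's __int__ or __float__ method, which is exactly what its new tests exercise. A minimal app-level sketch of that behaviour, assuming the patched interp_array.py is in use; the Num class and the typecodes chosen here are illustrative only, not part of the patch:

    from array import array

    class Num(object):
        def __int__(self):      # consulted for integer typecodes such as 'i'
            return 7
        def __float__(self):    # consulted for the float typecodes 'f' and 'd'
            return 5.25

    a = array('i', [0])
    a[0] = Num()                # coerced via __int__
    assert a[0] == 7

    d = array('d', [0.0])
    d[0] = Num()                # coerced via __float__
    assert d[0] == 5.25

Plain floats are still rejected for the integer typecodes, as the test's raises(TypeError, a.__setitem__, 0, 1.0) line checks.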
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,7 +11,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi - +from pypy.objspace.std.floatobject import W_FloatObject @unwrap_spec(typecode=str) def w_array(space, w_cls, typecode, __args__): @@ -532,7 +532,7 @@ class TypeCode(object): - def __init__(self, itemtype, unwrap, canoverflow=False, signed=False): + def __init__(self, itemtype, unwrap, canoverflow=False, signed=False, method='__int__'): self.itemtype = itemtype self.bytes = rffi.sizeof(itemtype) self.arraytype = lltype.Array(itemtype, hints={'nolength': True}) @@ -540,6 +540,7 @@ self.signed = signed self.canoverflow = canoverflow self.w_class = None + self.method = method if self.canoverflow: assert self.bytes <= rffi.sizeof(rffi.ULONG) @@ -554,8 +555,8 @@ return True types = { - 'c': TypeCode(lltype.Char, 'str_w'), - 'u': TypeCode(lltype.UniChar, 'unicode_w'), + 'c': TypeCode(lltype.Char, 'str_w', method=''), + 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), 'b': TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True), 'B': TypeCode(rffi.UCHAR, 'int_w', True), 'h': TypeCode(rffi.SHORT, 'int_w', True, True), @@ -567,8 +568,8 @@ # rbigint.touint() which # corresponds to the # C-type unsigned long - 'f': TypeCode(lltype.SingleFloat, 'float_w'), - 'd': TypeCode(lltype.Float, 'float_w'), + 'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'), + 'd': TypeCode(lltype.Float, 'float_w', method='__float__'), } for k, v in types.items(): v.typecode = k @@ -613,7 +614,19 @@ def item_w(self, w_item): space = self.space unwrap = getattr(space, mytype.unwrap) - item = unwrap(w_item) + try: + item = unwrap(w_item) + except OperationError, e: + if isinstance(w_item, W_FloatObject): # Odd special case from cpython + raise + if mytype.method != '' and e.match(space, space.w_TypeError): + try: + item = unwrap(space.call_method(w_item, mytype.method)) + except OperationError: + msg = 'array item must be ' + mytype.unwrap[:-2] + raise OperationError(space.w_TypeError, space.wrap(msg)) + else: + raise if mytype.unwrap == 'bigint_w': try: item = item.touint() diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -874,6 +874,77 @@ assert l assert l[0] is None or len(l[0]) == 0 + def test_assign_object_with_special_methods(self): + from array import array + + class Num(object): + def __float__(self): + return 5.25 + + def __int__(self): + return 7 + + class NotNum(object): + pass + + class Silly(object): + def __float__(self): + return None + + def __int__(self): + return None + + class OldNum: + def __float__(self): + return 6.25 + + def __int__(self): + return 8 + + class OldNotNum: + pass + + class OldSilly: + def __float__(self): + return None + + def __int__(self): + return None + + for tc in 'bBhHiIlL': + a = array(tc, [0]) + raises(TypeError, a.__setitem__, 0, 1.0) + a[0] = 1 + a[0] = Num() + assert a[0] == 7 + raises(TypeError, a.__setitem__, NotNum()) + a[0] = OldNum() + assert a[0] == 8 + raises(TypeError, a.__setitem__, OldNotNum()) + raises(TypeError, a.__setitem__, Silly()) + raises(TypeError, a.__setitem__, OldSilly()) + + for tc in 'fd': + a = array(tc, [0]) + a[0] = 1.0 + a[0] = 1 + a[0] = Num() + assert a[0] == 5.25 + 
raises(TypeError, a.__setitem__, NotNum()) + a[0] = OldNum() + assert a[0] == 6.25 + raises(TypeError, a.__setitem__, OldNotNum()) + raises(TypeError, a.__setitem__, Silly()) + raises(TypeError, a.__setitem__, OldSilly()) + + a = array('c', 'hi') + a[0] = 'b' + assert a[0] == 'b' + + a = array('u', u'hi') + a[0] = u'b' + assert a[0] == u'b' + class TestCPythonsOwnArray(BaseArrayTests): diff --git a/pypy/module/cppyy/genreflex-methptrgetter.patch b/pypy/module/cppyy/genreflex-methptrgetter.patch --- a/pypy/module/cppyy/genreflex-methptrgetter.patch +++ b/pypy/module/cppyy/genreflex-methptrgetter.patch @@ -10,7 +10,7 @@ # The next is to avoid a known problem with gccxml that it generates a # references to id equal '_0' which is not defined anywhere self.xref['_0'] = {'elem':'Unknown', 'attrs':{'id':'_0','name':''}, 'subelems':[]} -@@ -1306,6 +1307,8 @@ +@@ -1328,6 +1329,8 @@ bases = self.getBases( attrs['id'] ) if inner and attrs.has_key('demangled') and self.isUnnamedType(attrs['demangled']) : cls = attrs['demangled'] @@ -19,7 +19,7 @@ clt = '' else: cls = self.genTypeName(attrs['id'],const=True,colon=True) -@@ -1343,7 +1346,7 @@ +@@ -1365,7 +1368,7 @@ # Inner class/struct/union/enum. for m in memList : member = self.xref[m] @@ -28,7 +28,7 @@ and member['attrs'].get('access') in ('private','protected') \ and not self.isUnnamedType(member['attrs'].get('demangled')): cmem = self.genTypeName(member['attrs']['id'],const=True,colon=True) -@@ -1981,8 +1984,15 @@ +@@ -2003,8 +2006,15 @@ else : params = '0' s = ' .AddFunctionMember(%s, Reflex::Literal("%s"), %s%s, 0, %s, %s)' % (self.genTypeID(id), name, type, id, params, mod) s += self.genCommentProperty(attrs) @@ -44,7 +44,7 @@ def genMCODef(self, type, name, attrs, args): id = attrs['id'] cl = self.genTypeName(attrs['context'],colon=True) -@@ -2049,8 +2059,44 @@ +@@ -2071,8 +2081,44 @@ if returns == 'void' : body += ' }\n' else : body += ' }\n' body += '}\n' @@ -105,17 +105,16 @@ -h, --help Print this help\n """ -@@ -127,7 +131,8 @@ - opts, args = getopt.getopt(options, 'ho:s:c:I:U:D:PC', \ +@@ -128,7 +132,7 @@ ['help','debug=', 'output=','selection_file=','pool','dataonly','interpreteronly','deep','gccxmlpath=', 'capabilities=','rootmap=','rootmap-lib=','comments','iocomments','no_membertypedefs', -- 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=']) -+ 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', -+ 'with-methptrgetter']) + 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', +- 'library=']) ++ 'library=', 'with-methptrgetter']) except getopt.GetoptError, e: print "--->> genreflex: ERROR:",e self.usage(2) -@@ -186,6 +191,8 @@ +@@ -187,6 +191,8 @@ self.rootmap = a if o in ('--rootmap-lib',): self.rootmaplib = a diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -140,6 +140,7 @@ ("deg2rad", "radians"), ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), + ("rint", "rint"), ("sign", "sign"), ("signbit", "signbit"), ("sin", "sin"), @@ -175,6 +176,8 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), + ('ones_like', 'ones_like'), + ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py 
+++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -620,6 +620,7 @@ ("positive", "pos", 1), ("negative", "neg", 1), ("absolute", "abs", 1, {"complex_to_float": True}), + ("rint", "rint", 1), ("sign", "sign", 1, {"promote_bools": True}), ("signbit", "signbit", 1, {"bool_result": True, "allow_complex": False}), @@ -675,6 +676,8 @@ "allow_complex": False}), ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True, "allow_complex": False}), + ("ones_like", "ones_like", 1), + ("zeros_like", "zeros_like", 1), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -685,3 +685,8 @@ msg=error_message) sys.stderr.write('.') sys.stderr.write('\n') + + def test_complexbox_to_pycomplex(self): + from numpypy import complex128 + x = complex128(3.4j) + assert complex(x) == 3.4j diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -255,6 +255,22 @@ for i in range(3): assert c[i] == a[i] * b[i] + def test_rint(self): + from numpypy import array, complex, rint, isnan + + nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') + + reference = array([ninf, -2., -1., -0., 0., 0., 0., 1., 2., inf]) + a = array([ninf, -1.5, -1., -0.5, -0., 0., 0.5, 1., 1.5, inf]) + b = rint(a) + for i in range(len(a)): + assert b[i] == reference[i] + assert isnan(rint(nan)) + assert isnan(rint(nnan)) + + assert rint(complex(inf, 1.5)) == complex(inf, 2.) + assert rint(complex(0.5, inf)) == complex(0., inf) + def test_sign(self): from numpypy import array, sign, dtype @@ -939,4 +955,18 @@ assert logaddexp2(float('inf'), float('-inf')) == float('inf') assert logaddexp2(float('inf'), float('inf')) == float('inf') + def test_ones_like(self): + from numpypy import array, ones_like + assert ones_like(False) == array(True) + assert ones_like(2) == array(1) + assert ones_like(2.) == array(1.) + assert ones_like(complex(2)) == array(complex(1)) + + def test_zeros_like(self): + from numpypy import array, zeros_like + + assert zeros_like(True) == array(False) + assert zeros_like(2) == array(0) + assert zeros_like(2.) == array(0.) 
+ assert zeros_like(complex(2)) == array(complex(0)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -307,6 +307,22 @@ def min(self, v1, v2): return min(v1, v2) + @simple_unary_op + def rint(self, v): + if isfinite(v): + return rfloat.round_double(v, 0, half_even=True) + else: + return v + + @simple_unary_op + def ones_like(self, v): + return 1 + + @simple_unary_op + def zeros_like(self, v): + return 0 + + class NonNativePrimitive(Primitive): _mixin_ = True @@ -1392,11 +1408,14 @@ def round(self, v, decimals=0): ans = list(self.for_computation(self.unbox(v))) if isfinite(ans[0]): - ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) + ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) if isfinite(ans[1]): - ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) + ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) return self.box_complex(ans[0], ans[1]) + def rint(self, v): + return self.round(v) + # No floor, ceil, trunc in numpy for complex #@simple_unary_op #def floor(self, v): @@ -1599,6 +1618,15 @@ except ValueError: return rfloat.NAN, rfloat.NAN + @complex_unary_op + def ones_like(self, v): + return 1, 0 + + @complex_unary_op + def zeros_like(self, v): + return 0, 0 + + class Complex64(ComplexFloating, BaseType): _attrs_ = () diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -723,11 +723,16 @@ for hook in get_fork_hooks(where): hook(space) -def fork(space): +def _run_forking_function(space, kind): run_fork_hooks('before', space) - try: - pid = os.fork() + if kind == "F": + pid = os.fork() + master_fd = -1 + elif kind == "P": + pid, master_fd = os.forkpty() + else: + raise AssertionError except OSError, e: try: run_fork_hooks('parent', space) @@ -735,12 +740,14 @@ # Don't clobber the OSError if the fork failed pass raise wrap_oserror(space, e) - if pid == 0: run_fork_hooks('child', space) else: run_fork_hooks('parent', space) + return pid, master_fd +def fork(space): + pid, irrelevant = _run_forking_function(space, "F") return space.wrap(pid) def openpty(space): @@ -752,10 +759,7 @@ return space.newtuple([space.wrap(master_fd), space.wrap(slave_fd)]) def forkpty(space): - try: - pid, master_fd = os.forkpty() - except OSError, e: - raise wrap_oserror(space, e) + pid, master_fd = _run_forking_function(space, "P") return space.newtuple([space.wrap(pid), space.wrap(master_fd)]) diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -201,7 +201,7 @@ if w_z is not None: # __complex__() must return a complex or (float,int,long) object # (XXX should not use isinstance here) - if not strict_typing and (space.isinstance_w(w_z, space.w_int) or + if not strict_typing and (space.isinstance_w(w_z, space.w_int) or space.isinstance_w(w_z, space.w_long) or space.isinstance_w(w_z, space.w_float)): return (space.float_w(w_z), 0.0) @@ -214,8 +214,10 @@ # # no '__complex__' method, so we assume it is a float, # unless it is an instance of some subclass of complex. 
- if isinstance(w_complex, W_ComplexObject): - return (w_complex.realval, w_complex.imagval) + if space.isinstance_w(w_complex, space.gettypefor(W_ComplexObject)): + real = space.float(space.getattr(w_complex, space.wrap("real"))) + imag = space.float(space.getattr(w_complex, space.wrap("imag"))) + return (space.float_w(real), space.float_w(imag)) # # Check that it is not a string (on which space.float() would succeed). if (space.isinstance_w(w_complex, space.w_str) or diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat -from pypy.objspace.std.inttype import wrapint, W_AbstractIntObject +from pypy.objspace.std.inttype import W_AbstractIntObject from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.noneobject import W_NoneObject @@ -55,7 +55,7 @@ if space.is_w(space.type(self), space.w_int): return self a = self.intval - return wrapint(space, a) + return space.newint(a) registerimplementation(W_IntObject) @@ -104,7 +104,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer addition")) - return wrapint(space, z) + return space.newint(z) def sub__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -114,7 +114,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer substraction")) - return wrapint(space, z) + return space.newint(z) def mul__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -124,7 +124,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer multiplication")) - return wrapint(space, z) + return space.newint(z) def floordiv__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -137,7 +137,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer division")) - return wrapint(space, z) + return space.newint(z) div__Int_Int = floordiv__Int_Int def truediv__Int_Int(space, w_int1, w_int2): @@ -158,7 +158,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer modulo")) - return wrapint(space, z) + return space.newint(z) def divmod__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -231,7 +231,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer negation")) - return wrapint(space, x) + return space.newint(x) get_negint = neg__Int @@ -247,7 +247,7 @@ def invert__Int(space, w_int1): x = w_int1.intval a = ~x - return wrapint(space, a) + return space.newint(a) def lshift__Int_Int(space, w_int1, w_int2): a = w_int1.intval @@ -258,7 +258,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer left shift")) - return wrapint(space, c) + return space.newint(c) if b < 0: raise OperationError(space.w_ValueError, space.wrap("negative shift count")) @@ -284,25 +284,25 @@ a = 0 else: a = a >> b - return wrapint(space, a) + return space.newint(a) def and__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a & b - return wrapint(space, res) + return space.newint(res) def xor__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a ^ b - return wrapint(space, res) + return space.newint(res) def or__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = 
w_int2.intval res = a | b - return wrapint(space, res) + return space.newint(res) def pos__Int(self, space): return self.int(space) @@ -323,7 +323,7 @@ return space.wrap(hex(w_int1.intval)) def getnewargs__Int(space, w_int1): - return space.newtuple([wrapint(space, w_int1.intval)]) + return space.newtuple([space.newint(w_int1.intval)]) register_all(vars()) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -20,7 +20,6 @@ from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.iterobject import (W_FastListIterObject, W_ReverseSeqIterObject) from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice @@ -427,7 +426,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): return W_FastListIterObject(self) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.objspace.std import slicetype -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from rpython.rlib import jit from rpython.rlib.objectmodel import specialize @@ -205,7 +204,7 @@ @specialize.argtype(0) def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) - return wrapint(space, value.count(self._op_val(space, w_sub), start, end)) + return space.newint(value.count(self._op_val(space, w_sub), start, end)) @specialize.argtype(0) def descr_decode(self, space, w_encoding=None, w_errors=None): diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -7,7 +7,6 @@ from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import slicetype -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate @@ -56,7 +55,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): from pypy.objspace.std import iterobject diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -382,7 +382,7 @@ from rpython.jit.backend.tool.viewcode import World world = World() for entry in extract_category(log, 'jit-backend-dump'): - world.parse(entry.splitlines(True), truncate_addr=False) + world.parse(entry.splitlines(True)) dumps = {} for r in world.ranges: if r.addr in addrs and addrs[r.addr]: diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -26,10 +26,11 @@ - Add COND_CALLs to the write barrier before SETFIELD_GC 
and SETARRAYITEM_GC operations. - recent_mallocs contains a dictionary of variable -> None. If a variable - is in the dictionary, next setfields can be called without a write barrier, - because the variable got allocated after the last potentially collecting - resop + 'write_barrier_applied' contains a dictionary of variable -> None. + If a variable is in the dictionary, next setfields can be called without + a write barrier. The idea is that an object that was freshly allocated + or already write_barrier'd doesn't need another write_barrier if there + was no potentially collecting resop in between. """ _previous_size = -1 @@ -42,7 +43,7 @@ self.cpu = cpu self.newops = [] self.known_lengths = {} - self.recent_mallocs = {} + self.write_barrier_applied = {} def rewrite(self, operations): # we can only remember one malloc since the next malloc can possibly @@ -221,18 +222,18 @@ def emitting_an_operation_that_can_collect(self): # must be called whenever we emit an operation that can collect: # forgets the previous MALLOC_NURSERY, if any; and empty the - # set 'recent_mallocs', so that future SETFIELDs will generate + # set 'write_barrier_applied', so that future SETFIELDs will generate # a write barrier as usual. self._op_malloc_nursery = None - self.recent_mallocs.clear() + self.write_barrier_applied.clear() def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) self.newops.append(op) - # mark 'v_result' as freshly malloced - self.recent_mallocs[v_result] = None + # mark 'v_result' as freshly malloced, so not needing a write barrier + self.write_barrier_applied[v_result] = None def gen_malloc_fixedsize(self, size, typeid, v_result): """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). @@ -315,7 +316,7 @@ [ConstInt(kind), ConstInt(itemsize), v_length], v_result, descr=arraydescr) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_malloc_nursery_varsize_frame(self, sizebox, v_result): @@ -327,7 +328,7 @@ v_result) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. 
@@ -360,7 +361,7 @@ self.newops.append(op) self._previous_size = size self._v_last_malloced_nursery = v_result - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_initialize_tid(self, v_newgcobj, tid): @@ -382,45 +383,42 @@ def handle_write_barrier_setfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) self.newops.append(op) def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier_array(op.getarg(0), - op.getarg(1), v) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.gen_write_barrier_array(val, op.getarg(1)) + #op = op.copy_and_change(rop.SETARRAYITEM_RAW) self.newops.append(op) - def gen_write_barrier(self, v_base, v_value): + def gen_write_barrier(self, v_base): write_barrier_descr = self.gc_ll_descr.write_barrier_descr - args = [v_base, v_value] + args = [v_base] self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, descr=write_barrier_descr)) + self.write_barrier_applied[v_base] = None - def gen_write_barrier_array(self, v_base, v_index, v_value): + def gen_write_barrier_array(self, v_base, v_index): write_barrier_descr = self.gc_ll_descr.write_barrier_descr if write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too @@ -430,13 +428,15 @@ length = self.known_lengths.get(v_base, LARGE) if length >= LARGE: # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] + args = [v_base, v_index] self.newops.append( ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, descr=write_barrier_descr)) + # a WB_ARRAY is not enough to prevent any future write + # barriers, so don't add to 'write_barrier_applied'! 
return # fall-back case: produce a write_barrier - self.gen_write_barrier(v_base, v_value) + self.gen_write_barrier(v_base) def round_up_for_allocation(self, size): if not self.gc_ll_descr.round_up: diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -202,13 +202,11 @@ rewriter = gc.GcRewriterAssembler(gc_ll_descr, None) newops = rewriter.newops v_base = BoxPtr() - v_value = BoxPtr() - rewriter.gen_write_barrier(v_base, v_value) + rewriter.gen_write_barrier(v_base) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB assert newops[0].getarg(0) == v_base - assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() assert is_valid_int(wbdescr.jit_wb_if_flag) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -561,8 +561,8 @@ jump() """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setfield_raw(p1, p2, descr=tzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, p2, descr=tzdescr) jump() """) @@ -575,8 +575,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -595,8 +595,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 129, descr=clendescr) call(123456) - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -616,8 +616,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 130, descr=clendescr) call(123456) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -628,8 +628,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -647,8 +647,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 5, descr=clendescr) label(p1, i2, p3) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -666,8 +666,8 @@ jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) jump(p1, p2) """, interiorzdescr=interiorzdescr) @@ -733,8 +733,8 @@ p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) setfield_gc(p1, i0, descr=strlendescr) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) jump() """) @@ -750,11 +750,25 @@ p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) label(p0, p1) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + 
setfield_gc(p0, p1, descr=tzdescr) jump() """) + def test_multiple_writes(self): + self.check_rewrite(""" + [p0, p1, p2] + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """, """ + [p0, p1, p2] + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """) + def test_rewrite_call_assembler(self): self.check_rewrite(""" [i0, f0] diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2140,11 +2140,9 @@ s = lltype.malloc(S) s.tid = value sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - t = lltype.malloc(S) - tgcref = lltype.cast_opaque_ptr(llmemory.GCREF, t) del record[:] self.execute_operation(rop.COND_CALL_GC_WB, - [BoxPtr(sgcref), ConstPtr(tgcref)], + [BoxPtr(sgcref)], 'void', descr=WriteBarrierDescr()) if cond: assert record == [rffi.cast(lltype.Signed, sgcref)] @@ -2179,7 +2177,7 @@ sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) del record[:] self.execute_operation(rop.COND_CALL_GC_WB_ARRAY, - [BoxPtr(sgcref), ConstInt(123), BoxPtr(sgcref)], + [BoxPtr(sgcref), ConstInt(123)], 'void', descr=WriteBarrierDescr()) if cond: assert record == [rffi.cast(lltype.Signed, sgcref)] @@ -2244,7 +2242,7 @@ del record[:] box_index = BoxIndexCls((9<<7) + 17) self.execute_operation(rop.COND_CALL_GC_WB_ARRAY, - [BoxPtr(sgcref), box_index, BoxPtr(sgcref)], + [BoxPtr(sgcref), box_index], 'void', descr=WriteBarrierDescr()) if cond in [0, 1]: assert record == [rffi.cast(lltype.Signed, s.data)] diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -51,18 +51,18 @@ raise ObjdumpNotFound('(g)objdump was not found in PATH') def machine_code_dump(data, originaddr, backend_name, label_list=None): - objdump_backend_option = { + objdump_machine_option = { 'x86': 'i386', 'x86-without-sse2': 'i386', 'x86_32': 'i386', - 'x86_64': 'x86-64', - 'x86-64': 'x86-64', + 'x86_64': 'i386:x86-64', + 'x86-64': 'i386:x86-64', 'i386': 'i386', 'arm': 'arm', 'arm_32': 'arm', } cmd = find_objdump() - objdump = ('%(command)s -M %(backend)s -b binary -m %(machine)s ' + objdump = ('%(command)s -b binary -m %(machine)s ' '--disassembler-options=intel-mnemonics ' '--adjust-vma=%(origin)d -D %(file)s') # @@ -73,8 +73,7 @@ 'command': cmd, 'file': tmpfile, 'origin': originaddr, - 'backend': objdump_backend_option[backend_name], - 'machine': 'i386' if not backend_name.startswith('arm') else 'arm', + 'machine': objdump_machine_option[backend_name], }, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() assert not p.returncode, ('Encountered an error running objdump: %s' % @@ -240,7 +239,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True, truncate_addr=True): + def parse(self, f, textonly=True): for line in f: if line.startswith('BACKEND '): self.backend_name = line.split(' ')[1].strip() @@ -251,9 +250,7 @@ if len(pieces) == 3: continue # empty line baseaddr = long(pieces[1][1:], 16) - if truncate_addr: - baseaddr &= 0xFFFFFFFFL - elif baseaddr < 0: + if baseaddr < 0: baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset @@ -273,9 +270,7 @@ assert pieces[1].startswith('@') assert pieces[2].startswith('+') baseaddr = long(pieces[1][1:], 16) - if 
truncate_addr: - baseaddr &= 0xFFFFFFFFL - elif baseaddr < 0: + if baseaddr < 0: baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -502,8 +502,8 @@ 'SETFIELD_RAW/2d', 'STRSETITEM/3', 'UNICODESETITEM/3', - 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) - 'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.) + 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) + 'COND_CALL_GC_WB_ARRAY/2d', # [objptr, arrayindex] (write barr. for array) 'DEBUG_MERGE_POINT/*', # debugging only 'JIT_DEBUG/*', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1574,6 +1574,7 @@ _nowrapper = True) def fork_llimpl(): + # NB. keep forkpty() up-to-date, too opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) rthread.gc_thread_after_fork(childpid, opaqueaddr) @@ -1609,6 +1610,7 @@ @registering_if(os, 'forkpty') def register_os_forkpty(self): + from rpython.rlib import rthread os_forkpty = self.llexternal( 'forkpty', [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], @@ -1616,7 +1618,11 @@ compilation_info=ExternalCompilationInfo(libraries=['util'])) def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - childpid = os_forkpty(master_p, None, None, None) + master_p[0] = rffi.cast(rffi.INT, -1) + opaqueaddr = rthread.gc_thread_before_fork() + childpid = rffi.cast(lltype.Signed, + os_forkpty(master_p, None, None, None)) + rthread.gc_thread_after_fork(childpid, opaqueaddr) master_fd = master_p[0] lltype.free(master_p, flavor='raw') if childpid == -1: From noreply at buildbot.pypy.org Sat Aug 17 17:37:36 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Aug 2013 17:37:36 +0200 (CEST) Subject: [pypy-commit] pypy rewritten-loop-logging: enable logging on arm Message-ID: <20130817153736.E55691C3629@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewritten-loop-logging Changeset: r66185:a5e75268576e Date: 2013-08-17 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/a5e75268576e/ Log: enable logging on arm diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -621,6 +621,9 @@ 'loop.asm') ops_offset = self.mc.ops_offset + if logger is not None: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) self.teardown() debug_start("jit-backend-addr") @@ -695,6 +698,9 @@ frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) + if logger: + logger.log_bridge(inputargs, operations, "rewritten", + ops_offset=ops_offset) self.teardown() debug_bridge(descr_number, rawstart, codeendpos) From noreply at buildbot.pypy.org Sat Aug 17 17:51:47 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Aug 2013 17:51:47 +0200 (CEST) Subject: [pypy-commit] pypy rewritten-loop-logging: Fix the tests Message-ID: <20130817155147.DDDBE1C10DD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewritten-loop-logging Changeset: r66186:87cf0e737036 Date: 2013-08-17 
17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/87cf0e737036/ Log: Fix the tests diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -676,7 +676,7 @@ 'checkdescr': checkdescr, 'fielddescr': cpu.fielddescrof(S, 'x')}) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) p0 = lltype.malloc(S, zero=True) p1 = lltype.malloc(S) p2 = lltype.malloc(S) @@ -715,7 +715,7 @@ 'calldescr': checkdescr, }) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) S = self.S s = lltype.malloc(S) cpu.execute_token(token, 1, s) @@ -743,7 +743,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(20) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) arg = longlong.getfloatstorage(2.3) frame = cpu.execute_token(token, arg) ofs = cpu.get_baseofs_of_frame_field() @@ -770,7 +770,7 @@ cpu.gc_ll_descr.collections = [[0, sizeof.size]] cpu.gc_ll_descr.init_nursery(2 * sizeof.size) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = cpu.execute_token(token) # now we should be able to track everything from the frame frame = lltype.cast_opaque_ptr(JITFRAMEPTR, frame) @@ -821,7 +821,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) args = [lltype.nullptr(llmemory.GCREF.TO) for i in range(7)] frame = cpu.execute_token(token, 1, *args) frame = rffi.cast(JITFRAMEPTR, frame) @@ -867,7 +867,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) @@ -911,7 +911,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) assert getmap(frame).count('1') == 4 diff --git a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py --- a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py @@ -97,7 +97,7 @@ loop = self.parse(ops, namespace=namespace) self.loop = loop looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) arguments = [] for arg in args: if isinstance(arg, int): @@ -147,7 +147,8 @@ assert ([box.type for box in bridge.inputargs] == [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() - self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, + bridge.operations, loop._jitcelltoken) return bridge 
@@ -335,7 +336,7 @@ ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] - + def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] @@ -409,7 +410,7 @@ class TestRegallocCompOps(BaseTestRegalloc): - + def test_cmp_op_0(self): ops = ''' [i0, i3] @@ -575,7 +576,7 @@ class TestRegAllocCallAndStackDepth(BaseTestRegalloc): def setup_class(cls): py.test.skip("skip for now, not sure what do we do") - + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if not self.cpu.IS_64_BIT: @@ -612,7 +613,7 @@ ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) + i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) guard_false(i5) [i11, i1, i2, i3, i4, i5, i6, i7, i8, i9] ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) @@ -649,7 +650,7 @@ ops = ''' [i2, i0, i1] - i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) + i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) guard_false(i0, descr=fdescr2) [i3, i0] ''' bridge = self.attach_bridge(ops, loop, -2) @@ -676,7 +677,7 @@ ops = ''' [i2] - i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) + i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) guard_false(i3, descr=fdescr2) [i3] ''' bridge = self.attach_bridge(ops, loop, -2) diff --git a/rpython/jit/backend/llsupport/test/test_runner.py b/rpython/jit/backend/llsupport/test/test_runner.py --- a/rpython/jit/backend/llsupport/test/test_runner.py +++ b/rpython/jit/backend/llsupport/test/test_runner.py @@ -14,7 +14,7 @@ def set_debug(flag): pass - def compile_loop(self, inputargs, operations, looptoken): + def compile_loop(self, logger, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") diff --git a/rpython/jit/backend/test/calling_convention_test.py b/rpython/jit/backend/test/calling_convention_test.py --- a/rpython/jit/backend/test/calling_convention_test.py +++ b/rpython/jit/backend/test/calling_convention_test.py @@ -105,7 +105,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) @@ -249,7 +249,7 @@ called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_descr = called_loop.operations[-1].getdescr() - self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) + self.cpu.compile_loop(None, called_loop.inputargs, called_loop.operations, called_looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = cpu.execute_token(called_looptoken, *argvals) @@ -278,7 +278,7 @@ self.cpu.done_with_this_frame_descr_float = done_descr try: othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # prepare call to called_loop argvals, _ = self._prepare_args(args, floats, ints) @@ -424,7 +424,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, 
floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -239,9 +239,9 @@ print >>s, ' operations[%d].setfailargs([%s])' % (i, fa) if fail_descr is None: print >>s, ' looptoken = JitCellToken()' - print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' + print >>s, ' cpu.compile_loop(None, inputargs, operations, looptoken)' else: - print >>s, ' cpu.compile_bridge(%s, inputargs, operations, looptoken)' % self.descr_counters[fail_descr] + print >>s, ' cpu.compile_bridge(None, %s, inputargs, operations, looptoken)' % self.descr_counters[fail_descr] if hasattr(self.loop, 'inputargs'): vals = [] for i, v in enumerate(self.loop.inputargs): @@ -643,7 +643,7 @@ self.builder = builder self.loop = loop dump(loop) - cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) + cpu.compile_loop(None, loop.inputargs, loop.operations, loop._jitcelltoken) if self.output: builder.print_loop(self.output) @@ -715,7 +715,7 @@ if box not in self.loop.inputargs: box = box.constbox() args.append(box) - self.cpu.compile_loop(self.loop.inputargs, + self.cpu.compile_loop(None, self.loop.inputargs, [ResOperation(rop.JUMP, args, None, descr=self.loop._targettoken)], self._initialjumploop_celltoken) @@ -851,7 +851,7 @@ if r.random() < .05: return False dump(subloop) - self.builder.cpu.compile_bridge(fail_descr, fail_args, + self.builder.cpu.compile_bridge(None, fail_descr, fail_args, subloop.operations, self.loop._jitcelltoken) diff --git a/rpython/jit/backend/x86/test/test_regalloc2.py b/rpython/jit/backend/x86/test/test_regalloc2.py --- a/rpython/jit/backend/x86/test/test_regalloc2.py +++ b/rpython/jit/backend/x86/test/test_regalloc2.py @@ -32,7 +32,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 9) assert cpu.get_int_value(deadframe, 0) == (9 >> 3) assert cpu.get_int_value(deadframe, 1) == (~18) @@ -58,7 +58,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -10) assert cpu.get_int_value(deadframe, 0) == 0 assert cpu.get_int_value(deadframe, 1) == -1000 @@ -159,7 +159,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_int_value(deadframe, 0) == 0 @@ -271,7 +271,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_int_value(deadframe, 0) == 0 @@ -386,7 +386,7 @@ operations[4].setfailargs([v4, v8, v10, v2, v9, v7, v6, v1]) operations[8].setfailargs([v3, v9, v2, v6, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) loop_args = [1, -39, 46, 21, 16, 6, -4611686018427387905, 12, 14, 2] frame = cpu.execute_token(looptoken, 
*loop_args) assert cpu.get_int_value(frame, 0) == 46 @@ -493,7 +493,7 @@ operations[16].setfailargs([v5, v9]) operations[34].setfailargs([]) operations[37].setfailargs([v12, v19, v10, v7, v4, v8, v18, v15, v9]) - cpu.compile_bridge(faildescr1, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr1, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == -9223372036854775766 assert cpu.get_int_value(frame, 1) == 0 @@ -583,7 +583,7 @@ operations[0].setfailargs([]) operations[8].setfailargs([tmp23, v5, v3, v11, v6]) operations[30].setfailargs([v6]) - cpu.compile_bridge(faildescr6, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr6, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == -9223372036854775808 v1 = BoxInt() @@ -607,6 +607,6 @@ ResOperation(rop.FINISH, [], None, descr=finishdescr13), ] operations[4].setfailargs([v2]) - cpu.compile_bridge(faildescr10, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr10, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == 10 diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -934,7 +934,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, jitcell_token, log=False) + cpu.compile_loop(None, inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests memory_manager.keep_loop_alive(jitcell_token) return jitcell_token From noreply at buildbot.pypy.org Sat Aug 17 17:54:38 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Aug 2013 17:54:38 +0200 (CEST) Subject: [pypy-commit] pypy rewritten-loop-logging: fix arm tests Message-ID: <20130817155438.05AEE1C10DD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewritten-loop-logging Changeset: r66187:dcc329cbd387 Date: 2013-08-17 17:54 +0200 http://bitbucket.org/pypy/pypy/changeset/dcc329cbd387/ Log: fix arm tests diff --git a/rpython/jit/backend/arm/test/test_regalloc2.py b/rpython/jit/backend/arm/test/test_regalloc2.py --- a/rpython/jit/backend/arm/test/test_regalloc2.py +++ b/rpython/jit/backend/arm/test/test_regalloc2.py @@ -24,7 +24,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 9) assert cpu.get_int_value(deadframe, 0) == (9 >> 3) assert cpu.get_int_value(deadframe, 1) == (~18) @@ -48,7 +48,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -10) assert cpu.get_int_value(deadframe, 0) == 0 assert cpu.get_int_value(deadframe, 1) == -1000 @@ -145,7 +145,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-13 , 10 , 10 , 8 , -8 , -16 , -18 , 46 , -12 , 26] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -252,7 +252,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = 
JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [17 , -20 , -6 , 6 , 1 , 13 , 13 , 9 , 49 , 8] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -75,7 +75,7 @@ ResOperation(rop.FINISH, [inp[1]], None, descr=BasicFinalDescr(1)), ] operations[-2].setfailargs(out) - cpu.compile_loop(inp, operations, looptoken) + cpu.compile_loop(None, inp, operations, looptoken) args = [i for i in range(1, 15)] deadframe = self.cpu.execute_token(looptoken, *args) output = [self.cpu.get_int_value(deadframe, i - 1) for i in range(1, 15)] @@ -117,9 +117,9 @@ i1 = int_sub(i0, 1) finish(i1) ''') - self.cpu.compile_loop(loop2.inputargs, loop2.operations, lt2) - self.cpu.compile_loop(loop3.inputargs, loop3.operations, lt3) - self.cpu.compile_loop(loop1.inputargs, loop1.operations, lt1) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, lt2) + self.cpu.compile_loop(None, loop3.inputargs, loop3.operations, lt3) + self.cpu.compile_loop(None, loop1.inputargs, loop1.operations, lt1) df = self.cpu.execute_token(lt1, 10) assert self.cpu.get_int_value(df, 0) == 7 @@ -214,7 +214,7 @@ ops = "".join(ops) loop = parse(ops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * numargs RES = lltype.Signed args = [i+1 for i in range(numargs)] @@ -246,7 +246,7 @@ try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() - self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.compile_loop(None, ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] @@ -280,7 +280,7 @@ faildescr = BasicFailDescr(2) loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ops2 = """ [i0, f1] i1 = same_as(i0) @@ -293,7 +293,7 @@ """ loop2 = parse(ops2, self.cpu, namespace=locals()) looptoken2 = JitCellToken() - info = self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + info = self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) deadframe = self.cpu.execute_token(looptoken, -9, longlong.getfloatstorage(-13.5)) res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) From noreply at buildbot.pypy.org Sat Aug 17 18:05:20 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Aug 2013 18:05:20 +0200 (CEST) Subject: [pypy-commit] pypy default: Try harder to find the correct category in jitviewer Message-ID: <20130817160520.72BC81C1356@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66188:427b988a53bc Date: 2013-08-17 17:59 +0200 http://bitbucket.org/pypy/pypy/changeset/427b988a53bc/ Log: Try harder to find the correct category in jitviewer diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -390,7 +390,12 @@ data = r.data.encode('hex') # backward compatibility dumps[name] = 
(world.backend_name, r.addr, data) loops = [] - for entry in extract_category(log, 'jit-log-opt'): + cat = extract_category(log, 'jit-log-opt') + if not cat: + extract_category(log, 'jit-log-rewritten') + if not cat: + extract_category(log, 'jit-log-noopt') + for entry in cat: parser = ParserCls(entry, None, {}, 'lltype', None, nonstrict=True) loop = parser.parse() From noreply at buildbot.pypy.org Sat Aug 17 18:05:21 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 17 Aug 2013 18:05:21 +0200 (CEST) Subject: [pypy-commit] pypy default: merge default Message-ID: <20130817160521.AD2261C1356@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66189:b5fe11be23c0 Date: 2013-08-17 18:04 +0200 http://bitbucket.org/pypy/pypy/changeset/b5fe11be23c0/ Log: merge default diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -26,10 +26,11 @@ - Add COND_CALLs to the write barrier before SETFIELD_GC and SETARRAYITEM_GC operations. - recent_mallocs contains a dictionary of variable -> None. If a variable - is in the dictionary, next setfields can be called without a write barrier, - because the variable got allocated after the last potentially collecting - resop + 'write_barrier_applied' contains a dictionary of variable -> None. + If a variable is in the dictionary, next setfields can be called without + a write barrier. The idea is that an object that was freshly allocated + or already write_barrier'd don't need another write_barrier if there + was no potentially collecting resop inbetween. """ _previous_size = -1 @@ -42,7 +43,7 @@ self.cpu = cpu self.newops = [] self.known_lengths = {} - self.recent_mallocs = {} + self.write_barrier_applied = {} def rewrite(self, operations): # we can only remember one malloc since the next malloc can possibly @@ -221,18 +222,18 @@ def emitting_an_operation_that_can_collect(self): # must be called whenever we emit an operation that can collect: # forgets the previous MALLOC_NURSERY, if any; and empty the - # set 'recent_mallocs', so that future SETFIELDs will generate + # set 'write_barrier_applied', so that future SETFIELDs will generate # a write barrier as usual. self._op_malloc_nursery = None - self.recent_mallocs.clear() + self.write_barrier_applied.clear() def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) self.newops.append(op) - # mark 'v_result' as freshly malloced - self.recent_mallocs[v_result] = None + # mark 'v_result' as freshly malloced, so not needing a write barrier + self.write_barrier_applied[v_result] = None def gen_malloc_fixedsize(self, size, typeid, v_result): """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). @@ -315,7 +316,7 @@ [ConstInt(kind), ConstInt(itemsize), v_length], v_result, descr=arraydescr) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_malloc_nursery_varsize_frame(self, sizebox, v_result): @@ -327,7 +328,7 @@ v_result) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. 
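The jitlogparser change above complements the new "rewritten" log category that the assembler commits earlier in this thread start emitting (logger.log_loop(..., "rewritten", ...)) once a logger is passed to compile_loop()/compile_bridge(): if no optimized log is present, the viewer falls back to the rewritten and then the unoptimized traces. A condensed sketch of that selection written as a loop (hypothetical helper name; extract_category is the same function used in parser.py above and is assumed to return a possibly empty list of log entries):

    def pick_loop_category(log, extract_category):
        # prefer the optimized traces, fall back to rewritten, then unoptimized
        for name in ('jit-log-opt', 'jit-log-rewritten', 'jit-log-noopt'):
            cat = extract_category(log, name)
            if cat:
                return cat
        return []

Whichever category is found first, and kept, is the one the parser then iterates over.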
@@ -360,7 +361,7 @@ self.newops.append(op) self._previous_size = size self._v_last_malloced_nursery = v_result - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_initialize_tid(self, v_newgcobj, tid): @@ -382,45 +383,42 @@ def handle_write_barrier_setfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) self.newops.append(op) def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier_array(op.getarg(0), - op.getarg(1), v) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.gen_write_barrier_array(val, op.getarg(1)) + #op = op.copy_and_change(rop.SETARRAYITEM_RAW) self.newops.append(op) - def gen_write_barrier(self, v_base, v_value): + def gen_write_barrier(self, v_base): write_barrier_descr = self.gc_ll_descr.write_barrier_descr - args = [v_base, v_value] + args = [v_base] self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, descr=write_barrier_descr)) + self.write_barrier_applied[v_base] = None - def gen_write_barrier_array(self, v_base, v_index, v_value): + def gen_write_barrier_array(self, v_base, v_index): write_barrier_descr = self.gc_ll_descr.write_barrier_descr if write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too @@ -430,13 +428,15 @@ length = self.known_lengths.get(v_base, LARGE) if length >= LARGE: # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] + args = [v_base, v_index] self.newops.append( ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, descr=write_barrier_descr)) + # a WB_ARRAY is not enough to prevent any future write + # barriers, so don't add to 'write_barrier_applied'! 
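The three handle_write_barrier_* methods in the hunk above all follow the same pattern; a condensed sketch, with a hypothetical method name and the GcRewriterAssembler attributes shown above:

    def maybe_gen_write_barrier(self, v_base, v_value):
        # freshly malloced or already write-barrier'd objects are skipped
        if v_base in self.write_barrier_applied:
            return
        # only a store of a (possibly) non-NULL pointer needs the barrier
        if isinstance(v_value, BoxPtr) or (isinstance(v_value, ConstPtr)
                                           and bool(v_value.value)):
            self.gen_write_barrier(v_base)    # also records v_base as covered

COND_CALL_GC_WB_ARRAY, by contrast, does not record v_base, since (as the comment above says) it is not enough to make later stores to the same object safe.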
return # fall-back case: produce a write_barrier - self.gen_write_barrier(v_base, v_value) + self.gen_write_barrier(v_base) def round_up_for_allocation(self, size): if not self.gc_ll_descr.round_up: diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -202,13 +202,11 @@ rewriter = gc.GcRewriterAssembler(gc_ll_descr, None) newops = rewriter.newops v_base = BoxPtr() - v_value = BoxPtr() - rewriter.gen_write_barrier(v_base, v_value) + rewriter.gen_write_barrier(v_base) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB assert newops[0].getarg(0) == v_base - assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() assert is_valid_int(wbdescr.jit_wb_if_flag) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -561,8 +561,8 @@ jump() """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setfield_raw(p1, p2, descr=tzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, p2, descr=tzdescr) jump() """) @@ -575,8 +575,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -595,8 +595,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 129, descr=clendescr) call(123456) - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -616,8 +616,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 130, descr=clendescr) call(123456) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -628,8 +628,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -647,8 +647,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 5, descr=clendescr) label(p1, i2, p3) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -666,8 +666,8 @@ jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) jump(p1, p2) """, interiorzdescr=interiorzdescr) @@ -733,8 +733,8 @@ p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) setfield_gc(p1, i0, descr=strlendescr) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) jump() """) @@ -750,11 +750,25 @@ p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) label(p0, p1) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + 
setfield_gc(p0, p1, descr=tzdescr) jump() """) + def test_multiple_writes(self): + self.check_rewrite(""" + [p0, p1, p2] + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """, """ + [p0, p1, p2] + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """) + def test_rewrite_call_assembler(self): self.check_rewrite(""" [i0, f0] diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2140,11 +2140,9 @@ s = lltype.malloc(S) s.tid = value sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - t = lltype.malloc(S) - tgcref = lltype.cast_opaque_ptr(llmemory.GCREF, t) del record[:] self.execute_operation(rop.COND_CALL_GC_WB, - [BoxPtr(sgcref), ConstPtr(tgcref)], + [BoxPtr(sgcref)], 'void', descr=WriteBarrierDescr()) if cond: assert record == [rffi.cast(lltype.Signed, sgcref)] @@ -2179,7 +2177,7 @@ sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) del record[:] self.execute_operation(rop.COND_CALL_GC_WB_ARRAY, - [BoxPtr(sgcref), ConstInt(123), BoxPtr(sgcref)], + [BoxPtr(sgcref), ConstInt(123)], 'void', descr=WriteBarrierDescr()) if cond: assert record == [rffi.cast(lltype.Signed, sgcref)] @@ -2244,7 +2242,7 @@ del record[:] box_index = BoxIndexCls((9<<7) + 17) self.execute_operation(rop.COND_CALL_GC_WB_ARRAY, - [BoxPtr(sgcref), box_index, BoxPtr(sgcref)], + [BoxPtr(sgcref), box_index], 'void', descr=WriteBarrierDescr()) if cond in [0, 1]: assert record == [rffi.cast(lltype.Signed, s.data)] diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -502,8 +502,8 @@ 'SETFIELD_RAW/2d', 'STRSETITEM/3', 'UNICODESETITEM/3', - 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) - 'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.) + 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) + 'COND_CALL_GC_WB_ARRAY/2d', # [objptr, arrayindex] (write barr. 
for array) 'DEBUG_MERGE_POINT/*', # debugging only 'JIT_DEBUG/*', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend From noreply at buildbot.pypy.org Sun Aug 18 20:51:59 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 18 Aug 2013 20:51:59 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Merged default into 2.7.4 Message-ID: <20130818185159.297581C135F@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66190:33fd4e459b77 Date: 2013-08-18 11:50 -0700 http://bitbucket.org/pypy/pypy/changeset/33fd4e459b77/ Log: Merged default into 2.7.4 diff too long, truncating to 2000 out of 74086 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ .idea .project .pydevproject +__pycache__ syntax: regexp ^testresult$ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + 
Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -219,45 +282,21 @@ Change Maker, Sweden University of California Berkeley, USA Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' -============================================= - -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'pypy/translator/jvm/src/jasmin.jar' -================================================ - -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- License for 'pypy/module/unicodedata/' ====================================== diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -152,7 +152,8 @@ try: plaincontent = dot2plain_graphviz(content, contenttype) except PlainParseError, e: - print e - # failed, retry via codespeak - plaincontent = dot2plain_codespeak(content, contenttype) + raise + ##print e + ### failed, retry via codespeak + ##plaincontent = dot2plain_codespeak(content, contenttype) return list(parse_plain(graph_id, plaincontent, links, fixedfont)) diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -124,11 +125,19 @@ if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in 
os.environ: - cflags = os.environ["CFLAGS"].split() + cflags = shlex.split(os.environ["CFLAGS"]) compiler.compiler.extend(cflags) compiler.compiler_so.extend(cflags) compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -253,7 +259,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -335,7 +340,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 +165,8 @@ # All _delegate_methods must also be initialized here. send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. The @@ -179,12 +181,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). 
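The sysconfig_pypy.py hunk above switches from str.split() to shlex.split() for CPPFLAGS, CFLAGS and LDFLAGS; the difference shows up as soon as a flag contains a quoted space. A small self-contained illustration:

    import shlex

    flags = '-DNAME="my value" -O2'
    assert flags.split() == ['-DNAME="my', 'value"', '-O2']    # flag torn apart
    assert shlex.split(flags) == ['-DNAME=my value', '-O2']    # one entry per flag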
+ self._sock = _sock def send(self, data, flags=0): @@ -216,9 +227,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +290,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +333,11 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: + self._sock = None + if s is not None: s._drop() - if self._close: - self._sock.close() - self._sock = None + if self._close: + s.close() def __del__(self): try: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). + sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS @@ -354,11 +360,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. 
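The socket.py comments above spell out the protocol that custom socket-like objects (eventlet and friends) need on PyPy, and the ssl.py hunk adds the same pair to SSLSocket. A minimal sketch of an object honouring it, following the counter semantics of SSLSocket._reuse()/_drop() (hypothetical class, illustration only):

    class FakeSock(object):
        # stand-in for the custom objects some libraries pass to
        # socket._socketobject() / socket._fileobject()
        def __init__(self):
            self._refs = 0              # explicit reference counter, starts at 0

        def _reuse(self):
            self._refs += 1

        def _drop(self):
            if self._refs < 1:
                self.close()            # last user gone: really close
            else:
                self._refs -= 1

        def close(self):
            pass                        # real cleanup would go here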
return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception raised") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1086,6 +1086,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1418,7 +1421,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1200,6 +1200,8 @@ # out of socket._fileobject() and into a base class. r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -966,7 +966,7 @@ r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") if lib.color_content(color, r, g, b) == lib.ERR: raise error("Argument 1 was out of range. Check value of COLORS.") - return (r, g, b) + return (r[0], g[0], b[0]) def color_pair(n): @@ -1121,6 +1121,7 @@ term = ffi.NULL err = ffi.new("int *") if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] if err == 0: raise error("setupterm: could not find terminal") elif err == -1: diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! 
:-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1314,7 +1314,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.6 +Version: 0.7 Summary: Foreign Function Interface for Python calling C code. 
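The _curses.py hunk above is about cffi "out" parameters: ffi.new("short *") gives back a cdata pointer, so the value the C library wrote has to be read with [0], which is why color_content() now returns (r[0], g[0], b[0]) and setupterm() reads err[0] before comparing. A tiny self-contained reminder (needs the cffi package installed):

    import cffi

    ffi = cffi.FFI()
    p = ffi.new("short *")    # freshly allocated C short, zero-initialised
    p[0] = 7                  # a C function taking a short* would write through it
    assert p[0] == 7          # the usable value is p[0]; p itself is just the pointer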
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/ctypes_config_cache/syslog.ctc.py b/lib_pypy/ctypes_config_cache/syslog.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/syslog.ctc.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -'ctypes_configure' source for syslog.py. -Run this to rebuild _syslog_cache.py. -""" - -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger) -import dumpcache - - -_CONSTANTS = ( - 'LOG_EMERG', - 'LOG_ALERT', - 'LOG_CRIT', - 'LOG_ERR', - 'LOG_WARNING', - 'LOG_NOTICE', - 'LOG_INFO', - 'LOG_DEBUG', - - 'LOG_PID', - 'LOG_CONS', - 'LOG_NDELAY', - - 'LOG_KERN', - 'LOG_USER', - 'LOG_MAIL', - 'LOG_DAEMON', - 'LOG_AUTH', - 'LOG_LPR', - 'LOG_LOCAL0', - 'LOG_LOCAL1', - 'LOG_LOCAL2', - 'LOG_LOCAL3', - 'LOG_LOCAL4', - 'LOG_LOCAL5', - 'LOG_LOCAL6', - 'LOG_LOCAL7', -) -_OPTIONAL_CONSTANTS = ( - 'LOG_NOWAIT', - 'LOG_PERROR', - - 'LOG_SYSLOG', - 'LOG_CRON', - 'LOG_UUCP', - 'LOG_NEWS', -) - -# Constant aliases if there are not defined -_ALIAS = ( - ('LOG_SYSLOG', 'LOG_DAEMON'), - ('LOG_CRON', 'LOG_DAEMON'), - ('LOG_NEWS', 'LOG_MAIL'), - ('LOG_UUCP', 'LOG_MAIL'), -) - -class SyslogConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/syslog.h']) -for key in _CONSTANTS: - setattr(SyslogConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(SyslogConfigure, key, DefinedConstantInteger(key)) - -config = configure(SyslogConfigure) -for key in _OPTIONAL_CONSTANTS: - if config[key] is None: - del config[key] -for alias, key in _ALIAS: - config.setdefault(alias, config[key]) - -all_constants = config.keys() -all_constants.sort() -config['ALL_CONSTANTS'] = tuple(all_constants) -dumpcache.dumpcache2('syslog', config) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -5,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" @@ -46,16 +47,16 @@ if parent is not None: self.parent = parent - def switch(self, *args): + def switch(self, *args, **kwds): "Switch execution to this greenlet, optionally passing the values " "given as argument(s). Returns the value passed when switching back." - return self.__switch('switch', args) + return self.__switch('switch', (args, kwds)) def throw(self, typ=GreenletExit, val=None, tb=None): "raise exception in greenlet, return value passed when switching back" return self.__switch('throw', typ, val, tb) - def __switch(target, methodname, *args): + def __switch(target, methodname, *baseargs): current = getcurrent() # while not (target.__main or _continulet.is_pending(target)): @@ -65,9 +66,9 @@ greenlet_func = _greenlet_start else: greenlet_func = _greenlet_throw - _continulet.__init__(target, greenlet_func, *args) + _continulet.__init__(target, greenlet_func, *baseargs) methodname = 'switch' - args = () + baseargs = () target.__started = True break # already done, go to the parent instead @@ -75,14 +76,27 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) 
target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) - args = unbound_method(current, *args, to=target) + args, kwds = unbound_method(current, *baseargs, to=target) finally: _tls.current = current # - if len(args) == 1: + if kwds: + if args: + return args, kwds + return kwds + elif len(args) == 1: return args[0] else: return args @@ -129,18 +143,22 @@ _tls.current = gmain def _greenlet_start(greenlet, args): + args, kwds = args _tls.current = greenlet try: - res = greenlet.run(*args) + res = greenlet.run(*args, **kwds) except GreenletExit, e: res = e finally: _continuation.permute(greenlet, greenlet.parent) - return (res,) + return ((res,), None) def _greenlet_throw(greenlet, exc, value, tb): _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py --- a/lib_pypy/grp.py +++ b/lib_pypy/grp.py @@ -8,6 +8,7 @@ from ctypes import Structure, c_char_p, c_int, POINTER from ctypes_support import standard_c_lib as libc +import _structseq try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -23,32 +24,13 @@ ('gr_mem', POINTER(c_char_p)), ) -class Group(object): - def __init__(self, gr_name, gr_passwd, gr_gid, gr_mem): - self.gr_name = gr_name - self.gr_passwd = gr_passwd - self.gr_gid = gr_gid - self.gr_mem = gr_mem +class struct_group: + __metaclass__ = _structseq.structseqtype - def __getitem__(self, item): - if item == 0: - return self.gr_name - elif item == 1: - return self.gr_passwd - elif item == 2: - return self.gr_gid - elif item == 3: - return self.gr_mem - else: - raise IndexError(item) - - def __len__(self): - return 4 - - def __repr__(self): - return str((self.gr_name, self.gr_passwd, self.gr_gid, self.gr_mem)) - - # whatever else... + gr_name = _structseq.structseqfield(0) + gr_passwd = _structseq.structseqfield(1) + gr_gid = _structseq.structseqfield(2) + gr_mem = _structseq.structseqfield(3) libc.getgrgid.argtypes = [gid_t] libc.getgrgid.restype = POINTER(GroupStruct) @@ -71,8 +53,8 @@ while res.contents.gr_mem[i]: mem.append(res.contents.gr_mem[i]) i += 1 - return Group(res.contents.gr_name, res.contents.gr_passwd, - res.contents.gr_gid, mem) + return struct_group((res.contents.gr_name, res.contents.gr_passwd, + res.contents.gr_gid, mem)) @builtinify def getgrgid(gid): diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py --- a/lib_pypy/pyrepl/curses.py +++ b/lib_pypy/pyrepl/curses.py @@ -19,11 +19,15 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -# avoid importing the whole curses, if possible -try: +# If we are running on top of pypy, we import only _minimal_curses. +# Don't try to fall back to _curses, because that's going to use cffi +# and fall again more loudly. 
+import sys +if '__pypy__' in sys.builtin_module_names: # pypy case import _minimal_curses as _curses -except ImportError: +else: + # cpython case try: import _curses except ImportError: diff --git a/lib_pypy/readline.egg-info b/lib_pypy/readline.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/readline.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: readline +Version: 6.2.4.1 +Summary: Hack to make "pip install readline" happy and do nothing +Home-page: UNKNOWN +Author: UNKNOWN +Author-email: UNKNOWN +License: UNKNOWN +Description: UNKNOWN +Platform: UNKNOWN diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -1,3 +1,4 @@ +# this cffi version was rewritten based on the # ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides an interface to the Unix syslog library routines. @@ -9,34 +10,84 @@ if sys.platform == 'win32': raise ImportError("No syslog on Windows") -# load the platform-specific cache made by running syslog.ctc.py -from ctypes_config_cache._syslog_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes import c_int, c_char_p +from cffi import FFI try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +ffi = FFI() -# Real prototype is: -# void syslog(int priority, const char *format, ...); -# But we also need format ("%s") and one format argument (message) -_syslog = libc.syslog -_syslog.argtypes = (c_int, c_char_p, c_char_p) -_syslog.restype = None +ffi.cdef(""" +/* mandatory constants */ +#define LOG_EMERG ... +#define LOG_ALERT ... +#define LOG_CRIT ... +#define LOG_ERR ... +#define LOG_WARNING ... +#define LOG_NOTICE ... +#define LOG_INFO ... +#define LOG_DEBUG ... -_openlog = libc.openlog -_openlog.argtypes = (c_char_p, c_int, c_int) -_openlog.restype = None +#define LOG_PID ... +#define LOG_CONS ... +#define LOG_NDELAY ... -_closelog = libc.closelog -_closelog.argtypes = None -_closelog.restype = None +#define LOG_KERN ... +#define LOG_USER ... +#define LOG_MAIL ... +#define LOG_DAEMON ... +#define LOG_AUTH ... +#define LOG_LPR ... +#define LOG_LOCAL0 ... +#define LOG_LOCAL1 ... +#define LOG_LOCAL2 ... +#define LOG_LOCAL3 ... +#define LOG_LOCAL4 ... +#define LOG_LOCAL5 ... +#define LOG_LOCAL6 ... +#define LOG_LOCAL7 ... -_setlogmask = libc.setlogmask -_setlogmask.argtypes = (c_int,) -_setlogmask.restype = c_int +/* optional constants, gets defined to -919919 if missing */ +#define LOG_NOWAIT ... +#define LOG_PERROR ... + +/* aliased constants, gets defined as some other constant if missing */ +#define LOG_SYSLOG ... +#define LOG_CRON ... +#define LOG_UUCP ... +#define LOG_NEWS ... + +/* functions */ +void openlog(const char *ident, int option, int facility); +void syslog(int priority, const char *format, const char *string); +// NB. 
the signature of syslog() is specialized to the only case we use +void closelog(void); +int setlogmask(int mask); +""") + +lib = ffi.verify(""" +#include + +#ifndef LOG_NOWAIT +#define LOG_NOWAIT -919919 +#endif +#ifndef LOG_PERROR +#define LOG_PERROR -919919 +#endif +#ifndef LOG_SYSLOG +#define LOG_SYSLOG LOG_DAEMON +#endif +#ifndef LOG_CRON +#define LOG_CRON LOG_DAEMON +#endif +#ifndef LOG_UUCP +#define LOG_UUCP LOG_MAIL +#endif +#ifndef LOG_NEWS +#define LOG_NEWS LOG_MAIL +#endif +""") + _S_log_open = False _S_ident_o = None @@ -52,12 +103,17 @@ return None @builtinify -def openlog(ident=None, logoption=0, facility=LOG_USER): +def openlog(ident=None, logoption=0, facility=lib.LOG_USER): global _S_ident_o, _S_log_open if ident is None: ident = _get_argv() - _S_ident_o = c_char_p(ident) # keepalive - _openlog(_S_ident_o, logoption, facility) + if ident is None: + _S_ident_o = ffi.NULL + elif isinstance(ident, str): + _S_ident_o = ffi.new("char[]", ident) # keepalive + else: + raise TypeError("'ident' must be a string or None") + lib.openlog(_S_ident_o, logoption, facility) _S_log_open = True @builtinify @@ -69,19 +125,19 @@ # if log is not opened, open it now if not _S_log_open: openlog() - _syslog(priority, "%s", message) + lib.syslog(priority, "%s", message) @builtinify def closelog(): global _S_log_open, S_ident_o if _S_log_open: - _closelog() + lib.closelog() _S_log_open = False _S_ident_o = None @builtinify def setlogmask(mask): - return _setlogmask(mask) + return lib.setlogmask(mask) @builtinify def LOG_MASK(pri): @@ -91,8 +147,15 @@ def LOG_UPTO(pri): return (1 << (pri + 1)) - 1 -__all__ = ALL_CONSTANTS + ( +__all__ = [] + +for name in sorted(lib.__dict__): + if name.startswith('LOG_'): + value = getattr(lib, name) + if value != -919919: + globals()[name] = value + __all__.append(name) + +__all__ = tuple(__all__) + ( 'openlog', 'syslog', 'closelog', 'setlogmask', 'LOG_MASK', 'LOG_UPTO') - -del ALL_CONSTANTS diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() @@ -48,11 +48,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "_sha", "cStringIO", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -144,7 +139,7 @@ requires=module_dependencies.get(modname, []), suggests=module_suggests.get(modname, []), negation=modname not in essential_modules, - validator=get_module_validator(modname)) + ) #validator=get_module_validator(modname)) for modname in all_modules]), BoolOption("allworkingmodules", "use as many working modules as possible", @@ -340,10 +335,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -351,10 +342,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 
'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ -1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. 
- -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. - -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. 
There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. 
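As an illustrative sketch only (the ``SSIOp`` record and ``render_op`` helper below are invented for this example and are not the actual GenCLI implementation), the load/store scheme described above can be mimicked in a few lines of Python::

    class SSIOp(object):
        # one SSI-form operation, e.g. v2 = int_add(v0, v1)
        def __init__(self, result, opname, args):
            self.result = result
            self.opname = opname
            self.args = args

    def render_op(op):
        # load each argument onto the evaluation stack, apply the
        # operation, then store the result into its local variable
        lines = ['LOAD %s' % arg for arg in op.args]
        lines.append(op.opname)
        lines.append('STORE %s' % op.result)
        return lines

    print '\n'.join(render_op(SSIOp('v2', 'int_add', ['v0', 'v1'])))

Run on the example above, this prints exactly the LOAD/LOAD/int_add/STORE sequence; the real backend then addresses the resulting inefficiencies in its later optimization phase, as noted earlier.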
- -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. - -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. - -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. 
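Purely as an illustration of the block-rendering scheme above (the ``render_try_block`` helper and its inputs are invented for this sketch and do not correspond to the real Function.render code), the emission of a guarded block with its catch handlers can be pictured like this::

    def render_try_block(body_lines, handlers, next_block):
        # body_lines: IL instructions of the guarded block
        # handlers:   list of (exception_class_name, target_label) pairs
        # next_block: label to jump to when no exception is raised
        out = ['.try {']
        out.extend('    ' + line for line in body_lines)
        out.append('    leave %s' % next_block)
        out.append('}')
        for exc_name, target in handlers:
            out.append('catch %s {' % exc_name)
            out.append('    ...')
            out.append('    leave %s' % target)
            out.append('}')
        return out

    for line in render_try_block(['...'],
                                 [('ValueError', 'block1'),
                                  ('TypeError', 'block2')],
                                 'block3'):
        print line

This reproduces the shape of the IL snippet shown earlier, with one catch clause per handled exception branching to its target block.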
- - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. - -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. 
- -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. - -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. - -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. - -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. 
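As a rough mental model of the parameter conversion just described (illustrative only: the real conversion happens inside the interpreter, and the table below is invented for this sketch), the primitive cases behave like a simple lookup, with everything else passed through untouched::

    # invented for illustration; note that the CTS name for a
    # 64-bit float is spelled System.Double
    PRIMITIVE_MAP = {
        int:   'System.Int32',
        float: 'System.Double',
        str:   'System.String',
    }

    def cts_type_of(value):
        # primitive values get a corresponding CTS type; any other
        # object is passed along as an opaque "black box"
        return PRIMITIVE_MAP.get(type(value), '<black box>')

    assert cts_type_of(42) == 'System.Int32'
    assert cts_type_of(1.5) == 'System.Double'
    assert cts_type_of(object()) == '<black box>'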
- -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: @@ -907,7 +907,7 @@ runs at application level. 
If you need to use modules From noreply at buildbot.pypy.org Sun Aug 18 21:00:16 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 18 Aug 2013 21:00:16 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Bumped version numbers Message-ID: <20130818190016.466CD1C1404@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66191:c40b7b15e939 Date: 2013-08-18 11:59 -0700 http://bitbucket.org/pypy/pypy/changeset/c40b7b15e939/ Log: Bumped version numbers diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -587,7 +587,7 @@ Modules visible from application programs are imported from interpreter or application level files. PyPy reuses almost all python -modules of CPython's standard library, currently from version 2.7.3. We +modules of CPython's standard library, currently from version 2.7.4. We sometimes need to `modify modules`_ and - more often - regression tests because they rely on implementation details of CPython. diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -26,7 +26,7 @@ #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.3" +#define PY_VERSION "2.7.4" /* PyPy version as a string */ #define PYPY_VERSION "2.2.0-alpha0" From noreply at buildbot.pypy.org Sun Aug 18 21:17:23 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 18 Aug 2013 21:17:23 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: maintain the old unused_data Message-ID: <20130818191723.745441C13FB@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.4 Changeset: r66192:d721f5424f2a Date: 2013-04-01 18:51 -0700 http://bitbucket.org/pypy/pypy/changeset/d721f5424f2a/ Log: maintain the old unused_data (transplanted from 41bea643d6b454d6a9ca653b7e9b2022a45f5bf2) diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -303,7 +303,7 @@ tail = data[unused_start:] if finished: self.unconsumed_tail = '' - self.unused_data = tail + self.unused_data += tail else: self.unconsumed_tail = tail return self.space.wrap(string) diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -189,7 +189,7 @@ assert d.unused_data == 'spam' * 100 assert s1 + s2 + s3 == self.expanded s4 = d.decompress('egg' * 50) - assert d.unused_data == 'egg' * 50 + assert d.unused_data == ('spam' * 100) + ('egg' * 50) assert s4 == '' From noreply at buildbot.pypy.org Sun Aug 18 21:32:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Aug 2013 21:32:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix Message-ID: <20130818193242.BB0EC1C13FB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66193:10d40557981e Date: 2013-08-18 21:32 +0200 http://bitbucket.org/pypy/pypy/changeset/10d40557981e/ Log: Fix diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -960,10 +960,10 @@ def execute_force_token(self, _): return self - def execute_cond_call_gc_wb(self, descr, a, b): + def execute_cond_call_gc_wb(self, descr, a): py.test.skip("cond_call_gc_wb not supported") - def 
execute_cond_call_gc_wb_array(self, descr, a, b, c): + def execute_cond_call_gc_wb_array(self, descr, a, b): py.test.skip("cond_call_gc_wb_array not supported") def execute_keepalive(self, descr, x): From noreply at buildbot.pypy.org Sun Aug 18 21:40:08 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 18 Aug 2013 21:40:08 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: include the needed modules for these tests Message-ID: <20130818194008.7DF3D1C135F@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66194:2bdbee976ee9 Date: 2013-08-18 12:38 -0700 http://bitbucket.org/pypy/pypy/changeset/2bdbee976ee9/ Log: include the needed modules for these tests diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -8,8 +8,9 @@ from pypy.module._multiprocessing import interp_semaphore class AppTestBufferTooShort: - spaceconfig = dict(usemodules=['_multiprocessing', 'thread', 'signal', - 'itertools']) + spaceconfig = {'usemodules': ['_multiprocessing', 'thread', 'signal', + 'itertools', 'select', 'fcntl', 'struct', + 'binascii']} def setup_class(cls): if cls.runappdirect: From noreply at buildbot.pypy.org Sun Aug 18 21:40:09 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 18 Aug 2013 21:40:09 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: merged upstream Message-ID: <20130818194009.C1E2E1C135F@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66195:fc2e67e99022 Date: 2013-08-18 12:39 -0700 http://bitbucket.org/pypy/pypy/changeset/fc2e67e99022/ Log: merged upstream diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -8,8 +8,9 @@ from pypy.module._multiprocessing import interp_semaphore class AppTestBufferTooShort: - spaceconfig = dict(usemodules=['_multiprocessing', 'thread', 'signal', - 'itertools']) + spaceconfig = {'usemodules': ['_multiprocessing', 'thread', 'signal', + 'itertools', 'select', 'fcntl', 'struct', + 'binascii']} def setup_class(cls): if cls.runappdirect: From noreply at buildbot.pypy.org Sun Aug 18 21:40:11 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 18 Aug 2013 21:40:11 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: merged default in Message-ID: <20130818194011.071541C135F@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66196:b02eea9a0b2e Date: 2013-08-18 12:39 -0700 http://bitbucket.org/pypy/pypy/changeset/b02eea9a0b2e/ Log: merged default in diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -960,10 +960,10 @@ def execute_force_token(self, _): return self - def execute_cond_call_gc_wb(self, descr, a, b): + def execute_cond_call_gc_wb(self, descr, a): py.test.skip("cond_call_gc_wb not supported") - def execute_cond_call_gc_wb_array(self, descr, a, b, c): + def execute_cond_call_gc_wb_array(self, descr, a, b): py.test.skip("cond_call_gc_wb_array not supported") def execute_keepalive(self, descr, x): From noreply at buildbot.pypy.org Sun Aug 18 21:46:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 Aug 
2013 21:46:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Increase the pause times. The tests failed today but I cannot reproduce, Message-ID: <20130818194648.4E6E21C135F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66197:d2d78b9065ec Date: 2013-08-18 21:46 +0200 http://bitbucket.org/pypy/pypy/changeset/d2d78b9065ec/ Log: Increase the pause times. The tests failed today but I cannot reproduce, so I'm guessing it was because the machine was busy and the tests don't give enough time. diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -34,7 +34,7 @@ thread.interrupt_main() for i in range(10): print('x') - time.sleep(0.1) + time.sleep(0.25) except BaseException, e: interrupted.append(e) finally: @@ -59,7 +59,7 @@ for j in range(10): if len(done): break print('.') - time.sleep(0.1) + time.sleep(0.25) print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 @@ -117,7 +117,7 @@ def subthread(): try: - time.sleep(0.25) + time.sleep(0.5) with __pypy__.thread.signals_enabled: thread.interrupt_main() except BaseException, e: From noreply at buildbot.pypy.org Sun Aug 18 21:50:16 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 18 Aug 2013 21:50:16 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: bumped this version number Message-ID: <20130818195016.027E31C13FB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66198:f76f365843e0 Date: 2013-08-18 12:49 -0700 http://bitbucket.org/pypy/pypy/changeset/f76f365843e0/ Log: bumped this version number diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -7,7 +7,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (2, 7, 3, "final", 42) +CPYTHON_VERSION = (2, 7, 4, "final", 42) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h From noreply at buildbot.pypy.org Mon Aug 19 00:37:50 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 19 Aug 2013 00:37:50 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Made import site work. Also cleaned up some code that predated sets Message-ID: <20130818223750.65C761C135F@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66199:679f3facaae0 Date: 2013-08-18 15:37 -0700 http://bitbucket.org/pypy/pypy/changeset/679f3facaae0/ Log: Made import site work. 
Also cleaned up some code that predated sets diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -13,68 +13,67 @@ and p.join('__init__.py').check() and not p.basename.startswith('test')] -essential_modules = dict.fromkeys( - ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"] -) +essential_modules = set([ + "exceptions", "_file", "sys", "__builtin__", "posix", "_warnings", + "itertools" +]) default_modules = essential_modules.copy() -default_modules.update(dict.fromkeys( - ["_codecs", "gc", "_weakref", "marshal", "errno", "imp", - "math", "cmath", "_sre", "_pickle_support", "operator", - "parser", "symbol", "token", "_ast", "_io", "_random", "__pypy__", - "_testing"])) +default_modules.update([ + "_codecs", "gc", "_weakref", "marshal", "errno", "imp", "math", "cmath", + "_sre", "_pickle_support", "operator", "parser", "symbol", "token", "_ast", + "_io", "_random", "__pypy__", "_testing" +]) # --allworkingmodules working_modules = default_modules.copy() -working_modules.update(dict.fromkeys( - ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", - "rctime" , "select", "zipimport", "_lsprof", - "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", - "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", - "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", - "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] -)) +working_modules.update([ + "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "rctime" , + "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", + "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", + "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", + "binascii", "_multiprocessing", '_warnings', "_collections", + "_multibytecodec", "micronumpy", "_ffi", "_continuation", "_cffi_backend", + "_csv", "cppyy", "_pypyjson" +]) translation_modules = default_modules.copy() -translation_modules.update(dict.fromkeys( - ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "_md5", "cStringIO", "array", "_ffi", - "binascii", - # the following are needed for pyrepl (and hence for the - # interactive prompt/pdb) - "termios", "_minimal_curses", - ])) +translation_modules.update([ + "fcntl", "rctime", "select", "signal", "_rawffi", "zlib", "struct", "_md5", + "cStringIO", "array", "_ffi", "binascii", + # the following are needed for pyrepl (and hence for the + # interactive prompt/pdb) + "termios", "_minimal_curses", +]) # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": - working_modules["_winreg"] = None + working_modules.add("_winreg") # unix only modules - del working_modules["crypt"] - del working_modules["fcntl"] - del working_modules["pwd"] - del working_modules["termios"] - del working_modules["_minimal_curses"] + working_modules.remove("crypt") + working_modules.remove("fcntl") + working_modules.remove("pwd") + working_modules.remove("termios") + working_modules.remove("_minimal_curses") if "cppyy" in working_modules: - del working_modules["cppyy"] # not tested on win32 + working_modules.remove("cppyy") # not tested on win32 # The _locale module is needed by site.py on Windows - default_modules["_locale"] = None + default_modules.add("_locale") if sys.platform == "sunos5": - 
del working_modules['mmap'] # depend on ctypes, can't get at c-level 'errono' - del working_modules['rctime'] # depend on ctypes, missing tm_zone/tm_gmtoff - del working_modules['signal'] # depend on ctypes, can't get at c-level 'errono' - del working_modules['fcntl'] # LOCK_NB not defined - del working_modules["_minimal_curses"] - del working_modules["termios"] - del working_modules["_multiprocessing"] # depends on rctime + working_modules.remove('mmap') # depend on ctypes, can't get at c-level 'errono' + working_modules.remove('rctime') # depend on ctypes, missing tm_zone/tm_gmtoff + working_modules.remove('signal') # depend on ctypes, can't get at c-level 'errono' + working_modules.remove('fcntl') # LOCK_NB not defined + working_modules.remove("_minimal_curses") + working_modules.remove("termios") + working_modules.remove("_multiprocessing") # depends on rctime if "cppyy" in working_modules: - del working_modules["cppyy"] # depends on ctypes + working_modules.remove("cppyy") # depends on ctypes module_dependencies = { @@ -300,11 +299,6 @@ """Apply PyPy-specific optimization suggestions on the 'config'. The optimizations depend on the selected level and possibly on the backend. """ - # warning: during some tests, the type_system and the backend may be - # unspecified and we get None. It shouldn't occur in translate.py though. - type_system = config.translation.type_system - backend = config.translation.backend - # all the good optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: config.objspace.opcodes.suggest(CALL_METHOD=True) From noreply at buildbot.pypy.org Mon Aug 19 00:43:20 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 19 Aug 2013 00:43:20 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: removed dead import Message-ID: <20130818224320.5C2C01C02DB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66200:80a41d701907 Date: 2013-08-18 15:42 -0700 http://bitbucket.org/pypy/pypy/changeset/80a41d701907/ Log: removed dead import diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -3,7 +3,7 @@ import py from rpython.config.config import (OptionDescription, BoolOption, IntOption, - ChoiceOption, StrOption, to_optparse, ConflictConfigError) + ChoiceOption, StrOption, to_optparse) from rpython.config.translationoption import IS_64_BITS From noreply at buildbot.pypy.org Mon Aug 19 00:45:24 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 19 Aug 2013 00:45:24 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Added need modules for this test Message-ID: <20130818224524.7FBB31C02DB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.4 Changeset: r66201:e464044561a2 Date: 2013-08-18 15:44 -0700 http://bitbucket.org/pypy/pypy/changeset/e464044561a2/ Log: Added need modules for this test diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -100,7 +100,7 @@ spaceconfig = { "usemodules": [ '_multiprocessing', 'thread', 'signal', 'struct', 'array', - 'itertools', '_socket', 'binascii', + 'itertools', '_socket', 'binascii', 'select', 'fcntl', ] } From noreply at buildbot.pypy.org Mon Aug 19 08:58:56 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 08:58:56 +0200 (CEST) Subject: [pypy-commit] stmgc 
nonmovable-int-ref: add debug print Message-ID: <20130819065856.583511C087E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: nonmovable-int-ref Changeset: r483:9cd8cc0e987a Date: 2013-08-19 08:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/9cd8cc0e987a/ Log: add debug print diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -60,6 +60,8 @@ result = (intptr_t)stub; spinlock_release(d->public_descriptor->collection_lock); stm_register_integer_address(result); + + dprintf(("allocate_public_int_adr(%p): %p", obj, stub)); return result; } From noreply at buildbot.pypy.org Mon Aug 19 08:58:55 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 08:58:55 +0200 (CEST) Subject: [pypy-commit] stmgc nonmovable-int-ref: merge Message-ID: <20130819065855.2DF3F1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: nonmovable-int-ref Changeset: r482:bd11f3c7cfe8 Date: 2013-08-15 18:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/bd11f3c7cfe8/ Log: merge diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1337,9 +1337,13 @@ and then free B, which will not be used any more. */ size_t size = stmgc_size(B); assert(B->h_tid & GCFLAG_BACKUP_COPY); + /* if h_original was 0, it must stay that way and not point + to itself. (B->h_original may point to P) */ + revision_t h_original = P->h_original; memcpy(((char *)P) + offsetof(struct stm_object_s, h_revision), ((char *)B) + offsetof(struct stm_object_s, h_revision), size - offsetof(struct stm_object_s, h_revision)); + P->h_original = h_original; assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); stmgcpage_free(B); dprintf(("abort: free backup at %p\n", B)); diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -136,6 +136,8 @@ return (revision_t)p; } + assert(p->h_original != (revision_t)p); + dprintf(("stm_id(%p) has orig fst: %p\n", p, (gcptr)p->h_original)); return p->h_original; diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -175,6 +175,7 @@ stm_copy_to_old_id_copy(obj, id_obj); fresh_old_copy = id_obj; + fresh_old_copy->h_original = 0; obj->h_tid &= ~GCFLAG_HAS_ID; } else { diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -157,7 +157,29 @@ 0, 0, 0, 0] +def test_bug(): + p1 = nalloc(HDR) + pid = lib.stm_id(p1) + lib.stm_push_root(p1) + minor_collect() + p1o = lib.stm_pop_root() + assert p1o == ffi.cast("gcptr", pid) + assert follow_original(p1o) == ffi.NULL + +def test_bug2(): + p = oalloc(HDR+WORD) + + def cb(c): + if c == 0: + pw = lib.stm_write_barrier(p) + abort_and_retry() + lib.stm_push_root(p) + perform_transaction(cb) + p = lib.stm_pop_root() + assert follow_original(p) == ffi.NULL + + def test_allocate_public_integer_address(): p1 = palloc(HDR) From noreply at buildbot.pypy.org Mon Aug 19 08:59:21 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 08:59:21 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix that test so it makes sense Message-ID: <20130819065921.F2C381C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66202:6d7c5891dacc Date: 2013-08-19 08:49 +0200 http://bitbucket.org/pypy/pypy/changeset/6d7c5891dacc/ Log: fix that test so it makes sense diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -795,6 +795,7 @@ def 
define_compile_framework_ptr_eq(cls): # test ptr_eq + @dont_look_inside def raiseassert(cond): if not bool(cond): raise AssertionError @@ -812,14 +813,17 @@ raiseassert(x0 == ptrs[1]) raiseassert(x0 != ptrs[2]) raiseassert(x0 != ptrs[3]) + raiseassert(x1 != ptrs[0]) raiseassert(x1 != ptrs[1]) raiseassert(x1 == ptrs[2]) raiseassert(x1 != ptrs[3]) + raiseassert(x2 == ptrs[0]) raiseassert(x2 != ptrs[1]) raiseassert(x2 != ptrs[2]) raiseassert(x2 != ptrs[3]) + raiseassert(ptrs[0] is None) raiseassert(ptrs[1] is not None) raiseassert(ptrs[2] is not None) From noreply at buildbot.pypy.org Mon Aug 19 08:59:23 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 08:59:23 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: import stmgc with with allocate_public_integer_address Message-ID: <20130819065923.71B5E1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66203:56869831cd72 Date: 2013-08-19 08:53 +0200 http://bitbucket.org/pypy/pypy/changeset/56869831cd72/ Log: import stmgc with with allocate_public_integer_address diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -275,29 +275,81 @@ /* Version of stm_DirectReadBarrier() that doesn't abort and assumes * that 'P' was already an up-to-date result of a previous * stm_DirectReadBarrier(). We only have to check if we did in the - * meantime a stm_write_barrier(). + * meantime a stm_write_barrier(). Should only be called if we + * have the flag PUBLIC_TO_PRIVATE or on MOVED objects. This version + * should never abort (it is used in stm_decode_abort_info()). */ - if (P->h_tid & GCFLAG_PUBLIC) + assert(P->h_tid & GCFLAG_PUBLIC); + assert(!(P->h_tid & GCFLAG_STUB)); + + if (P->h_tid & GCFLAG_MOVED) { - if (P->h_tid & GCFLAG_MOVED) + dprintf(("repeat_read_barrier: %p -> %p moved\n", P, + (gcptr)P->h_revision)); + P = (gcptr)P->h_revision; + assert(P->h_tid & GCFLAG_PUBLIC); + assert(!(P->h_tid & GCFLAG_STUB)); + assert(!(P->h_tid & GCFLAG_MOVED)); + if (!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) + return P; + } + assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); + + struct tx_descriptor *d = thread_descriptor; + wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + /* We have a key in 'public_to_private'. The value is the + corresponding private object. */ + dprintf(("repeat_read_barrier: %p -> %p public_to_private\n", P, item->val)); + P = item->val; + assert(!(P->h_tid & GCFLAG_PUBLIC)); + assert(!(P->h_tid & GCFLAG_STUB)); + assert(is_private(P)); + return P; + + no_private_obj: + /* Key not found. It should not be waiting in 'stolen_objects', + because this case from steal.c applies to objects to were originally + backup objects. 'P' cannot be a backup object if it was obtained + earlier as a result of stm_read_barrier(). + */ + return P; +} + +gcptr stm_ImmutReadBarrier(gcptr P) +{ + assert(P->h_tid & GCFLAG_STUB); + assert(P->h_tid & GCFLAG_PUBLIC); + + revision_t v = ACCESS_ONCE(P->h_revision); + assert(IS_POINTER(v)); /* "is a pointer", "has a more recent revision" */ + + if (!(v & 2)) + { + P = (gcptr)v; + } + else + { + /* follow a stub reference */ + struct tx_descriptor *d = thread_descriptor; + struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); + if (foreign_pd == d->public_descriptor) { - P = (gcptr)P->h_revision; - assert(P->h_tid & GCFLAG_PUBLIC); + /* Same thread: dereference the pointer directly. 
*/ + dprintf(("immut_read_barrier: %p -> %p via stub\n ", P, + (gcptr)(v - 2))); + P = (gcptr)(v - 2); } - if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) + else { - struct tx_descriptor *d = thread_descriptor; - wlog_t *item; - G2L_FIND(d->public_to_private, P, item, goto no_private_obj); - - P = item->val; - assert(!(P->h_tid & GCFLAG_PUBLIC)); - no_private_obj: - ; + /* stealing: needed because accessing v - 2 from this thread + is forbidden (the target might disappear under our feet) */ + dprintf(("immut_read_barrier: %p -> stealing...\n ", P)); + stm_steal_stub(P); } } - assert(!(P->h_tid & GCFLAG_STUB)); - return P; + return stm_immut_read_barrier(P); /* retry */ } static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj, @@ -565,6 +617,16 @@ } } +gcptr stm_RepeatWriteBarrier(gcptr P) +{ + assert(!(P->h_tid & GCFLAG_IMMUTABLE)); + assert(is_private(P)); + assert(P->h_tid & GCFLAG_WRITE_BARRIER); + P->h_tid &= ~GCFLAG_WRITE_BARRIER; + gcptrlist_insert(&thread_descriptor->old_objects_to_trace, P); + return P; +} + gcptr stm_WriteBarrier(gcptr P) { assert(!(P->h_tid & GCFLAG_IMMUTABLE)); @@ -1276,9 +1338,13 @@ and then free B, which will not be used any more. */ size_t size = stmgc_size(B); assert(B->h_tid & GCFLAG_BACKUP_COPY); + /* if h_original was 0, it must stay that way and not point + to itself. (B->h_original may point to P) */ + revision_t h_original = P->h_original; memcpy(((char *)P) + offsetof(struct stm_object_s, h_revision), ((char *)B) + offsetof(struct stm_object_s, h_revision), size - offsetof(struct stm_object_s, h_revision)); + P->h_original = h_original; assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); stmgcpage_free(B); dprintf(("abort: free backup at %p\n", B)); diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -70,11 +70,11 @@ static const revision_t GCFLAG_VISITED = STM_FIRST_GCFLAG << 1; static const revision_t GCFLAG_PUBLIC = STM_FIRST_GCFLAG << 2; static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; -static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; +// in stmgc.h: GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; -static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; +// in stmgc.h: GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; -static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; +// in stmgc.h: GCFLAG_STUB = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; @@ -196,8 +196,10 @@ void SpinLoop(int); gcptr stm_DirectReadBarrier(gcptr); +gcptr stm_WriteBarrier(gcptr); gcptr stm_RepeatReadBarrier(gcptr); -gcptr stm_WriteBarrier(gcptr); +gcptr stm_ImmutReadBarrier(gcptr); +gcptr stm_RepeatWriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr); /* debugging: read barrier, but not recording anything */ int _stm_is_private(gcptr); /* debugging */ diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -24,6 +24,53 @@ stm_bytes_to_clear_on_abort = bytes; } + +intptr_t stm_allocate_public_integer_address(gcptr obj) +{ + struct 
tx_descriptor *d = thread_descriptor; + gcptr stub; + intptr_t result; + /* plan: we allocate a small stub whose reference + we never give to the caller except in the form + of an integer. + During major collections, we visit them and update + their references. */ + + /* we don't want to deal with young objs */ + if (!(obj->h_tid & GCFLAG_OLD)) { + stm_push_root(obj); + stm_minor_collect(); + obj = stm_pop_root(); + } + + spinlock_acquire(d->public_descriptor->collection_lock, 'P'); + + stub = stm_stub_malloc(d->public_descriptor, 0); + stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) + | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_SMALLSTUB + | GCFLAG_OLD; + + stub->h_revision = ((revision_t)obj) | 2; + if (!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) && obj->h_original) { + stub->h_original = obj->h_original; + } + else { + stub->h_original = (revision_t)obj; + } + + result = (intptr_t)stub; + spinlock_release(d->public_descriptor->collection_lock); + stm_register_integer_address(result); + + dprintf(("allocate_public_int_adr(%p): %p", obj, stub)); + return result; +} + + + + + + /************************************************************/ /* Each object has a h_original pointer to an old copy of the same object (e.g. an old revision), the "original". @@ -92,6 +139,8 @@ return (revision_t)p; } + assert(p->h_original != (revision_t)p); + dprintf(("stm_id(%p) has orig fst: %p\n", p, (gcptr)p->h_original)); return p->h_original; @@ -154,6 +203,19 @@ return (p1 == p2); } +_Bool stm_pointer_equal_prebuilt(gcptr p1, gcptr p2) +{ + assert(p2 != NULL); + assert(p2->h_tid & GCFLAG_PREBUILT_ORIGINAL); + + if (p1 == p2) + return 1; + + /* the only possible case to still get True is if p2 == p1->h_original */ + return (p1 != NULL) && (p1->h_original == (revision_t)p2) && + !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL); +} + /************************************************************/ void stm_abort_info_push(gcptr obj, long fieldoffsets[]) @@ -205,7 +267,7 @@ WRITE_BUF(buffer, res_size); WRITE('e'); for (i=0; i<d->abortinfo.size; i+=2) { - char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]); + char *object = (char*)stm_repeat_read_barrier(d->abortinfo.items[i+0]); long *fieldoffsets = (long*)d->abortinfo.items[i+1]; long kind, offset; size_t rps_size; diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -23,6 +23,9 @@ /* Only computed during a major collection */ static size_t mc_total_in_use, mc_total_reserved; +/* keeps track of registered smallstubs that will survive unless unregistered */ +static struct G2L registered_stubs; + /* For tests */ long stmgcpage_count(int quantity) { @@ -63,6 +66,8 @@ nblocks_for_size[i] = (GC_PAGE_SIZE - sizeof(page_header_t)) / (WORD * i); } + + memset(&registered_stubs, 0, sizeof(registered_stubs)); } void stmgcpage_init_tls(void) @@ -209,6 +214,34 @@ } +/***** registering of small stubs as integer addresses *****/ + +void stm_register_integer_address(intptr_t adr) +{ + gcptr obj = (gcptr)adr; + assert(obj->h_tid & GCFLAG_SMALLSTUB); + assert(obj->h_tid & GCFLAG_PUBLIC); + + stmgcpage_acquire_global_lock(); + g2l_insert(&registered_stubs, obj, NULL); + stmgcpage_release_global_lock(); + dprintf(("registered %p\n", obj)); +} + +void stm_unregister_integer_address(intptr_t adr) +{ + gcptr obj = (gcptr)adr; + assert(obj->h_tid & GCFLAG_SMALLSTUB); + assert(obj->h_tid & GCFLAG_PUBLIC); + + stmgcpage_acquire_global_lock(); + 
g2l_delete_item(&registered_stubs, obj); + stmgcpage_release_global_lock(); + dprintf(("unregistered %p\n", obj)); +} + + + /***** Major collections: marking *****/ static struct GcPtrList objects_to_trace; @@ -460,6 +493,27 @@ } } +static void mark_registered_stubs(void) +{ + wlog_t *item; + G2L_LOOP_FORWARD(registered_stubs, item) { + gcptr R = item->addr; + assert(R->h_tid & GCFLAG_SMALLSTUB); + + R->h_tid |= (GCFLAG_MARKED | GCFLAG_VISITED); + + gcptr L = (gcptr)(R->h_revision - 2); + L = stmgcpage_visit(L); + R->h_revision = ((revision_t)L) | 2; + + /* h_original will be kept up-to-date because + it is either == L or L's h_original. And + h_originals don't move */ + } G2L_LOOP_END; + +} + + static void mark_roots(gcptr *root, gcptr *end) { assert(*root == END_MARKER_ON); @@ -497,6 +551,14 @@ visit_take_protected(d->thread_local_obj_ref); visit_take_protected(&d->old_thread_local_obj); + /* the abortinfo objects */ + long i, size = d->abortinfo.size; + gcptr *items = d->abortinfo.items; + for (i = 0; i < size; i += 2) { + visit_take_protected(&items[i]); + /* items[i+1] is not a gc ptr */ + } + /* the current transaction's private copies of public objects */ wlog_t *item; G2L_LOOP_FORWARD(d->public_to_private, item) { @@ -528,8 +590,8 @@ } G2L_LOOP_END; /* reinsert to real pub_to_priv */ - long i, size = new_public_to_private.size; - gcptr *items = new_public_to_private.items; + size = new_public_to_private.size; + items = new_public_to_private.items; for (i = 0; i < size; i += 2) { g2l_insert(&d->public_to_private, items[i], items[i + 1]); } @@ -890,6 +952,7 @@ assert(gcptrlist_size(&objects_to_trace) == 0); mark_prebuilt_roots(); + mark_registered_stubs(); mark_all_stack_roots(); do { visit_all_objects(); diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -176,6 +176,7 @@ stm_copy_to_old_id_copy(obj, id_obj); fresh_old_copy = id_obj; + fresh_old_copy->h_original = 0; obj->h_tid &= ~GCFLAG_HAS_ID; } else { @@ -437,6 +438,19 @@ spinlock_release(d->public_descriptor->collection_lock); } +static void mark_extra_stuff(struct tx_descriptor *d) +{ + visit_if_young(d->thread_local_obj_ref); + visit_if_young(&d->old_thread_local_obj); + + long i, size = d->abortinfo.size; + gcptr *items = d->abortinfo.items; + for (i = 0; i < size; i += 2) { + visit_if_young(&items[i]); + /* items[i+1] is not a gc ptr */ + } +} + static void minor_collect(struct tx_descriptor *d) { dprintf(("minor collection [%p to %p]\n", @@ -452,8 +466,7 @@ mark_young_roots(d); - visit_if_young(d->thread_local_obj_ref); - visit_if_young(&d->old_thread_local_obj); + mark_extra_stuff(d); mark_stolen_young_stubs(d); diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -20,6 +20,59 @@ }; static __thread struct tx_steal_data *steal_data; +static void replace_ptr_to_immutable_with_stub(gcptr * pobj) +{ + gcptr stub, obj = *pobj; + assert(obj->h_tid & GCFLAG_IMMUTABLE); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + if (obj->h_tid & GCFLAG_PUBLIC) { + /* young public, replace with stolen old copy */ + assert(obj->h_tid & GCFLAG_MOVED); + assert(IS_POINTER(obj->h_revision)); + stub = (gcptr)obj->h_revision; + assert(!IS_POINTER(stub->h_revision)); /* not outdated */ + goto done; + } + + /* old or young protected! 
mark as PUBLIC */ + if (!(obj->h_tid & GCFLAG_OLD)) { + /* young protected */ + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); + + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; + } + obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. */ + dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); + stub = O; + goto done; + } + /* old protected: */ + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + + return; + done: + *pobj = stub; + dprintf((" stolen: fixing *%p: %p -> %p\n", pobj, obj, stub)); +} + static void replace_ptr_to_protected_with_stub(gcptr *pobj) { gcptr stub, obj = *pobj; @@ -28,49 +81,7 @@ return; if (obj->h_tid & GCFLAG_IMMUTABLE) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - if (obj->h_tid & GCFLAG_PUBLIC) { - /* young public, replace with stolen old copy */ - assert(obj->h_tid & GCFLAG_MOVED); - assert(IS_POINTER(obj->h_revision)); - stub = (gcptr)obj->h_revision; - assert(!IS_POINTER(stub->h_revision)); /* not outdated */ - goto done; - } - - /* old or young protected! mark as PUBLIC */ - if (!(obj->h_tid & GCFLAG_OLD)) { - /* young protected */ - gcptr O; - - if (obj->h_tid & GCFLAG_HAS_ID) { - /* use id-copy for us */ - O = (gcptr)obj->h_original; - obj->h_tid &= ~GCFLAG_HAS_ID; - stm_copy_to_old_id_copy(obj, O); - O->h_original = 0; - } else { - O = stmgc_duplicate_old(obj); - - /* young and without original? */ - if (!(obj->h_original)) - obj->h_original = (revision_t)O; - } - obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); - obj->h_revision = (revision_t)O; - - O->h_tid |= GCFLAG_PUBLIC; - /* here it is fine if it stays in read caches because - the object is immutable anyway and there are no - write_barriers allowed. */ - dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); - stub = O; - goto done; - } - /* old protected: */ - dprintf(("prot immutable -> public: %p\n", obj)); - obj->h_tid |= GCFLAG_PUBLIC; - + replace_ptr_to_immutable_with_stub(pobj); return; } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -29,12 +29,21 @@ #define PREBUILT_REVISION 1 +/* push roots around allocating functions! */ + /* allocate an object out of the local nursery */ gcptr stm_allocate(size_t size, unsigned long tid); /* allocate an object that is be immutable. 
it cannot be changed with a stm_write_barrier() or after the next commit */ gcptr stm_allocate_immutable(size_t size, unsigned long tid); +/* allocates a public reference to the object that will + not be freed until stm_unregister_integer_address is + called on the result */ +intptr_t stm_allocate_public_integer_address(gcptr); +void stm_unregister_integer_address(intptr_t); + + /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); /* returns a number for the object which is unique during its lifetime */ @@ -42,6 +51,7 @@ /* returns nonzero if the two object-copy pointers belong to the same original object */ _Bool stm_pointer_equal(gcptr, gcptr); +_Bool stm_pointer_equal_prebuilt(gcptr, gcptr); /* 2nd arg is known prebuilt */ /* to push/pop objects into the local shadowstack */ #if 0 // (optimized version below) @@ -59,7 +69,7 @@ int stm_enter_callback_call(void); void stm_leave_callback_call(int); -/* read/write barriers (the most general versions only for now). +/* read/write barriers. - the read barrier must be applied before reading from an object. the result is valid as long as we're in the same transaction, @@ -69,10 +79,28 @@ the result is valid for a shorter period of time: we have to do stm_write_barrier() again if we ended the transaction, or if we did a potential collection (e.g. stm_allocate()). + + - as an optimization, stm_repeat_read_barrier() can be used + instead of stm_read_barrier() if the object was already + obtained by a stm_read_barrier() in the same transaction. + The only thing that may have occurred is that a + stm_write_barrier() on the same object could have made it + invalid. + + - a different optimization is to read immutable fields: in order + to do that, use stm_immut_read_barrier(), which only activates + on stubs. + + - stm_repeat_write_barrier() can be used on an object on which + we already did stm_write_barrier(), but a potential collection + can have occurred. */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); gcptr stm_write_barrier(gcptr); +gcptr stm_repeat_read_barrier(gcptr); +gcptr stm_immut_read_barrier(gcptr); +gcptr stm_repeat_write_barrier(gcptr); /* <= always returns its argument */ #endif /* start a new transaction, calls callback(), and when it returns @@ -148,6 +176,8 @@ extern __thread void *stm_to_clear_on_abort; extern __thread size_t stm_bytes_to_clear_on_abort; +/* only user currently is stm_allocate_public_integer_address() */ +void stm_register_integer_address(intptr_t); /* macro functionality */ @@ -159,7 +189,13 @@ extern __thread revision_t stm_private_rev_num; gcptr stm_DirectReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); +gcptr stm_RepeatReadBarrier(gcptr); +gcptr stm_ImmutReadBarrier(gcptr); +gcptr stm_RepeatWriteBarrier(gcptr); +static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; static const revision_t GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; +static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_STUB = STM_FIRST_GCFLAG << 8; extern __thread char *stm_read_barrier_cache; #define FX_MASK 65535 #define FXCACHE_AT(obj) \ @@ -179,5 +215,20 @@ stm_WriteBarrier(obj) \ : (obj)) +#define stm_repeat_read_barrier(obj) \ + (UNLIKELY((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED)) ? \ + stm_RepeatReadBarrier(obj) \ + : (obj)) + +#define stm_immut_read_barrier(obj) \ + (UNLIKELY((obj)->h_tid & GCFLAG_STUB) ? 
\ + stm_ImmutReadBarrier(obj) \ + : (obj)) + +#define stm_repeat_write_barrier(obj) \ + (UNLIKELY((obj)->h_tid & GCFLAG_WRITE_BARRIER) ? \ + stm_RepeatWriteBarrier(obj) \ + : (obj)) + #endif From noreply at buildbot.pypy.org Mon Aug 19 08:59:24 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 08:59:24 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: the revision too... Message-ID: <20130819065924.AA0971C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66204:c64c72742660 Date: 2013-08-19 08:53 +0200 http://bitbucket.org/pypy/pypy/changeset/c64c72742660/ Log: the revision too... diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -12cf412eb2d7+ +9cd8cc0e987a From noreply at buildbot.pypy.org Mon Aug 19 08:59:25 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 08:59:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: start using allocate_public_integer_address. Breaks tests in test_stm_integration.py... Message-ID: <20130819065925.E41261C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66205:a95356764f49 Date: 2013-08-19 08:58 +0200 http://bitbucket.org/pypy/pypy/changeset/a95356764f49/ Log: start using allocate_public_integer_address. Breaks tests in test_stm_integration.py... diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -195,10 +195,7 @@ # we want the descr to keep alive guardtok.faildescr.rd_loop_token = self.current_clt fail_descr = rgc.cast_instance_to_gcref(guardtok.faildescr) - if self.cpu.gc_ll_descr.stm: - # only needed with STM, I think.. 
- fail_descr = rgc._make_sure_does_not_move(fail_descr) - fail_descr = rgc.cast_gcref_to_int(fail_descr) + fail_descr = rgc._make_sure_does_not_move(fail_descr) return fail_descr, target def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc): @@ -230,8 +227,7 @@ raise AssertionError(kind) gcref = rgc.cast_instance_to_gcref(value) - gcref = rgc._make_sure_does_not_move(gcref) - value = rgc.cast_gcref_to_int(gcref) + value = rgc._make_sure_does_not_move(gcref) je_location = self._call_assembler_check_descr(value, tmploc) # # Path A: use assembler_helper_adr diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -103,27 +103,19 @@ v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): p = rgc.cast_instance_to_gcref(v.value) - new_p = rgc._make_sure_does_not_move(p) - if we_are_translated(): - v.value = new_p - else: - assert p == new_p - gcrefs_output_list.append(new_p) - + v.imm_value = rgc._make_sure_does_not_move(p) + # XXX: fix for stm, record imm_values and unregister + # them again (below too): + gcrefs_output_list.append(p) + + if self.stm: + return # for descr, we do it on the fly in assembler.py if op.is_guard() or op.getopnum() == rop.FINISH: # the only ops with descrs that get recorded in a trace - from rpython.jit.metainterp.history import AbstractDescr descr = op.getdescr() llref = rgc.cast_instance_to_gcref(descr) - new_llref = rgc._make_sure_does_not_move(llref) - if we_are_translated(): - new_d = rgc.try_cast_gcref_to_instance(AbstractDescr, - new_llref) - # tests don't allow this: - op.setdescr(new_d) - else: - assert llref == new_llref - gcrefs_output_list.append(new_llref) + rgc._make_sure_does_not_move(llref) + gcrefs_output_list.append(llref) def rewrite_assembler(self, cpu, operations, gcrefs_output_list): if not self.stm: @@ -711,6 +703,12 @@ if self.stm: # XXX remove the indirections in the following calls from rpython.rlib import rstm + def stm_allocate_nonmovable_int_adr(obj): + return llop1.stm_allocate_nonmovable_int_adr( + lltype.Signed, obj) + self.generate_function('stm_allocate_nonmovable_int_adr', + stm_allocate_nonmovable_int_adr, + [llmemory.GCREF], RESULT=lltype.Signed) self.generate_function('stm_try_inevitable', rstm.become_inevitable, [], RESULT=lltype.Void) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -249,9 +249,8 @@ self._store_and_reset_exception(self.mc, eax) ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') self.mc.MOV_br(ofs, eax.value) - propagate_exception_descr = rgc.cast_gcref_to_int( - rgc._make_sure_does_not_move( - rgc.cast_instance_to_gcref(self.cpu.propagate_exception_descr))) + propagate_exception_descr = rgc._make_sure_does_not_move( + rgc.cast_instance_to_gcref(self.cpu.propagate_exception_descr)) ofs = self.cpu.get_ofs_of_frame_field('jf_descr') self.mc.MOV(RawEbpLoc(ofs), imm(propagate_exception_descr)) self.mc.MOV_rr(eax.value, ebp.value) @@ -2122,10 +2121,10 @@ cb.emit() def _store_force_index(self, guard_op): - faildescr = guard_op.getdescr() + faildescr = rgc._make_sure_does_not_move( + rgc.cast_instance_to_gcref(guard_op.getdescr())) ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - self.mc.MOV(raw_stack(ofs), imm(rffi.cast(lltype.Signed, - cast_instance_to_gcref(faildescr)))) + self.mc.MOV(raw_stack(ofs), imm(faildescr)) def 
_emit_guard_not_forced(self, guard_token): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -8,7 +8,7 @@ BoxFloat, INT, REF, FLOAT, TargetToken) from rpython.jit.backend.x86.regloc import * -from rpython.rtyper.lltypesystem import lltype, rffi, rstr +from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rlib.objectmodel import we_are_translated from rpython.rlib import rgc @@ -45,9 +45,11 @@ if isinstance(c, ConstInt): return imm(c.value) elif isinstance(c, ConstPtr): - if we_are_translated() and c.value and rgc.can_move(c.value): + # if we_are_translated() and c.value and rgc.can_move(c.value): + # not_implemented("convert_to_imm: ConstPtr needs special care") + if c.value and not c.imm_value: not_implemented("convert_to_imm: ConstPtr needs special care") - return imm(rffi.cast(lltype.Signed, c.value)) + return imm(c.get_imm_value()) else: not_implemented("convert_to_imm: got a %s" % c) @@ -369,7 +371,6 @@ fail_descr = rgc.cast_instance_to_gcref(descr) # we know it does not move, but well fail_descr = rgc._make_sure_does_not_move(fail_descr) - fail_descr = rgc.cast_gcref_to_int(fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) locs = [loc, imm(fail_descr)] diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -21,10 +21,14 @@ from rpython.memory.gc.stmgc import StmGC from rpython.jit.metainterp import history from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.rlib import rgc from rpython.rtyper.llinterp import LLException import itertools, sys import ctypes +def cast_to_int(obj): + return rgc.cast_gcref_to_int(rgc.cast_instance_to_gcref(obj)) + CPU = getcpuclass() class MockSTMRootMap(object): @@ -171,6 +175,14 @@ self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, RESULT=lltype.Bool) + def stm_allocate_nonmovable_int_adr(obj): + assert False # should not be reached + return rgc.cast_gcref_to_int(obj) + self.generate_function('stm_allocate_nonmovable_int_adr', + stm_allocate_nonmovable_int_adr, + [llmemory.GCREF], + RESULT=lltype.Signed) + def malloc_big_fixedsize(size, tid): entries = size + StmGC.GCHDRSIZE TP = rffi.CArray(lltype.Char) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -10,6 +10,7 @@ from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.codewriter import heaptracker, longlong from rpython.rlib.objectmodel import compute_identity_hash +from rpython.rlib import rgc import weakref # ____________________________________________________________ @@ -308,17 +309,24 @@ class ConstPtr(Const): type = REF value = lltype.nullptr(llmemory.GCREF.TO) - _attrs_ = ('value',) + imm_value = 0 + _attrs_ = ('value', 'imm_value',) def __init__(self, value): assert lltype.typeOf(value) == llmemory.GCREF self.value = value + self.imm_value = 0 def clonebox(self): return BoxPtr(self.value) nonconstbox = clonebox + def get_imm_value(self): + # imm_value set if needed: + assert (not self.value) or self.imm_value + return self.imm_value + def 
getref_base(self): return self.value diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -314,13 +314,6 @@ self.can_move_ptr = getfn(GCClass.can_move.im_func, [s_gc, annmodel.SomeAddress()], annmodel.SomeBool()) - if hasattr(GCClass, 'get_original_copy'): - self.get_original_copy_ptr = getfn( - GCClass.get_original_copy.im_func, - [s_gc, annmodel.SomePtr(llmemory.GCREF)], - annmodel.SomePtr(llmemory.GCREF)) - else: - self.get_original_copy_ptr = None if hasattr(GCClass, 'shrink_array'): self.shrink_array_ptr = getfn( @@ -751,16 +744,6 @@ hop.genop("direct_call", [self.can_move_ptr, self.c_const_gc, v_addr], resultvar=op.result) - def gct_gc_get_original_copy(self, hop): - if self.get_original_copy_ptr is None: - raise Exception("unreachable code") - op = hop.spaceop - v_addr = hop.genop('cast_ptr_to_adr', - [op.args[0]], resulttype=llmemory.Address) - hop.genop("direct_call", [self.get_original_copy_ptr, - self.c_const_gc, v_addr], - resultvar=op.result) - def gct_shrink_array(self, hop): if self.shrink_array_ptr is None: return GCTransformer.gct_shrink_array(self, hop) diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -99,6 +99,7 @@ gct_stm_become_inevitable = _gct_with_roots_pushed gct_stm_perform_transaction = _gct_with_roots_pushed + gct_stm_allocate_nonmovable_int_adr = _gct_with_roots_pushed class StmRootWalker(BaseRootWalker): diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -26,12 +26,6 @@ return None # means 'not translated at all'; # in "if stm_is_enabled()" it is equivalent to False -def stm_get_original_copy(obj): - """ Returns a non-moving reference to an object (only use if obj is - already OLD!) - """ - return obj - # ____________________________________________________________ # Annotation and specialization @@ -76,20 +70,6 @@ hop.exception_cannot_occur() return hop.inputconst(lltype.Bool, hop.s_result.const) - -class StmGCGetOriginalCopy(ExtRegistryEntry): - _about_ = stm_get_original_copy - - def compute_result_annotation(self, s_obj): - from rpython.annotator import model as annmodel - return annmodel.SomePtr(llmemory.GCREF) - - def specialize_call(self, hop): - hop.exception_cannot_occur() - return hop.genop('gc_get_original_copy', hop.args_v, - resulttype=hop.r_result) - - def can_move(p): """Check if the GC object 'p' is at an address that can move. Must not be called with None. With non-moving GCs, it is always False. 
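# [Illustrative sketch, not part of the changeset: across the hunks above and
#  below, _make_sure_does_not_move() now returns a plain integer address
#  (lltype.Signed) instead of a GCREF, so the backend call sites drop the
#  separate rgc.cast_gcref_to_int() step; under STM the integer comes from
#  llop.stm_allocate_nonmovable_int_adr, as the next hunk shows. The helper
#  below only models that caller-side contract; its parameters stand in for
#  the real rgc functions.]
def _pin_descr_for_machine_code_model(descr, cast_instance_to_gcref,
                                      make_sure_does_not_move):
    # returns an integer that the backend can embed directly as an immediate,
    # e.g. self.mc.MOV(raw_stack(ofs), imm(result)) as in assembler.py above
    gcref = cast_instance_to_gcref(descr)
    return make_sure_does_not_move(gcref)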
@@ -119,7 +99,13 @@ on objects that are already a bit old, so have a chance to be already non-movable.""" if not we_are_translated(): - return p + return cast_gcref_to_int(p) + + if stm_is_enabled(): + from rpython.rtyper.lltypesystem.lloperation import llop + res = llop.stm_allocate_nonmovable_int_adr(lltype.Signed, p) + return res + i = 0 while can_move(p): if i > 6: @@ -127,10 +113,7 @@ collect(i) i += 1 - if stm_is_enabled(): - return stm_get_original_copy(p) - else: - return p + return 0 def _heap_stats(): raise NotImplementedError # can't be run directly diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -425,6 +425,7 @@ 'stm_finalize': LLOp(), 'stm_barrier': LLOp(sideeffects=False), 'stm_allocate': LLOp(sideeffects=False, canmallocgc=True), + 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), 'stm_become_inevitable': LLOp(canmallocgc=True), 'stm_minor_collect': LLOp(canmallocgc=True), 'stm_major_collect': LLOp(canmallocgc=True), @@ -533,7 +534,6 @@ 'gc_obtain_free_space': LLOp(), 'gc_set_max_heap_size': LLOp(), 'gc_can_move' : LLOp(sideeffects=False), - 'gc_get_original_copy': LLOp(sideeffects=False), 'gc_thread_prepare' : LLOp(canmallocgc=True), 'gc_thread_run' : LLOp(), 'gc_thread_start' : LLOp(), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -617,6 +617,7 @@ OP_STM_MAJOR_COLLECT = _OP_STM OP_STM_MINOR_COLLECT = _OP_STM OP_STM_CLEAR_EXCEPTION_DATA_ON_ABORT= _OP_STM + OP_STM_ALLOCATE_NONMOVABLE_INT_ADR = _OP_STM def OP_PTR_NONZERO(self, op): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -117,6 +117,11 @@ result = funcgen.expr(op.result) return '%s = stm_weakref_allocate(%s, %s, %s);' % (result, arg0, arg1, arg2) + +def stm_allocate_nonmovable_int_adr(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + result = funcgen.expr(op.result) + return '%s = stm_allocate_public_integer_address(%s);' % (result, arg0) def stm_allocate(funcgen, op): arg0 = funcgen.expr(op.args[0]) diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -15,8 +15,7 @@ 'jit_force_quasi_immutable', 'jit_marker', 'jit_is_virtual', 'jit_record_known_class', 'gc_identityhash', 'gc_id', 'gc_can_move', 'gc__collect', - 'gc_adr_of_root_stack_top', 'gc_get_original_copy', - 'stmgc_get_original_copy', + 'gc_adr_of_root_stack_top', 'weakref_create', 'weakref_deref', 'stm_threadlocalref_get', 'stm_threadlocalref_set', 'stm_threadlocalref_count', 'stm_threadlocalref_addr', From noreply at buildbot.pypy.org Mon Aug 19 10:17:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Aug 2013 10:17:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Simplify the write barrier by only passing one argument, which is Message-ID: <20130819081753.C2AA31C0149@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66206:dab095843f89 Date: 2013-08-19 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/dab095843f89/ Log: Simplify the write barrier by only passing one argument, which is the object we write into --- and not the new value written. See comments in minimark. 
This allows means that assume_young_pointers is removed because it's now the same as write_barrier. diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -452,7 +452,7 @@ # Write code equivalent to write_barrier() in the GC: it checks # a flag in the object at arglocs[0], and if set, it calls a # helper piece of assembler. The latter saves registers as needed - # and call the function jit_remember_young_pointer() from the GC. + # and call the function remember_young_pointer() from the GC. if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -136,7 +136,7 @@ """ Allocate a new frame, overwritten by tests """ frame = jitframe.JITFRAME.allocate(frame_info) - llop.gc_assume_young_pointers(lltype.Void, frame) + llop.gc_writebarrier(lltype.Void, frame) return frame class JitFrameDescrs: @@ -360,8 +360,7 @@ def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() - # to work. Additionally, 'hybrid' is missing some stuff like - # jit_remember_young_pointer() for now. + # to work. 'hybrid' could work but isn't tested with the JIT. if self.gcdescr.config.translation.gc not in ('minimark',): raise NotImplementedError("--gc=%s not implemented with the JIT" % (self.gcdescr.config.translation.gc,)) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -97,7 +97,7 @@ new_frame.jf_savedata = frame.jf_savedata new_frame.jf_guard_exc = frame.jf_guard_exc # all other fields are empty - llop.gc_assume_young_pointers(lltype.Void, new_frame) + llop.gc_writebarrier(lltype.Void, new_frame) return lltype.cast_opaque_ptr(llmemory.GCREF, new_frame) except Exception, e: print "Unhandled exception", e, "in realloc_frame" diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2022,7 +2022,7 @@ # Write code equivalent to write_barrier() in the GC: it checks # a flag in the object at arglocs[0], and if set, it calls a # helper piece of assembler. The latter saves registers as needed - # and call the function jit_remember_young_pointer() from the GC. + # and call the function remember_young_pointer() from the GC. 
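# [Illustrative sketch, not part of the diff: the machine code emitted here
#  reduces to a single flag test plus a rarely-taken helper call, roughly the
#  pure-Python model below. The flag corresponds to JIT_WB_IF_FLAG, i.e.
#  GCFLAG_NO_YOUNG_PTRS / GCFLAG_TRACK_YOUNG_PTRS in the GC classes further
#  down; the numeric value used here is only a placeholder.]
JIT_WB_IF_FLAG_SKETCH = 1 << 5                 # placeholder bit for the sketch

def emitted_write_barrier_model(tid, obj, remember_young_pointer):
    # fast path: a single TEST of the object's tid word
    if tid & JIT_WB_IF_FLAG_SKETCH:
        # slow path: the assembler helper ends up calling the GC's function
        remember_young_pointer(obj)

# example: an object whose flag is set gets recorded exactly once
_recorded = []
emitted_write_barrier_model(JIT_WB_IF_FLAG_SKETCH, 'obj', _recorded.append)
assert _recorded == ['obj']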
if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -101,7 +101,7 @@ def set_root_walker(self, root_walker): self.root_walker = root_walker - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): pass def size_gc_header(self, typeid=0): diff --git a/rpython/memory/gc/generation.py b/rpython/memory/gc/generation.py --- a/rpython/memory/gc/generation.py +++ b/rpython/memory/gc/generation.py @@ -336,7 +336,7 @@ addr = pointer.address[0] newaddr = self.copy(addr) pointer.address[0] = newaddr - self.write_into_last_generation_obj(obj, newaddr) + self.write_into_last_generation_obj(obj) # ____________________________________________________________ # Implementation of nursery-only collections @@ -467,9 +467,9 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) def _setup_wb(self): DEBUG = self.DEBUG @@ -480,43 +480,30 @@ # For x86, there is also an extra requirement: when the JIT calls # remember_young_pointer(), it assumes that it will not touch the SSE # registers, so it does not save and restore them (that's a *hack*!). - def remember_young_pointer(addr_struct, addr): + def remember_young_pointer(addr_struct): #llop.debug_print(lltype.Void, "\tremember_young_pointer", - # addr_struct, "<-", addr) + # addr_struct) if DEBUG: ll_assert(not self.is_in_nursery(addr_struct), "nursery object with GCFLAG_NO_YOUNG_PTRS") # # What is important in this function is that it *must* # clear the flag GCFLAG_NO_YOUNG_PTRS from 'addr_struct' - # if 'addr' is in the nursery. It is ok if, accidentally, - # it also clears the flag in some more rare cases, like - # 'addr' being a tagged pointer whose value happens to be - # a large integer that fools is_in_nursery(). - if self.appears_to_be_in_nursery(addr): - self.old_objects_pointing_to_young.append(addr_struct) - self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS - self.write_into_last_generation_obj(addr_struct, addr) + # if the newly written value is in the nursery. It is ok + # if it also clears the flag in some more cases --- it is + # a win to not actually pass the 'newvalue' pointer here. 
+ self.old_objects_pointing_to_young.append(addr_struct) + self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS + self.write_into_last_generation_obj(addr_struct) remember_young_pointer._dont_inline_ = True self.remember_young_pointer = remember_young_pointer - def write_into_last_generation_obj(self, addr_struct, addr): + def write_into_last_generation_obj(self, addr_struct): objhdr = self.header(addr_struct) if objhdr.tid & GCFLAG_NO_HEAP_PTRS: - if (self.is_valid_gc_object(addr) and - not self.is_last_generation(addr)): - objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS - self.last_generation_root_objects.append(addr_struct) - write_into_last_generation_obj._always_inline_ = True - - def assume_young_pointers(self, addr_struct): - objhdr = self.header(addr_struct) - if objhdr.tid & GCFLAG_NO_YOUNG_PTRS: - self.old_objects_pointing_to_young.append(addr_struct) - objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS - if objhdr.tid & GCFLAG_NO_HEAP_PTRS: objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS self.last_generation_root_objects.append(addr_struct) + write_into_last_generation_obj._always_inline_ = True def writebarrier_before_copy(self, source_addr, dest_addr, source_start, dest_start, length): diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1059,16 +1059,16 @@ def JIT_minimal_size_in_nursery(cls): return cls.minimal_size_in_nursery - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_TRACK_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) - def write_barrier_from_array(self, newvalue, addr_array, index): + def write_barrier_from_array(self, addr_array, index): if self.header(addr_array).tid & GCFLAG_TRACK_YOUNG_PTRS: if self.card_page_indices > 0: # <- constant-folded self.remember_young_pointer_from_array2(addr_array, index) else: - self.remember_young_pointer(addr_array, newvalue) + self.remember_young_pointer(addr_array) def _init_writebarrier_logic(self): DEBUG = self.DEBUG @@ -1076,9 +1076,8 @@ # instead of keeping it as a regular method is to # make the code in write_barrier() marginally smaller # (which is important because it is inlined *everywhere*). - def remember_young_pointer(addr_struct, newvalue): + def remember_young_pointer(addr_struct): # 'addr_struct' is the address of the object in which we write. - # 'newvalue' is the address that we are going to write in there. # We know that 'addr_struct' has GCFLAG_TRACK_YOUNG_PTRS so far. # if DEBUG: # note: PYPY_GC_DEBUG=1 does not enable this @@ -1086,22 +1085,25 @@ self.header(addr_struct).tid & GCFLAG_HAS_CARDS != 0, "young object with GCFLAG_TRACK_YOUNG_PTRS and no cards") # - # If it seems that what we are writing is a pointer to a young obj - # (as checked with appears_to_be_young()), then we need - # to remove the flag GCFLAG_TRACK_YOUNG_PTRS and add the object - # to the list 'old_objects_pointing_to_young'. We know that - # 'addr_struct' cannot be in the nursery, because nursery objects - # never have the flag GCFLAG_TRACK_YOUNG_PTRS to start with. + # We need to remove the flag GCFLAG_TRACK_YOUNG_PTRS and add + # the object to the list 'old_objects_pointing_to_young'. + # We know that 'addr_struct' cannot be in the nursery, + # because nursery objects never have the flag + # GCFLAG_TRACK_YOUNG_PTRS to start with. 
Note that in + # theory we don't need to do that if the pointer that we're + # writing into the object isn't pointing to a young object. + # However, it isn't really a win, because then sometimes + # we're going to call this function a lot of times for the + # same object; moreover we'd need to pass the 'newvalue' as + # an argument here. The JIT has always called a + # 'newvalue'-less version, too. + self.old_objects_pointing_to_young.append(addr_struct) objhdr = self.header(addr_struct) - if self.appears_to_be_young(newvalue): - self.old_objects_pointing_to_young.append(addr_struct) - objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS + objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS # # Second part: if 'addr_struct' is actually a prebuilt GC # object and it's the first time we see a write to it, we - # add it to the list 'prebuilt_root_objects'. Note that we - # do it even in the (rare?) case of 'addr' being NULL or another - # prebuilt object, to simplify code. + # add it to the list 'prebuilt_root_objects'. if objhdr.tid & GCFLAG_NO_HEAP_PTRS: objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS self.prebuilt_root_objects.append(addr_struct) @@ -1109,17 +1111,6 @@ remember_young_pointer._dont_inline_ = True self.remember_young_pointer = remember_young_pointer # - def jit_remember_young_pointer(addr_struct): - # minimal version of the above, with just one argument, - # called by the JIT when GCFLAG_TRACK_YOUNG_PTRS is set - self.old_objects_pointing_to_young.append(addr_struct) - objhdr = self.header(addr_struct) - objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS - if objhdr.tid & GCFLAG_NO_HEAP_PTRS: - objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS - self.prebuilt_root_objects.append(addr_struct) - self.jit_remember_young_pointer = jit_remember_young_pointer - # if self.card_page_indices > 0: self._init_writebarrier_with_card_marker() @@ -1179,13 +1170,13 @@ # called by the JIT when GCFLAG_TRACK_YOUNG_PTRS is set # but GCFLAG_CARDS_SET is cleared. This tries to set # GCFLAG_CARDS_SET if possible; otherwise, it falls back - # to jit_remember_young_pointer(). + # to remember_young_pointer(). 
objhdr = self.header(addr_array) if objhdr.tid & GCFLAG_HAS_CARDS: self.old_objects_with_cards_set.append(addr_array) objhdr.tid |= GCFLAG_CARDS_SET else: - self.jit_remember_young_pointer(addr_array) + self.remember_young_pointer(addr_array) self.jit_remember_young_pointer_from_array = ( jit_remember_young_pointer_from_array) @@ -1196,19 +1187,6 @@ return llarena.getfakearenaaddress(addr_byte) + (~byteindex) - def assume_young_pointers(self, addr_struct): - """Called occasionally by the JIT to mean ``assume that 'addr_struct' - may now contain young pointers.'' - """ - objhdr = self.header(addr_struct) - if objhdr.tid & GCFLAG_TRACK_YOUNG_PTRS: - self.old_objects_pointing_to_young.append(addr_struct) - objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS - # - if objhdr.tid & GCFLAG_NO_HEAP_PTRS: - objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS - self.prebuilt_root_objects.append(addr_struct) - def writebarrier_before_copy(self, source_addr, dest_addr, source_start, dest_start, length): """ This has the same effect as calling writebarrier over diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -87,19 +87,17 @@ def write(self, p, fieldname, newvalue): if self.gc.needs_write_barrier: - newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) - self.gc.write_barrier(newaddr, addr_struct) + self.gc.write_barrier(addr_struct) setattr(p, fieldname, newvalue) def writearray(self, p, index, newvalue): if self.gc.needs_write_barrier: - newaddr = llmemory.cast_ptr_to_adr(newvalue) addr_struct = llmemory.cast_ptr_to_adr(p) if hasattr(self.gc, 'write_barrier_from_array'): - self.gc.write_barrier_from_array(newaddr, addr_struct, index) + self.gc.write_barrier_from_array(addr_struct, index) else: - self.gc.write_barrier(newaddr, addr_struct) + self.gc.write_barrier(addr_struct) p[index] = newvalue def malloc(self, TYPE, n=None): @@ -416,13 +414,13 @@ assert calls == ['semispace_collect'] calls = [] - def test_assume_young_pointers(self): + def test_write_barrier_direct(self): s0 = lltype.malloc(S, immortal=True) self.consider_constant(s0) s = self.malloc(S) s.x = 1 s0.next = s - self.gc.assume_young_pointers(llmemory.cast_ptr_to_adr(s0)) + self.gc.write_barrier(llmemory.cast_ptr_to_adr(s0)) self.gc.collect(0) @@ -558,12 +556,10 @@ assert hdr_src.tid & minimark.GCFLAG_HAS_CARDS assert hdr_dst.tid & minimark.GCFLAG_HAS_CARDS # - young_p = self.malloc(S) - self.gc.write_barrier_from_array(young_p, addr_src, 0) + self.gc.write_barrier_from_array(addr_src, 0) index_in_third_page = int(2.5 * self.gc.card_page_indices) assert index_in_third_page < largeobj_size - self.gc.write_barrier_from_array(young_p, addr_src, - index_in_third_page) + self.gc.write_barrier_from_array(addr_src, index_in_third_page) # assert hdr_src.tid & minimark.GCFLAG_CARDS_SET addr_byte = self.gc.get_card(addr_src, 0) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -317,13 +317,6 @@ else: self.shrink_array_ptr = None - if hasattr(GCClass, 'assume_young_pointers'): - # xxx should really be a noop for gcs without generations - self.assume_young_pointers_ptr = getfn( - GCClass.assume_young_pointers.im_func, - [s_gc, annmodel.SomeAddress()], - annmodel.s_None) - if hasattr(GCClass, 'heap_stats'): self.heap_stats_ptr = getfn(GCClass.heap_stats.im_func, [s_gc], 
annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)), @@ -474,11 +467,10 @@ if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, [s_gc, - annmodel.SomeAddress(), annmodel.SomeAddress()], annmodel.s_None, inline=True) - func = getattr(gcdata.gc, 'jit_remember_young_pointer', None) + func = getattr(gcdata.gc, 'remember_young_pointer', None) if func is not None: # func should not be a bound method, but a real function assert isinstance(func, types.FunctionType) @@ -490,7 +482,6 @@ self.write_barrier_from_array_ptr = getfn(func.im_func, [s_gc, annmodel.SomeAddress(), - annmodel.SomeAddress(), annmodel.SomeInteger()], annmodel.s_None, inline=True) @@ -740,15 +731,15 @@ v_addr, v_length], resultvar=op.result) - def gct_gc_assume_young_pointers(self, hop): - if not hasattr(self, 'assume_young_pointers_ptr'): + def gct_gc_writebarrier(self, hop): + if self.write_barrier_ptr is None: return op = hop.spaceop v_addr = op.args[0] if v_addr.concretetype != llmemory.Address: v_addr = hop.genop('cast_ptr_to_adr', [v_addr], resulttype=llmemory.Address) - hop.genop("direct_call", [self.assume_young_pointers_ptr, + hop.genop("direct_call", [self.write_barrier_ptr, self.c_const_gc, v_addr]) def gct_gc_heap_stats(self, hop): @@ -1120,8 +1111,6 @@ and not isinstance(v_newvalue, Constant) and v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): - v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue], - resulttype = llmemory.Address) v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct], resulttype = llmemory.Address) if (self.write_barrier_from_array_ptr is not None and @@ -1131,14 +1120,12 @@ assert v_index.concretetype == lltype.Signed hop.genop("direct_call", [self.write_barrier_from_array_ptr, self.c_const_gc, - v_newvalue, v_structaddr, v_index]) else: self.write_barrier_calls += 1 hop.genop("direct_call", [self.write_barrier_ptr, self.c_const_gc, - v_newvalue, v_structaddr]) hop.rename('bare_' + opname) diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -304,12 +304,12 @@ "save_current_state_away: broken shadowstack") #shadowstackref.fullstack = True # - # cannot use llop.gc_assume_young_pointers() here, because + # cannot use llop.gc_writebarrier() here, because # we are in a minimally-transformed GC helper :-/ gc = self.gcdata.gc - if hasattr(gc.__class__, 'assume_young_pointers'): + if hasattr(gc.__class__, 'write_barrier'): shadowstackadr = llmemory.cast_ptr_to_adr(shadowstackref) - gc.assume_young_pointers(shadowstackadr) + gc.write_barrier(shadowstackadr) # self.gcdata.root_stack_top = llmemory.NULL # to detect missing restore diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py --- a/rpython/memory/gcwrapper.py +++ b/rpython/memory/gcwrapper.py @@ -106,7 +106,6 @@ assert (type(index) is int # <- fast path or lltype.typeOf(index) == lltype.Signed) self.gc.write_barrier_from_array( - llmemory.cast_ptr_to_adr(newvalue), llmemory.cast_ptr_to_adr(toplevelcontainer), index) wb = False @@ -114,7 +113,6 @@ # if wb: self.gc.write_barrier( - llmemory.cast_ptr_to_adr(newvalue), llmemory.cast_ptr_to_adr(toplevelcontainer)) llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue) diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ 
b/rpython/memory/test/test_transformed_gc.py @@ -1186,7 +1186,7 @@ res = run([100, 100]) assert res == 200 - def define_assume_young_pointers(cls): + def define_write_barrier_direct(cls): from rpython.rlib import rgc S = lltype.GcForwardReference() S.become(lltype.GcStruct('S', @@ -1198,8 +1198,7 @@ s = lltype.malloc(S) s.x = 42 llop.bare_setfield(lltype.Void, s0, void('next'), s) - llop.gc_assume_young_pointers(lltype.Void, - llmemory.cast_ptr_to_adr(s0)) + llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s0)) rgc.collect(0) return s0.next.x @@ -1208,8 +1207,8 @@ return f, cleanup, None - def test_assume_young_pointers(self): - run = self.runner("assume_young_pointers") + def test_write_barrier_direct(self): + run = self.runner("write_barrier_direct") res = run([]) assert res == 42 diff --git a/rpython/rlib/_stacklet_asmgcc.py b/rpython/rlib/_stacklet_asmgcc.py --- a/rpython/rlib/_stacklet_asmgcc.py +++ b/rpython/rlib/_stacklet_asmgcc.py @@ -303,7 +303,7 @@ self.suspstack = NULL_SUSPSTACK ll_assert(bool(s.anchor), "s.anchor should not be null") s.handle = handle - llop.gc_assume_young_pointers(lltype.Void, llmemory.cast_ptr_to_adr(s)) + llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s)) return s def get_result_suspstack(self, h): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -477,7 +477,7 @@ 'gc_thread_die' : LLOp(), 'gc_thread_before_fork':LLOp(), # returns an opaque address 'gc_thread_after_fork': LLOp(), # arguments: (result_of_fork, opaqueaddr) - 'gc_assume_young_pointers': LLOp(canrun=True), + 'gc_writebarrier': LLOp(canrun=True), 'gc_writebarrier_before_copy': LLOp(canrun=True), 'gc_heap_stats' : LLOp(canmallocgc=True), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -643,7 +643,7 @@ def op_get_member_index(memberoffset): raise NotImplementedError -def op_gc_assume_young_pointers(addr): +def op_gc_writebarrier(addr): pass def op_shrink_array(array, smallersize): diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -85,7 +85,7 @@ def OP_GC_THREAD_AFTER_FORK(self, funcgen, op): return '' - def OP_GC_ASSUME_YOUNG_POINTERS(self, funcgen, op): + def OP_GC_WRITEBARRIER(self, funcgen, op): return '' def OP_GC_STACK_BOTTOM(self, funcgen, op): @@ -404,7 +404,7 @@ self.tid_fieldname(tid_field), funcgen.expr(c_skipoffset))) - def OP_GC_ASSUME_YOUNG_POINTERS(self, funcgen, op): + def OP_GC_WRITEBARRIER(self, funcgen, op): raise Exception("the FramewokGCTransformer should handle this") def OP_GC_GCFLAG_EXTRA(self, funcgen, op): diff --git a/rpython/translator/c/test/test_boehm.py b/rpython/translator/c/test/test_boehm.py --- a/rpython/translator/c/test/test_boehm.py +++ b/rpython/translator/c/test/test_boehm.py @@ -369,13 +369,12 @@ run = self.getcompiled(f) assert run() == 0x62024230 - def test_assume_young_pointers_nop(self): + def test_write_barrier_nop(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) s = lltype.malloc(S) s.x = 0 def f(): - llop.gc_assume_young_pointers(lltype.Void, - llmemory.cast_ptr_to_adr(s)) + llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s)) return True run = self.getcompiled(f) assert run() == True From noreply at buildbot.pypy.org Mon Aug 19 10:23:10 2013 From: 
noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 10:23:10 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: extend test (fails now :( ) Message-ID: <20130819082310.A2FEE1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66207:85864451bd27 Date: 2013-08-19 10:22 +0200 http://bitbucket.org/pypy/pypy/changeset/85864451bd27/ Log: extend test (fails now :( ) diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -8,7 +8,7 @@ import os from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype -from rpython.rlib.jit import JitDriver, dont_look_inside +from rpython.rlib.jit import JitDriver, dont_look_inside, promote from rpython.rlib.jit import elidable, unroll_safe from rpython.jit.backend.llsupport.gc import GcLLDescr_framework from rpython.tool.udir import udir @@ -809,6 +809,12 @@ @unroll_safe def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, ptrs, s): + if n % 3 == 0: + x0 = promote(x0) + elif n % 3 == 1: + x1 = promote(x1) + else: + x2 = promote(x2) raiseassert(x0 != ptrs[0]) raiseassert(x0 == ptrs[1]) raiseassert(x0 != ptrs[2]) From noreply at buildbot.pypy.org Mon Aug 19 10:28:40 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 19 Aug 2013 10:28:40 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add arrival / departure dates. Message-ID: <20130819082840.BA5141C0149@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5020:b2734796d6a5 Date: 2013-08-19 10:26 +0200 http://bitbucket.org/pypy/extradoc/changeset/b2734796d6a5/ Log: Add arrival / departure dates. diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -19,7 +19,7 @@ Remi Meier 24/8-1/9 ? Marko Bencun 24/8-1/9 ? Maciej Fijalkowski 25/8-1/9 private -Manuel Jacob ? sth. cheap, pref. share +Manuel Jacob 24/8-3/9 sth. cheap, pref. share Ronan Lamy 25/8-1/9 hotel Strand Continent. Antonio Cuni 26/8-5/9 hotel LSE Northumberl. ==================== ============== ======================= From noreply at buildbot.pypy.org Mon Aug 19 10:50:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Aug 2013 10:50:06 +0200 (CEST) Subject: [pypy-commit] pypy default: Comments: this logic is not used any more for now Message-ID: <20130819085006.98CC21C02DB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66208:dc28be2ed4e1 Date: 2013-08-19 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/dc28be2ed4e1/ Log: Comments: this logic is not used any more for now diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -370,8 +370,8 @@ class LocationCodeBuilder(object): _mixin_ = True - _reuse_scratch_register = False - _scratch_register_known = False + _reuse_scratch_register = False # for now, this is always False + _scratch_register_known = False # for now, this is always False _scratch_register_value = 0 def _binaryop(name): @@ -576,6 +576,7 @@ self.MOV_ri(X86_64_SCRATCH_REG.value, value) def begin_reuse_scratch_register(self): + # --NEVER CALLED (only from a specific test)-- # Flag the beginning of a block where it is okay to reuse the value # of the scratch register. 
In theory we shouldn't have to do this if # we were careful to mark all possible targets of a jump or call, and From noreply at buildbot.pypy.org Mon Aug 19 11:28:51 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 11:28:51 +0200 (CEST) Subject: [pypy-commit] stmgc default: another bug with h_original :( Message-ID: <20130819092851.8A3441C02DB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r484:e02ee68b20c4 Date: 2013-08-19 11:28 +0200 http://bitbucket.org/pypy/stmgc/changeset/e02ee68b20c4/ Log: another bug with h_original :( diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -177,10 +177,21 @@ fresh_old_copy = id_obj; fresh_old_copy->h_original = 0; obj->h_tid &= ~GCFLAG_HAS_ID; + + /* priv_from_prot's backup->h_originals already point + to id_obj */ } else { /* make a copy of it outside */ fresh_old_copy = create_old_object_copy(obj); + + if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED + && !(obj->h_original)) { + /* the object's backup copy still has + a h_original that is NULL*/ + gcptr B = (gcptr)obj->h_revision; + B->h_original = (revision_t)fresh_old_copy; + } } obj->h_tid |= GCFLAG_MOVED; diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -157,7 +157,7 @@ 0, 0, 0, 0] -def test_bug(): +def test_clear_original_on_id_copy(): p1 = nalloc(HDR) pid = lib.stm_id(p1) lib.stm_push_root(p1) @@ -167,7 +167,7 @@ assert p1o == ffi.cast("gcptr", pid) assert follow_original(p1o) == ffi.NULL -def test_bug2(): +def test_clear_original_on_priv_from_prot_abort(): p = oalloc(HDR+WORD) def cb(c): @@ -179,4 +179,25 @@ p = lib.stm_pop_root() assert follow_original(p) == ffi.NULL +def test_set_backups_original_on_move_to_id_copy(): + p1 = nalloc(HDR+WORD) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p1) == 'protected' + + pw = lib.stm_write_barrier(p1) + assert classify(pw) == 'private_from_protected' + assert pw == p1 + lib.stm_push_root(pw) + # make pw old + minor_collect() + p1o = lib.stm_pop_root() + + # Backup has updated h_original: + assert classify(p1o) == 'private_from_protected' + B = follow_revision(p1o) + assert follow_original(B) == p1o + + + From noreply at buildbot.pypy.org Mon Aug 19 12:00:01 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Aug 2013 12:00:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Use get_typeids_z Message-ID: <20130819100001.174781C3670@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66209:9a191922b1d7 Date: 2013-08-19 11:59 +0200 http://bitbucket.org/pypy/pypy/changeset/9a191922b1d7/ Log: Use get_typeids_z diff --git a/pypy/tool/gcdump.py b/pypy/tool/gcdump.py --- a/pypy/tool/gcdump.py +++ b/pypy/tool/gcdump.py @@ -20,9 +20,13 @@ for obj in self.walk(a): self.add_object_summary(obj[2], obj[3]) - def load_typeids(self, filename): + def load_typeids(self, filename_or_iter): self.typeids = Stat.typeids.copy() - for num, line in enumerate(open(filename)): + if isinstance(filename_or_iter, str): + iter = open(filename_or_iter) + else: + iter = filename_or_iter + for num, line in enumerate(iter): if num == 0: continue words = line.split() @@ -92,5 +96,8 @@ typeid_name = os.path.join(os.path.dirname(sys.argv[1]), 'typeids.txt') if os.path.isfile(typeid_name): stat.load_typeids(typeid_name) + else: + import zlib, gc + stat.load_typeids(zlib.decompress(gc.get_typeids_z()).split("\n")) # stat.print_summary() From noreply at buildbot.pypy.org Mon Aug 
19 14:38:18 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 14:38:18 +0200 (CEST) Subject: [pypy-commit] stmgc nonmovable-int-ref: merge Message-ID: <20130819123818.7D0E41C300E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: nonmovable-int-ref Changeset: r485:b19dfb209a10 Date: 2013-08-19 11:36 +0200 http://bitbucket.org/pypy/stmgc/changeset/b19dfb209a10/ Log: merge diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -177,10 +177,21 @@ fresh_old_copy = id_obj; fresh_old_copy->h_original = 0; obj->h_tid &= ~GCFLAG_HAS_ID; + + /* priv_from_prot's backup->h_originals already point + to id_obj */ } else { /* make a copy of it outside */ fresh_old_copy = create_old_object_copy(obj); + + if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED + && !(obj->h_original)) { + /* the object's backup copy still has + a h_original that is NULL*/ + gcptr B = (gcptr)obj->h_revision; + B->h_original = (revision_t)fresh_old_copy; + } } obj->h_tid |= GCFLAG_MOVED; diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -215,17 +215,18 @@ : (obj)) #define stm_repeat_read_barrier(obj) \ - (UNLIKELY((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED)) ? \ + (UNLIKELY(((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | \ + GCFLAG_MOVED)) != 0) ? \ stm_RepeatReadBarrier(obj) \ : (obj)) #define stm_immut_read_barrier(obj) \ - (UNLIKELY((obj)->h_tid & GCFLAG_STUB) ? \ + (UNLIKELY(((obj)->h_tid & GCFLAG_STUB) != 0) ? \ stm_ImmutReadBarrier(obj) \ : (obj)) #define stm_repeat_write_barrier(obj) \ - (UNLIKELY((obj)->h_tid & GCFLAG_WRITE_BARRIER) ? \ + (UNLIKELY(((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0) ? \ stm_RepeatWriteBarrier(obj) \ : (obj)) diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -157,7 +157,7 @@ 0, 0, 0, 0] -def test_bug(): +def test_clear_original_on_id_copy(): p1 = nalloc(HDR) pid = lib.stm_id(p1) lib.stm_push_root(p1) @@ -167,7 +167,7 @@ assert p1o == ffi.cast("gcptr", pid) assert follow_original(p1o) == ffi.NULL -def test_bug2(): +def test_clear_original_on_priv_from_prot_abort(): p = oalloc(HDR+WORD) def cb(c): @@ -179,6 +179,25 @@ p = lib.stm_pop_root() assert follow_original(p) == ffi.NULL +def test_set_backups_original_on_move_to_id_copy(): + p1 = nalloc(HDR+WORD) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p1) == 'protected' + + pw = lib.stm_write_barrier(p1) + assert classify(pw) == 'private_from_protected' + assert pw == p1 + + lib.stm_push_root(pw) + # make pw old + minor_collect() + p1o = lib.stm_pop_root() + + # Backup has updated h_original: + assert classify(p1o) == 'private_from_protected' + B = follow_revision(p1o) + assert follow_original(B) == p1o def test_allocate_public_integer_address(): From noreply at buildbot.pypy.org Mon Aug 19 14:50:41 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 14:50:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: stmgc import Message-ID: <20130819125041.BFF3B1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66210:9f125baf35d1 Date: 2013-08-19 14:39 +0200 http://bitbucket.org/pypy/pypy/changeset/9f125baf35d1/ Log: stmgc import diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -178,10 +178,21 @@ fresh_old_copy = id_obj; fresh_old_copy->h_original = 0; obj->h_tid &= 
~GCFLAG_HAS_ID; + + /* priv_from_prot's backup->h_originals already point + to id_obj */ } else { /* make a copy of it outside */ fresh_old_copy = create_old_object_copy(obj); + + if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED + && !(obj->h_original)) { + /* the object's backup copy still has + a h_original that is NULL*/ + gcptr B = (gcptr)obj->h_revision; + B->h_original = (revision_t)fresh_old_copy; + } } obj->h_tid |= GCFLAG_MOVED; diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -9cd8cc0e987a +b19dfb209a10 diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -216,17 +216,18 @@ : (obj)) #define stm_repeat_read_barrier(obj) \ - (UNLIKELY((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED)) ? \ + (UNLIKELY(((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | \ + GCFLAG_MOVED)) != 0) ? \ stm_RepeatReadBarrier(obj) \ : (obj)) #define stm_immut_read_barrier(obj) \ - (UNLIKELY((obj)->h_tid & GCFLAG_STUB) ? \ + (UNLIKELY(((obj)->h_tid & GCFLAG_STUB) != 0) ? \ stm_ImmutReadBarrier(obj) \ : (obj)) #define stm_repeat_write_barrier(obj) \ - (UNLIKELY((obj)->h_tid & GCFLAG_WRITE_BARRIER) ? \ + (UNLIKELY(((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0) ? \ stm_RepeatWriteBarrier(obj) \ : (obj)) From noreply at buildbot.pypy.org Mon Aug 19 14:50:43 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 14:50:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix for ptr_eq because a function returning _Bool only sets the lower 8 bits of the return value Message-ID: <20130819125043.488501C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66211:c2953cb28b82 Date: 2013-08-19 14:40 +0200 http://bitbucket.org/pypy/pypy/changeset/c2953cb28b82/ Log: fix for ptr_eq because a function returning _Bool only sets the lower 8 bits of the return value diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2337,8 +2337,9 @@ mc.CALL(imm(func)) # result still on stack mc.POP_r(X86_64_SCRATCH_REG.value) - # set flags: - mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) + # _Bool return type only sets lower 8 bits of return value + sl = X86_64_SCRATCH_REG.lowest8bits() + mc.CMP8_ri(sl.value, 0) # # END SLOWPATH # From noreply at buildbot.pypy.org Mon Aug 19 16:42:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Aug 2013 16:42:18 +0200 (CEST) Subject: [pypy-commit] pypy default: test_cast_gcref_to_int: when running untranslated, accept Message-ID: <20130819144218.C6F5B1C0149@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66212:452ef15ded28 Date: 2013-08-19 16:41 +0200 http://bitbucket.org/pypy/pypy/changeset/452ef15ded28/ Log: test_cast_gcref_to_int: when running untranslated, accept also _llgcopaque objects and force a cast to integer. 
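A purely illustrative aside on what this log message is about (plain Python, not part of the changeset; the class names below are stand-ins for rgc._GcRef and for an _llgcopaque-backed GCREF, not the real types): when running untranslated, a gcref that wraps a real instance can be keyed by id() of that instance, but a gcref built by casting a raw integer has no instance behind it, so id() says nothing about the address it encodes -- hence the special case the diff below adds (a later changeset in this archive reverts it again, restricting cast_gcref_to_int to cast_instance_to_gcref results).

    class Instance(object):
        pass

    class GcRefFromInstance(object):       # stand-in for rgc._GcRef
        def __init__(self, x):
            self._x = x                    # untranslated key: id(self._x)

    class GcRefFromAddress(object):        # stand-in for rffi.cast(llmemory.GCREF, 123456)
        def __init__(self, intval):
            self.intval = intval           # the raw address lives here, not in id()

    good = GcRefFromInstance(Instance())
    key = id(good._x)                      # meaningful: identifies the wrapped instance
    fake = GcRefFromAddress(123456)
    addr = fake.intval                     # 123456; id(fake) would be unrelated to it

The changeset's own diff follows.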
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -346,6 +346,10 @@ if we_are_translated(): return lltype.cast_ptr_to_int(gcref) else: + from rpython.rtyper.lltypesystem.ll2ctypes import _llgcopaque + if isinstance(gcref._obj, _llgcopaque): + from rpython.rtyper.lltypesystem import rffi + return rffi.cast(lltype.Signed, gcref) return id(gcref._x) def dump_rpy_heap(fd): diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py --- a/rpython/rlib/test/test_rgc.py +++ b/rpython/rlib/test/test_rgc.py @@ -228,3 +228,8 @@ x1 = X() n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) assert n >= 8 and n <= 64 + +def test_cast_gcref_to_int(): + from rpython.rtyper.lltypesystem import rffi + x = rffi.cast(llmemory.GCREF, 123456) + assert rgc.cast_gcref_to_int(x) == 123456 From noreply at buildbot.pypy.org Mon Aug 19 17:10:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Aug 2013 17:10:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Oups. Revert the previous change and document why it was wrong Message-ID: <20130819151036.110561C087E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66213:dbfb39c958a1 Date: 2013-08-19 17:09 +0200 http://bitbucket.org/pypy/pypy/changeset/dbfb39c958a1/ Log: Oups. Revert the previous change and document why it was wrong diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -343,13 +343,12 @@ return intmask(id(Class)) def cast_gcref_to_int(gcref): + # This is meant to be used on cast_instance_to_gcref results. + # Don't use this on regular gcrefs obtained e.g. with + # lltype.cast_opaque_ptr(). if we_are_translated(): return lltype.cast_ptr_to_int(gcref) else: - from rpython.rtyper.lltypesystem.ll2ctypes import _llgcopaque - if isinstance(gcref._obj, _llgcopaque): - from rpython.rtyper.lltypesystem import rffi - return rffi.cast(lltype.Signed, gcref) return id(gcref._x) def dump_rpy_heap(fd): diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py --- a/rpython/rlib/test/test_rgc.py +++ b/rpython/rlib/test/test_rgc.py @@ -228,8 +228,3 @@ x1 = X() n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) assert n >= 8 and n <= 64 - -def test_cast_gcref_to_int(): - from rpython.rtyper.lltypesystem import rffi - x = rffi.cast(llmemory.GCREF, 123456) - assert rgc.cast_gcref_to_int(x) == 123456 From noreply at buildbot.pypy.org Mon Aug 19 17:23:15 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Aug 2013 17:23:15 +0200 (CEST) Subject: [pypy-commit] pypy rewritten-loop-logging: Fix tests Message-ID: <20130819152315.E6F8B1C087E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewritten-loop-logging Changeset: r66214:705d27079fbb Date: 2013-08-19 17:22 +0200 http://bitbucket.org/pypy/pypy/changeset/705d27079fbb/ Log: Fix tests diff --git a/rpython/jit/backend/arm/test/test_generated.py b/rpython/jit/backend/arm/test/test_generated.py --- a/rpython/jit/backend/arm/test/test_generated.py +++ b/rpython/jit/backend/arm/test/test_generated.py @@ -40,7 +40,7 @@ looptoken = JitCellToken() operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) operations[3].setfailargs([v9, v6, v10, v2, v8, v5, v1, v4]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-12 , -26 , -19 , 7 , -5 , -24 , -37 , 62 , 9 , 12] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 
0 @@ -92,7 +92,7 @@ operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) operations[-1].setfailargs([v7, v1, v2]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [16 , 5 , 5 , 16 , 46 , 6 , 63 , 39 , 78 , 0] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 105 @@ -136,7 +136,7 @@ operations[-1].setfailargs([v5, v2, v1, v10, v3, v8, v4, v6]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-5 , 24 , 46 , -15 , 13 , -8 , 0 , -6 , 6 , 6] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -179,7 +179,7 @@ operations[5].setfailargs([]) operations[-1].setfailargs([v8, v2, v6, v5, v7, v1, v10]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [19 , -3 , -58 , -7 , 12 , 22 , -54 , -29 , -19 , -64] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == -29 @@ -223,7 +223,7 @@ looptoken = JitCellToken() operations[5].setfailargs([]) operations[-1].setfailargs([v1, v4, v10, v8, v7, v3]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [1073741824 , 95 , -16 , 5 , 92 , 12 , 32 , 17 , 37 , -63] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 1073741824 @@ -280,7 +280,7 @@ operations[9].setfailargs([v10, v13]) operations[-1].setfailargs([v8, v10, v6, v3, v2, v9]) args = [32 , 41 , -9 , 12 , -18 , 46 , 15 , 17 , 10 , 12] - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 assert cpu.get_int_value(deadframe, 0) == 12 @@ -328,7 +328,7 @@ operations[8].setfailargs([v5, v9]) operations[-1].setfailargs([v4, v10, v6, v5, v9, v7]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-8 , 0 , 62 , 35 , 16 , 9 , 30 , 581610154 , -1 , 738197503] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -378,7 +378,7 @@ operations[-2].setfailargs([v9, v4, v10, v11, v14]) operations[-1].setfailargs([v10, v8, v1, v6, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-39 , -18 , 1588243114 , -9 , -4 , 1252698794 , 0 , 715827882 , -15 , 536870912] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -433,7 +433,7 @@ operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) operations[-1].setfailargs([v1, v2, v9]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [0 , -2 , 24 , 1 , -4 , 13 , -95 , 33 , 2 , -44] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 @@ -475,7 +475,7 @@ operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) operations[-1].setfailargs([v8, v2, v10, v6, v7, v9, v5, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + 
cpu.compile_loop(None, inputargs, operations, looptoken) args = [3 , -5 , 1431655765 , 47 , 12 , 1789569706 , 15 , 939524096 , 16 , -43] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -524,7 +524,7 @@ operations[-1].setfailargs([v2, v3, v5, v7, v10, v8, v9]) operations[4].setfailargs([v14]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [14 , -20 , 18 , -2058005163 , 6 , 1 , -16 , 11 , 0 , 19] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -17,7 +17,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): From noreply at buildbot.pypy.org Mon Aug 19 17:25:09 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Aug 2013 17:25:09 +0200 (CEST) Subject: [pypy-commit] pypy rewritten-loop-logging: close merged branch Message-ID: <20130819152509.6CAE11C087E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: rewritten-loop-logging Changeset: r66215:ca7528cacac2 Date: 2013-08-19 17:23 +0200 http://bitbucket.org/pypy/pypy/changeset/ca7528cacac2/ Log: close merged branch From noreply at buildbot.pypy.org Mon Aug 19 17:25:11 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 19 Aug 2013 17:25:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge rewritten-loop-logging, allowing to debug rewritten loops Message-ID: <20130819152511.13FBB1C087E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66216:01c12efd712a Date: 2013-08-19 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/01c12efd712a/ Log: Merge rewritten-loop-logging, allowing to debug rewritten loops diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -571,7 +571,8 @@ self.mc.BL(self.stack_check_slowpath, c=c.HI) # call if ip > lr # cpu interface - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, + log): clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt clt._debug_nbargs = len(inputargs) @@ -620,6 +621,9 @@ 'loop.asm') ops_offset = self.mc.ops_offset + if logger is not None: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) self.teardown() debug_start("jit-backend-addr") @@ -644,8 +648,8 @@ frame_depth = max(frame_depth, target_frame_depth) return frame_depth - def assemble_bridge(self, faildescr, inputargs, operations, - original_loop_token, log): + def assemble_bridge(self, logger, faildescr, inputargs, operations, + original_loop_token, log): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) @@ -694,6 +698,9 @@ frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) + if logger: + logger.log_bridge(inputargs, 
operations, "rewritten", + ops_offset=ops_offset) self.teardown() debug_bridge(descr_number, rawstart, codeendpos) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -56,17 +56,18 @@ def finish_once(self): self.assembler.finish_once() - def compile_loop(self, inputargs, operations, looptoken, + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, name=''): - return self.assembler.assemble_loop(name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(logger, name, inputargs, operations, + looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(faildescr, inputargs, operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(logger, faildescr, inputargs, + operations, + original_loop_token, log=log) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem diff --git a/rpython/jit/backend/arm/test/test_generated.py b/rpython/jit/backend/arm/test/test_generated.py --- a/rpython/jit/backend/arm/test/test_generated.py +++ b/rpython/jit/backend/arm/test/test_generated.py @@ -40,7 +40,7 @@ looptoken = JitCellToken() operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) operations[3].setfailargs([v9, v6, v10, v2, v8, v5, v1, v4]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-12 , -26 , -19 , 7 , -5 , -24 , -37 , 62 , 9 , 12] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -92,7 +92,7 @@ operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) operations[-1].setfailargs([v7, v1, v2]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [16 , 5 , 5 , 16 , 46 , 6 , 63 , 39 , 78 , 0] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 105 @@ -136,7 +136,7 @@ operations[-1].setfailargs([v5, v2, v1, v10, v3, v8, v4, v6]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-5 , 24 , 46 , -15 , 13 , -8 , 0 , -6 , 6 , 6] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -179,7 +179,7 @@ operations[5].setfailargs([]) operations[-1].setfailargs([v8, v2, v6, v5, v7, v1, v10]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [19 , -3 , -58 , -7 , 12 , 22 , -54 , -29 , -19 , -64] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == -29 @@ -223,7 +223,7 @@ looptoken = JitCellToken() operations[5].setfailargs([]) operations[-1].setfailargs([v1, v4, v10, v8, v7, v3]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [1073741824 , 95 , -16 , 5 , 92 , 12 , 32 , 17 , 37 , -63] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 1073741824 @@ -280,7 +280,7 @@ operations[9].setfailargs([v10, v13]) operations[-1].setfailargs([v8, v10, 
v6, v3, v2, v9]) args = [32 , 41 , -9 , 12 , -18 , 46 , 15 , 17 , 10 , 12] - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 assert cpu.get_int_value(deadframe, 0) == 12 @@ -328,7 +328,7 @@ operations[8].setfailargs([v5, v9]) operations[-1].setfailargs([v4, v10, v6, v5, v9, v7]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-8 , 0 , 62 , 35 , 16 , 9 , 30 , 581610154 , -1 , 738197503] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -378,7 +378,7 @@ operations[-2].setfailargs([v9, v4, v10, v11, v14]) operations[-1].setfailargs([v10, v8, v1, v6, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-39 , -18 , 1588243114 , -9 , -4 , 1252698794 , 0 , 715827882 , -15 , 536870912] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -433,7 +433,7 @@ operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) operations[-1].setfailargs([v1, v2, v9]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [0 , -2 , 24 , 1 , -4 , 13 , -95 , 33 , 2 , -44] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 @@ -475,7 +475,7 @@ operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) operations[-1].setfailargs([v8, v2, v10, v6, v7, v9, v5, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [3 , -5 , 1431655765 , 47 , 12 , 1789569706 , 15 , 939524096 , 16 , -43] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -524,7 +524,7 @@ operations[-1].setfailargs([v2, v3, v5, v7, v10, v8, v9]) operations[4].setfailargs([v14]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [14 , -20 , 18 , -2058005163 , 6 , 1 , -16 , 11 , 0 , 19] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 diff --git a/rpython/jit/backend/arm/test/test_regalloc2.py b/rpython/jit/backend/arm/test/test_regalloc2.py --- a/rpython/jit/backend/arm/test/test_regalloc2.py +++ b/rpython/jit/backend/arm/test/test_regalloc2.py @@ -24,7 +24,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 9) assert cpu.get_int_value(deadframe, 0) == (9 >> 3) assert cpu.get_int_value(deadframe, 1) == (~18) @@ -48,7 +48,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -10) assert cpu.get_int_value(deadframe, 0) == 0 assert cpu.get_int_value(deadframe, 1) == -1000 @@ -145,7 +145,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, 
inputargs, operations, looptoken) args = [-13 , 10 , 10 , 8 , -8 , -16 , -18 , 46 , -12 , 26] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -252,7 +252,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [17 , -20 , -6 , 6 , 1 , 13 , 13 , 9 , 49 , 8] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -75,7 +75,7 @@ ResOperation(rop.FINISH, [inp[1]], None, descr=BasicFinalDescr(1)), ] operations[-2].setfailargs(out) - cpu.compile_loop(inp, operations, looptoken) + cpu.compile_loop(None, inp, operations, looptoken) args = [i for i in range(1, 15)] deadframe = self.cpu.execute_token(looptoken, *args) output = [self.cpu.get_int_value(deadframe, i - 1) for i in range(1, 15)] @@ -117,9 +117,9 @@ i1 = int_sub(i0, 1) finish(i1) ''') - self.cpu.compile_loop(loop2.inputargs, loop2.operations, lt2) - self.cpu.compile_loop(loop3.inputargs, loop3.operations, lt3) - self.cpu.compile_loop(loop1.inputargs, loop1.operations, lt1) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, lt2) + self.cpu.compile_loop(None, loop3.inputargs, loop3.operations, lt3) + self.cpu.compile_loop(None, loop1.inputargs, loop1.operations, lt1) df = self.cpu.execute_token(lt1, 10) assert self.cpu.get_int_value(df, 0) == 7 @@ -214,7 +214,7 @@ ops = "".join(ops) loop = parse(ops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * numargs RES = lltype.Signed args = [i+1 for i in range(numargs)] @@ -246,7 +246,7 @@ try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() - self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.compile_loop(None, ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] @@ -280,7 +280,7 @@ faildescr = BasicFailDescr(2) loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ops2 = """ [i0, f1] i1 = same_as(i0) @@ -293,7 +293,7 @@ """ loop2 = parse(ops2, self.cpu, namespace=locals()) looptoken2 = JitCellToken() - info = self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + info = self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) deadframe = self.cpu.execute_token(looptoken, -9, longlong.getfloatstorage(-13.5)) res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -183,7 +183,8 @@ self.stats = stats or MiniStats() self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, + name=''): clt = model.CompiledLoopToken(self, looptoken.number) 
looptoken.compiled_loop_token = clt lltrace = LLTrace(inputargs, operations) @@ -191,7 +192,7 @@ clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -676,7 +676,7 @@ 'checkdescr': checkdescr, 'fielddescr': cpu.fielddescrof(S, 'x')}) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) p0 = lltype.malloc(S, zero=True) p1 = lltype.malloc(S) p2 = lltype.malloc(S) @@ -715,7 +715,7 @@ 'calldescr': checkdescr, }) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) S = self.S s = lltype.malloc(S) cpu.execute_token(token, 1, s) @@ -743,7 +743,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(20) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) arg = longlong.getfloatstorage(2.3) frame = cpu.execute_token(token, arg) ofs = cpu.get_baseofs_of_frame_field() @@ -770,7 +770,7 @@ cpu.gc_ll_descr.collections = [[0, sizeof.size]] cpu.gc_ll_descr.init_nursery(2 * sizeof.size) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = cpu.execute_token(token) # now we should be able to track everything from the frame frame = lltype.cast_opaque_ptr(JITFRAMEPTR, frame) @@ -821,7 +821,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) args = [lltype.nullptr(llmemory.GCREF.TO) for i in range(7)] frame = cpu.execute_token(token, 1, *args) frame = rffi.cast(JITFRAMEPTR, frame) @@ -867,7 +867,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) @@ -911,7 +911,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) assert getmap(frame).count('1') == 4 diff --git a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py --- a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py @@ -97,7 +97,7 @@ loop = self.parse(ops, namespace=namespace) self.loop = loop looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) arguments = [] for arg in args: if isinstance(arg, int): @@ -147,7 +147,8 @@ assert 
([box.type for box in bridge.inputargs] == [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() - self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, + bridge.operations, loop._jitcelltoken) return bridge @@ -335,7 +336,7 @@ ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] - + def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] @@ -409,7 +410,7 @@ class TestRegallocCompOps(BaseTestRegalloc): - + def test_cmp_op_0(self): ops = ''' [i0, i3] @@ -575,7 +576,7 @@ class TestRegAllocCallAndStackDepth(BaseTestRegalloc): def setup_class(cls): py.test.skip("skip for now, not sure what do we do") - + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if not self.cpu.IS_64_BIT: @@ -612,7 +613,7 @@ ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) + i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) guard_false(i5) [i11, i1, i2, i3, i4, i5, i6, i7, i8, i9] ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) @@ -649,7 +650,7 @@ ops = ''' [i2, i0, i1] - i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) + i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) guard_false(i0, descr=fdescr2) [i3, i0] ''' bridge = self.attach_bridge(ops, loop, -2) @@ -676,7 +677,7 @@ ops = ''' [i2] - i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) + i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) guard_false(i3, descr=fdescr2) [i3] ''' bridge = self.attach_bridge(ops, loop, -2) diff --git a/rpython/jit/backend/llsupport/test/test_runner.py b/rpython/jit/backend/llsupport/test/test_runner.py --- a/rpython/jit/backend/llsupport/test/test_runner.py +++ b/rpython/jit/backend/llsupport/test/test_runner.py @@ -14,7 +14,7 @@ def set_debug(flag): pass - def compile_loop(self, inputargs, operations, looptoken): + def compile_loop(self, logger, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -51,7 +51,8 @@ """ return False - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, + log=True, name=''): """Assemble the given loop. Should create and attach a fresh CompiledLoopToken to looptoken.compiled_loop_token and stick extra attributes @@ -67,7 +68,7 @@ """ raise NotImplementedError - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): """Assemble the bridge. The FailDescr is the descr of the original guard that failed. 
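For orientation, a minimal sketch of the calling convention this merge establishes (plain Python, not taken from the diff; FakeLogger and FakeBackend are illustrative stand-ins, not the real rpython classes): compile_loop() and compile_bridge() now receive a logger as their first argument, and a backend that has assembled the trace may report the rewritten operations, together with their ops_offset, through logger.log_loop() / logger.log_bridge(); callers that do not care -- as in the test hunks above and below -- simply pass None.

    class FakeLogger(object):              # stand-in, not rpython's jit logger
        def __init__(self):
            self.calls = []
        def log_loop(self, inputargs, operations, number, kind,
                     ops_offset=None, name=''):
            self.calls.append((kind, name, len(operations)))

    class FakeBackend(object):             # stand-in for a CPU backend
        def compile_loop(self, logger, inputargs, operations, looptoken,
                         log=True, name=''):
            ops_offset = {}                # a real backend fills this while assembling
            if logger is not None:
                logger.log_loop(inputargs, operations, 0, "rewritten",
                                ops_offset=ops_offset, name=name)

    logger = FakeLogger()
    FakeBackend().compile_loop(logger, [], [], object(), name='demo')
    FakeBackend().compile_loop(None, [], [], object())   # logging disabled
    assert logger.calls == [("rewritten", 'demo', 0)]

The remaining hunks of this changeset mechanically thread the extra argument through the backend tests.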
diff --git a/rpython/jit/backend/test/calling_convention_test.py b/rpython/jit/backend/test/calling_convention_test.py --- a/rpython/jit/backend/test/calling_convention_test.py +++ b/rpython/jit/backend/test/calling_convention_test.py @@ -105,7 +105,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) @@ -249,7 +249,7 @@ called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_descr = called_loop.operations[-1].getdescr() - self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) + self.cpu.compile_loop(None, called_loop.inputargs, called_loop.operations, called_looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = cpu.execute_token(called_looptoken, *argvals) @@ -278,7 +278,7 @@ self.cpu.done_with_this_frame_descr_float = done_descr try: othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # prepare call to called_loop argvals, _ = self._prepare_args(args, floats, ints) @@ -424,7 +424,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -49,7 +49,7 @@ valueboxes, descr) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) args = [] for box in inputargs: if isinstance(box, BoxInt): @@ -127,7 +127,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) res = self.cpu.get_int_value(deadframe, 0) @@ -145,7 +145,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, longlong.getfloatstorage(2.8)) fail = self.cpu.get_latest_descr(deadframe) @@ -170,7 +170,7 @@ inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -195,7 +195,7 @@ inputargs = [i3] operations[4].setfailargs([None, None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 44) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -221,7 +221,7 @@ operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = 
weakref.ref(operations[2]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) if hasattr(looptoken, '_x86_ops_offset'): del looptoken._x86_ops_offset # else it's kept alive del i0, i1, i2 @@ -249,7 +249,7 @@ ] inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -260,7 +260,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -291,7 +291,7 @@ ] inputargs = [i3] operations[4].setfailargs([None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -302,7 +302,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -320,7 +320,7 @@ ] inputargs = [i0] operations[0].setfailargs([i0]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1list = [BoxInt() for i in range(150)] bridge = [] @@ -334,7 +334,7 @@ descr=BasicFinalDescr(4))) bridge[-2].setfailargs(i1list) - self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i0], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) fail = self.cpu.get_latest_descr(deadframe) @@ -358,7 +358,7 @@ operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] - self.cpu.compile_loop([i0], operations, looptoken) + self.cpu.compile_loop(None, [i0], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 99) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -369,7 +369,7 @@ operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -380,7 +380,7 @@ operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -391,7 +391,7 @@ operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] - self.cpu.compile_loop([f0], operations, looptoken) + self.cpu.compile_loop(None, [f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) deadframe = self.cpu.execute_token(looptoken, value) fail = self.cpu.get_latest_descr(deadframe) @@ -403,7 +403,7 @@ operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -429,7 +429,7 @@ ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] 
operations[-2].setfailargs([t, z]) - cpu.compile_loop([x, y], operations, looptoken) + cpu.compile_loop(None, [x, y], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_int_value(deadframe, 0) == 0 assert self.cpu.get_int_value(deadframe, 1) == 55 @@ -488,7 +488,7 @@ ops[1].setfailargs([v_res]) # looptoken = JitCellToken() - self.cpu.compile_loop([v1, v2], ops, looptoken) + self.cpu.compile_loop(None, [v1, v2], ops, looptoken) for x, y, z in testcases: deadframe = self.cpu.execute_token(looptoken, x, y) fail = self.cpu.get_latest_descr(deadframe) @@ -1238,7 +1238,7 @@ print inputargs for op in operations: print op - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # deadframe = self.cpu.execute_token(looptoken, *values) fail = self.cpu.get_latest_descr(deadframe) @@ -1305,7 +1305,7 @@ operations[3].setfailargs(inputargs[:]) operations[3].setdescr(faildescr) # - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # values = [] S = lltype.GcStruct('S') @@ -1366,7 +1366,7 @@ operations[-3].setfailargs(fboxes) operations[-2].setfailargs(fboxes) looptoken = JitCellToken() - self.cpu.compile_loop(fboxes, operations, looptoken) + self.cpu.compile_loop(None, fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() @@ -1375,7 +1375,7 @@ ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] - self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, fboxes2, bridge, looptoken) args = [] for i in range(len(fboxes)): @@ -1407,7 +1407,7 @@ finish()""" loop = parse(loopops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) args = [1] args.append(longlong.getfloatstorage(132.25)) args.append(longlong.getfloatstorage(0.75)) @@ -1428,7 +1428,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] bridgeops[-2].setfailargs(fboxes[:]) - self.cpu.compile_bridge(loop.operations[-2].getdescr(), fboxes, + self.cpu.compile_bridge(None, loop.operations[-2].getdescr(), fboxes, bridgeops, looptoken) args = [1, longlong.getfloatstorage(132.25), @@ -1463,7 +1463,7 @@ ] operations[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for value in [-42, 0, 1, 10]: deadframe = self.cpu.execute_token(looptoken, value) @@ -1508,7 +1508,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for test1 in [-65, -42, -11, 0, 1, 10]: if test1 == -42 or combinaison[0] == 'b': @@ -1560,7 +1560,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for test1 in [65, 42, 11, 0, 1]: if test1 == 42 or combinaison[0] == 'b': @@ -1616,7 +1616,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # nan = 1e200 * 1e200 nan /= nan @@ -1675,7 +1675,7 @@ descr=faildescr)) looptoken = JitCellToken() # - self.cpu.compile_loop(inputargs, 
operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # args = [] for box in inputargs: @@ -1748,7 +1748,7 @@ looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) - self.cpu.compile_loop(unique_testcase_list, operations, + self.cpu.compile_loop(None, unique_testcase_list, operations, looptoken) args = [box.getfloatstorage() for box in unique_testcase_list] @@ -2065,7 +2065,7 @@ exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_ref_value(deadframe, 0) == xptr excvalue = self.cpu.grab_exc_value(deadframe) @@ -2088,7 +2088,7 @@ exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_int_value(deadframe, 0) == 1 excvalue = self.cpu.grab_exc_value(deadframe) @@ -2105,7 +2105,7 @@ ''' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_int_value(deadframe, 0) == 1 excvalue = self.cpu.grab_exc_value(deadframe) @@ -2284,7 +2284,7 @@ 'func_ptr': func_ptr, 'calldescr': calldescr}) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) f1 = longlong.getfloatstorage(1.2) f2 = longlong.getfloatstorage(3.4) frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, f1, f2) @@ -2329,7 +2329,7 @@ ] ops[2].setfailargs([i1, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2375,7 +2375,7 @@ ] ops[2].setfailargs([i1, i2, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2423,7 +2423,7 @@ ] ops[2].setfailargs([i1, f2, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2465,7 +2465,7 @@ ] ops[1].setfailargs([i1, i2]) looptoken = JitCellToken() - self.cpu.compile_loop([i1], ops, looptoken) + self.cpu.compile_loop(None, [i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, ord('G')) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2523,7 +2523,7 @@ ] ops[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1, i2, i3], ops, looptoken) args = [rffi.cast(lltype.Signed, raw), 2, 4, @@ -2580,7 +2580,7 @@ ResOperation(rop.FINISH, [i3], 
None, descr=BasicFinalDescr(0)) ] looptoken = JitCellToken() - self.cpu.compile_loop([i1, i2], ops, looptoken) + self.cpu.compile_loop(None, [i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') args = [buflen, rffi.cast(lltype.Signed, buffer)] @@ -2650,7 +2650,7 @@ ] ops[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([], ops, looptoken) + self.cpu.compile_loop(None, [], ops, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) @@ -2790,7 +2790,7 @@ ops.insert(-1, ResOperation(rop.SAME_AS, [b1], b1.clonebox())) looptoken = JitCellToken() - self.cpu.compile_loop(argboxes, ops, looptoken) + self.cpu.compile_loop(None, argboxes, ops, looptoken) # seen = [] deadframe = self.cpu.execute_token(looptoken, *argvalues_normal) @@ -2815,7 +2815,7 @@ ] ops[0].setfailargs([i1]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2842,7 +2842,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFinalDescr(3)) ] ops[0].setfailargs([]) - self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) + self.cpu.compile_bridge(None, faildescr, [i2], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2875,7 +2875,7 @@ ] ops[0].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i0], ops, looptoken) + self.cpu.compile_loop(None, [i0], ops, looptoken) # mark as failing self.cpu.invalidate_loop(looptoken) # attach a bridge @@ -2883,7 +2883,7 @@ ops2 = [ ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), ] - self.cpu.compile_bridge(faildescr, [], ops2, looptoken) + self.cpu.compile_bridge(None, faildescr, [], ops2, looptoken) # run: must not be caught in an infinite loop deadframe = self.cpu.execute_token(looptoken, 16) fail = self.cpu.get_latest_descr(deadframe) @@ -3091,7 +3091,7 @@ looptoken.outermost_jitdriver_sd = FakeJitDriverSD() finish_descr = loop.operations[-1].getdescr() self.cpu.done_with_this_frame_descr_int = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( @@ -3109,7 +3109,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_int_value(deadframe, 0) == 13 @@ -3119,7 +3119,7 @@ del called[:] self.cpu.done_with_this_frame_descr_int = finish_descr othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_int_value(deadframe, 0) == 97 @@ -3157,7 +3157,7 @@ loop = parse(ops) looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = 
lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( @@ -3171,7 +3171,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) deadframe = self.cpu.execute_token(othertoken, sys.maxint - 1) assert self.cpu.get_int_value(deadframe, 0) == 3 @@ -3209,7 +3209,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.done_with_this_frame_descr_float = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(2.3)] deadframe = self.cpu.execute_token(looptoken, *args) @@ -3223,7 +3223,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(3.2)] deadframe = self.cpu.execute_token(othertoken, *args) @@ -3235,7 +3235,7 @@ del called[:] self.cpu.done_with_this_frame_descr_float = finish_descr othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(4.2)] deadframe = self.cpu.execute_token(othertoken, *args) @@ -3298,7 +3298,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.done_with_this_frame_descr_float = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) finish_descr = loop.operations[-1].getdescr() args = [longlong.getfloatstorage(1.25), longlong.getfloatstorage(2.35)] @@ -3315,7 +3315,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken args = [longlong.getfloatstorage(1.25), @@ -3334,7 +3334,7 @@ loop2 = parse(ops) looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() - self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) finish_descr2 = loop2.operations[-1].getdescr() # install it @@ -3694,7 +3694,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # overflowing value: deadframe = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) fail = self.cpu.get_latest_descr(deadframe) @@ -3747,7 +3747,7 @@ operations[3].setfailargs([i1]) operations[6].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -3759,7 +3759,7 @@ ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] - self.cpu.compile_bridge(faildescr, inputargs2, operations2, looptoken) + 
self.cpu.compile_bridge(None, faildescr, inputargs2, operations2, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -3776,7 +3776,7 @@ descr = BasicFinalDescr() loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) for inp, outp in [(2,2), (-3, 0)]: deadframe = self.cpu.execute_token(looptoken, inp) assert outp == self.cpu.get_int_value(deadframe, 0) @@ -3805,8 +3805,8 @@ bridge = parse(bridge_ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.assembler.set_debug(False) - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - bridge_info = self.cpu.compile_bridge(faildescr, bridge.inputargs, + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) + bridge_info = self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) self.cpu.assembler.set_debug(True) # always on untranslated @@ -3850,7 +3850,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFinalDescr(1234)), ] operations[1].setfailargs([i0]) - self.cpu.compile_loop(inputargs, operations, looptoken1) + self.cpu.compile_loop(None, inputargs, operations, looptoken1) def func(a, b, c, d, e, f, g, h, i): assert a + 2 == b @@ -3904,14 +3904,14 @@ ResOperation(rop.JUMP, [i19], None, descr=targettoken1), ] operations2[-2].setfailargs([]) - self.cpu.compile_bridge(faildescr1, inputargs, operations2, looptoken1) + self.cpu.compile_bridge(None, faildescr1, inputargs, operations2, looptoken1) looptoken2 = JitCellToken() inputargs = [BoxInt()] operations3 = [ ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), ] - self.cpu.compile_loop(inputargs, operations3, looptoken2) + self.cpu.compile_loop(None, inputargs, operations3, looptoken2) deadframe = self.cpu.execute_token(looptoken2, -9) fail = self.cpu.get_latest_descr(deadframe) @@ -3928,11 +3928,11 @@ operations[0].setfailargs([]) looptoken = JitCellToken() inputargs = [t_box] - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) operations = [ ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(99)) ] - self.cpu.compile_bridge(faildescr, [], operations, looptoken) + self.cpu.compile_bridge(None, faildescr, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, null_box.getref_base()) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 99 @@ -3960,7 +3960,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_int_value(deadframe, 0) @@ -3990,7 +3990,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_float_value(deadframe, 0) @@ -4020,7 +4020,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, 
loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_int_value(deadframe, 0) @@ -4052,7 +4052,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, value) result = rawstorage.raw_storage_getitem(T, p, 16) @@ -4084,7 +4084,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, longlong.getfloatstorage(value)) @@ -4118,7 +4118,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, longlong.singlefloat2int(value)) @@ -4153,7 +4153,7 @@ ] ops[2].setfailargs([i2]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 @@ -4187,7 +4187,7 @@ finish(i1, descr=finaldescr) """, namespace={'finaldescr': finaldescr, 'calldescr2': calldescr2, 'guarddescr': guarddescr, 'func2_ptr': func2_ptr}) - self.cpu.compile_bridge(faildescr, bridge.inputargs, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) cpu = self.cpu @@ -4220,7 +4220,7 @@ guard_true(i0, descr=faildescr) [i1, i2, px] finish(i2, descr=finaldescr2) """, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) frame = self.cpu.execute_token(looptoken, 0, 0, 3) assert self.cpu.get_latest_descr(frame) is guarddescr from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU @@ -4269,7 +4269,7 @@ 'faildescr2': BasicFailDescr(1), 'xtp': xtp }) - self.cpu.compile_bridge(faildescr, bridge.inputargs, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) raise LLException(xtp, xptr) @@ -4290,7 +4290,7 @@ 'faildescr': faildescr, 'finaldescr2': BasicFinalDescr(1)}) - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) frame = self.cpu.execute_token(looptoken, 1, 2, 3) descr = self.cpu.get_latest_descr(frame) assert descr.identifier == 42 diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -239,9 +239,9 @@ print >>s, ' operations[%d].setfailargs([%s])' % (i, fa) if fail_descr is None: print >>s, ' looptoken = JitCellToken()' - print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' + print >>s, ' cpu.compile_loop(None, inputargs, operations, looptoken)' else: - print >>s, ' cpu.compile_bridge(%s, inputargs, operations, looptoken)' % self.descr_counters[fail_descr] + print >>s, ' cpu.compile_bridge(None, %s, inputargs, 
operations, looptoken)' % self.descr_counters[fail_descr] if hasattr(self.loop, 'inputargs'): vals = [] for i, v in enumerate(self.loop.inputargs): @@ -643,7 +643,7 @@ self.builder = builder self.loop = loop dump(loop) - cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) + cpu.compile_loop(None, loop.inputargs, loop.operations, loop._jitcelltoken) if self.output: builder.print_loop(self.output) @@ -715,7 +715,7 @@ if box not in self.loop.inputargs: box = box.constbox() args.append(box) - self.cpu.compile_loop(self.loop.inputargs, + self.cpu.compile_loop(None, self.loop.inputargs, [ResOperation(rop.JUMP, args, None, descr=self.loop._targettoken)], self._initialjumploop_celltoken) @@ -851,7 +851,7 @@ if r.random() < .05: return False dump(subloop) - self.builder.cpu.compile_bridge(fail_descr, fail_args, + self.builder.cpu.compile_bridge(None, fail_descr, fail_args, subloop.operations, self.loop._jitcelltoken) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -434,7 +434,8 @@ else: self.wb_slowpath[withcards + 2 * withfloats] = rawstart - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, + log): '''adds the following attributes to looptoken: _ll_function_addr (address of the generated func, as an int) _ll_loop_code (debug: addr of the start of the ResOps) @@ -467,8 +468,8 @@ # self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - operations = regalloc.prepare_loop(inputargs, operations, looptoken, - clt.allgcrefs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -498,6 +499,9 @@ looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset looptoken._ll_function_addr = rawstart + if logger: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) self.fixup_target_tokens(rawstart) self.teardown() @@ -509,7 +513,7 @@ return AsmInfo(ops_offset, rawstart + looppos, size_excluding_failure_stuff - looppos) - def assemble_bridge(self, faildescr, inputargs, operations, + def assemble_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log): if not we_are_translated(): # Arguments should be unique @@ -544,6 +548,9 @@ ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + if logger: + logger.log_bridge(inputargs, operations, "rewritten", + ops_offset=ops_offset) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -88,15 +88,17 @@ lines = machine_code_dump(data, addr, self.backend_name, label_list) print ''.join(lines) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): - return self.assembler.assemble_loop(name, inputargs, operations, + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, + name=''): + return self.assembler.assemble_loop(logger, name, inputargs, operations, looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, + def 
compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(faildescr, inputargs, operations, + return self.assembler.assemble_bridge(logger, faildescr, inputargs, + operations, original_loop_token, log=log) def clear_latest_values(self, count): diff --git a/rpython/jit/backend/x86/test/test_regalloc2.py b/rpython/jit/backend/x86/test/test_regalloc2.py --- a/rpython/jit/backend/x86/test/test_regalloc2.py +++ b/rpython/jit/backend/x86/test/test_regalloc2.py @@ -32,7 +32,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 9) assert cpu.get_int_value(deadframe, 0) == (9 >> 3) assert cpu.get_int_value(deadframe, 1) == (~18) @@ -58,7 +58,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -10) assert cpu.get_int_value(deadframe, 0) == 0 assert cpu.get_int_value(deadframe, 1) == -1000 @@ -159,7 +159,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_int_value(deadframe, 0) == 0 @@ -271,7 +271,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert cpu.get_int_value(deadframe, 0) == 0 @@ -386,7 +386,7 @@ operations[4].setfailargs([v4, v8, v10, v2, v9, v7, v6, v1]) operations[8].setfailargs([v3, v9, v2, v6, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) loop_args = [1, -39, 46, 21, 16, 6, -4611686018427387905, 12, 14, 2] frame = cpu.execute_token(looptoken, *loop_args) assert cpu.get_int_value(frame, 0) == 46 @@ -493,7 +493,7 @@ operations[16].setfailargs([v5, v9]) operations[34].setfailargs([]) operations[37].setfailargs([v12, v19, v10, v7, v4, v8, v18, v15, v9]) - cpu.compile_bridge(faildescr1, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr1, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == -9223372036854775766 assert cpu.get_int_value(frame, 1) == 0 @@ -583,7 +583,7 @@ operations[0].setfailargs([]) operations[8].setfailargs([tmp23, v5, v3, v11, v6]) operations[30].setfailargs([v6]) - cpu.compile_bridge(faildescr6, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr6, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == -9223372036854775808 v1 = BoxInt() @@ -607,6 +607,6 @@ ResOperation(rop.FINISH, [], None, descr=finishdescr13), ] operations[4].setfailargs([v2]) - cpu.compile_bridge(faildescr10, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr10, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == 10 diff --git 
a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -287,7 +287,7 @@ ] ops[-2].setfailargs([i1]) looptoken = JitCellToken() - self.cpu.compile_loop([b], ops, looptoken) + self.cpu.compile_loop(None, [b], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_int_value(deadframe, 0) if guard == rop.GUARD_FALSE: @@ -333,7 +333,7 @@ ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, ops, looptoken) + self.cpu.compile_loop(None, inputargs, ops, looptoken) inputvalues = [box.value for box in inputargs] deadframe = self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_int_value(deadframe, 0) @@ -377,7 +377,7 @@ ] inputargs = [i0] operations[-2].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" assert loopaddress <= looptoken._ll_loop_code @@ -393,7 +393,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) name, address, size = agent.functions[1] assert name == "Bridge # 0: bye (loop counter 1)" # Would be exactly ==, but there are some guard failure recovery @@ -422,7 +422,7 @@ ] inputargs = [i0] debug._log = dlog = debug.DebugLog() - info = self.cpu.compile_loop(inputargs, operations, looptoken) + info = self.cpu.compile_loop(None, inputargs, operations, looptoken) ops_offset = info.ops_offset debug._log = None # @@ -508,7 +508,7 @@ ops[5].setfailargs([]) ops[7].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i1, i2], ops, looptoken) + self.cpu.compile_loop(None, [i1, i2], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 123450, 123408) fail = self.cpu.get_latest_descr(deadframe) @@ -549,7 +549,7 @@ try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() - self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.compile_loop(None, ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -302,14 +302,16 @@ log=True, name=''): metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', name=name) - return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, + return metainterp_sd.cpu.compile_loop(metainterp_sd.logger_ops, + inputargs, operations, looptoken, log=log, name=name) def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True): metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling") assert isinstance(faildescr, AbstractFailDescr) - return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, + return metainterp_sd.cpu.compile_bridge(metainterp_sd.logger_ops, + faildescr, inputargs, operations, original_loop_token, log=log) def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): @@ -932,7 +934,7 @@ ] operations[1].setfailargs([]) operations = 
get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, jitcell_token, log=False) + cpu.compile_loop(None, inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests memory_manager.keep_loop_alive(jitcell_token) return jitcell_token diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -17,6 +17,10 @@ debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") + elif type == "rewritten": + debug_start("jit-log-rewritten-loop") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-rewritten-loop") elif number == -2: debug_start("jit-log-compiling-loop") logops = self._log_operations(inputargs, operations, ops_offset) @@ -35,6 +39,10 @@ debug_start("jit-log-noopt-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") + elif extra == "rewritten": + debug_start("jit-log-rewritten-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-rewritten-bridge") elif extra == "compiling": debug_start("jit-log-compiling-bridge") logops = self._log_operations(inputargs, operations, ops_offset) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -52,7 +52,7 @@ # otherwise, the operation remains self.emit_operation(op) if op.returns_bool_result(): - self.optimizer.bool_boxes[self.getvalue(op.result)] = None + self.optimizer.bool_boxes[self.getvalue(op.result)] = None if nextop: self.emit_operation(nextop) diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -17,7 +17,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): From noreply at buildbot.pypy.org Mon Aug 19 17:37:49 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 17:37:49 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: use test8 instead of cmp8 Message-ID: <20130819153749.3E9041C087E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66217:0123e26474d0 Date: 2013-08-19 15:05 +0200 http://bitbucket.org/pypy/pypy/changeset/0123e26474d0/ Log: use test8 instead of cmp8 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2339,7 +2339,7 @@ mc.POP_r(X86_64_SCRATCH_REG.value) # _Bool return type only sets lower 8 bits of return value sl = X86_64_SCRATCH_REG.lowest8bits() - mc.CMP8_ri(sl.value, 0) + mc.TEST8_rr(sl.value, sl.value) # # END SLOWPATH # diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -586,6 +586,7 @@ TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) TEST8_bi = insn(rex_nw, '\xF6', orbyte(0<<3), stack_bp(1), immediate(2, 'b')) 
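The switch above from CMP8_ri(reg, 0) to TEST8_rr(reg, reg) relies on both
instructions setting the zero flag the same way for a boolean result, while
the register-register TEST form needs no immediate byte.  A standalone sanity
check of that flag equivalence, plain Python and independent of the rx86
encoder:

    # ZF after 'test r8, r8' comes from (reg & reg); after 'cmp r8, 0' it
    # comes from (reg - 0).  The two agree for every 8-bit value.
    for value in range(256):
        zf_test = ((value & value) & 0xFF) == 0
        zf_cmp = ((value - 0) & 0xFF) == 0
        assert zf_test == zf_cmp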
TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), abs_(1), immediate(2, 'b')) + TEST8_rr = insn(rex_fw, '\x84', byte_register(2,8), byte_register(1),'\xC0') TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') BTS_mr = insn(rex_w, '\x0F\xAB', register(2,8), mem_reg_plus_const(1)) From noreply at buildbot.pypy.org Mon Aug 19 17:37:50 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 17:37:50 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: in-progress Message-ID: <20130819153750.906541C087E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66218:9c9b91fd9c90 Date: 2013-08-19 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/9c9b91fd9c90/ Log: in-progress diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -170,7 +170,12 @@ inevitable, [], RESULT=lltype.Void) def ptr_eq(x, y): - self.ptr_eq_called_on.append((x, y)) + print "=== ptr_eq", x, y + print "=== ptr_eq", hex(rffi.cast(lltype.Signed, x)), hex(rffi.cast(lltype.Signed, y)) + + import pdb;pdb.set_trace() + self.ptr_eq_called_on.append((rffi.cast(lltype.Signed, x), + rffi.cast(lltype.Signed, y))) return x == y self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, RESULT=lltype.Bool) @@ -528,10 +533,13 @@ looptoken = JitCellToken() c_loop = cpu.compile_loop(inputargs + [i1], operations, looptoken) - print c_loop + args = [s for i, s in enumerate((s1, s2)) if not isinstance((p1, p2)[i], Const)] + [7] - + print "======" + print "inputargs:", inputargs+[i1], args + print "\n".join(map(str,c_loop[1])) + frame = self.cpu.execute_token(looptoken, *args) frame = rffi.cast(JITFRAMEPTR, frame) frame_adr = rffi.cast(lltype.Signed, frame.jf_descr) @@ -540,19 +548,19 @@ # CHECK: a, b = s1, s2 if isinstance(p1, Const): - s1 = p1.value + a = p1.value if isinstance(p2, Const): - s2 = p2.value + b = p2.value - if s1 == s2 or \ - rffi.cast(lltype.Signed, s1) == 0 or \ - rffi.cast(lltype.Signed, s2) == 0: - assert (s1, s2) not in called_on + if a == b or \ + rffi.cast(lltype.Signed, a) == 0 or \ + rffi.cast(lltype.Signed, b) == 0: + assert (a, b) not in called_on else: - assert [(s1, s2)] == called_on + assert [(a, b)] == called_on if guard is not None: - if s1 == s2: + if a == b: if guard in (rop.GUARD_TRUE, rop.GUARD_VALUE): assert not guard_failed else: From noreply at buildbot.pypy.org Mon Aug 19 17:37:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Aug 2013 17:37:51 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: test_cast_gcref_to_int: when running untranslated, accept Message-ID: <20130819153751.D31711C087E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r66219:40a9a595d258 Date: 2013-08-19 16:41 +0200 http://bitbucket.org/pypy/pypy/changeset/40a9a595d258/ Log: test_cast_gcref_to_int: when running untranslated, accept also _llgcopaque objects and force a cast to integer. 
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -392,6 +392,10 @@ if we_are_translated(): return lltype.cast_ptr_to_int(gcref) else: + from rpython.rtyper.lltypesystem.ll2ctypes import _llgcopaque + if isinstance(gcref._obj, _llgcopaque): + from rpython.rtyper.lltypesystem import rffi + return rffi.cast(lltype.Signed, gcref) return id(gcref._x) def dump_rpy_heap(fd): diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py --- a/rpython/rlib/test/test_rgc.py +++ b/rpython/rlib/test/test_rgc.py @@ -228,3 +228,8 @@ x1 = X() n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) assert n >= 8 and n <= 64 + +def test_cast_gcref_to_int(): + from rpython.rtyper.lltypesystem import rffi + x = rffi.cast(llmemory.GCREF, 123456) + assert rgc.cast_gcref_to_int(x) == 123456 From noreply at buildbot.pypy.org Mon Aug 19 17:37:53 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 Aug 2013 17:37:53 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: remove gcref mess (test_stm_integration runs again) Message-ID: <20130819153753.1B5631C087E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66220:e81dca223118 Date: 2013-08-19 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/e81dca223118/ Log: remove gcref mess (test_stm_integration runs again) diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -102,11 +102,10 @@ for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): - p = rgc.cast_instance_to_gcref(v.value) - v.imm_value = rgc._make_sure_does_not_move(p) + v.imm_value = rgc._make_sure_does_not_move(v.value) # XXX: fix for stm, record imm_values and unregister # them again (below too): - gcrefs_output_list.append(p) + gcrefs_output_list.append(v.value) if self.stm: return # for descr, we do it on the fly in assembler.py diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -27,7 +27,10 @@ import ctypes def cast_to_int(obj): - return rgc.cast_gcref_to_int(rgc.cast_instance_to_gcref(obj)) + if isinstance(obj, rgc._GcRef): + return rgc.cast_gcref_to_int(obj) + else: + return rffi.cast(lltype.Signed, obj) CPU = getcpuclass() @@ -170,12 +173,8 @@ inevitable, [], RESULT=lltype.Void) def ptr_eq(x, y): - print "=== ptr_eq", x, y - print "=== ptr_eq", hex(rffi.cast(lltype.Signed, x)), hex(rffi.cast(lltype.Signed, y)) - - import pdb;pdb.set_trace() - self.ptr_eq_called_on.append((rffi.cast(lltype.Signed, x), - rffi.cast(lltype.Signed, y))) + print "=== ptr_eq", hex(cast_to_int(x)), hex(cast_to_int(y)) + self.ptr_eq_called_on.append((cast_to_int(x), cast_to_int(y))) return x == y self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, RESULT=lltype.Bool) @@ -546,15 +545,13 @@ guard_failed = frame_adr != id(finaldescr) # CHECK: - a, b = s1, s2 + a, b = cast_to_int(s1), cast_to_int(s2) if isinstance(p1, Const): - a = p1.value + a = cast_to_int(p1.value) if isinstance(p2, Const): - b = p2.value + b = cast_to_int(p2.value) - if a == b or \ - rffi.cast(lltype.Signed, a) == 0 or \ - rffi.cast(lltype.Signed, b) == 0: + if a == b or a == 0 or b == 0: assert (a, b) not in called_on else: assert [(a, b)] == called_on diff --git a/rpython/rlib/rgc.py 
b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -99,7 +99,11 @@ on objects that are already a bit old, so have a chance to be already non-movable.""" if not we_are_translated(): - return cast_gcref_to_int(p) + if isinstance(p, _GcRef): + return cast_gcref_to_int(p) + else: + from rpython.rtyper.lltypesystem import rffi + return rffi.cast(lltype.Signed, p) if stm_is_enabled(): from rpython.rtyper.lltypesystem.lloperation import llop @@ -392,10 +396,6 @@ if we_are_translated(): return lltype.cast_ptr_to_int(gcref) else: - from rpython.rtyper.lltypesystem.ll2ctypes import _llgcopaque - if isinstance(gcref._obj, _llgcopaque): - from rpython.rtyper.lltypesystem import rffi - return rffi.cast(lltype.Signed, gcref) return id(gcref._x) def dump_rpy_heap(fd): diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py --- a/rpython/rlib/test/test_rgc.py +++ b/rpython/rlib/test/test_rgc.py @@ -229,7 +229,4 @@ n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) assert n >= 8 and n <= 64 -def test_cast_gcref_to_int(): - from rpython.rtyper.lltypesystem import rffi - x = rffi.cast(llmemory.GCREF, 123456) - assert rgc.cast_gcref_to_int(x) == 123456 + From noreply at buildbot.pypy.org Mon Aug 19 23:14:50 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:14:50 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Inline FSFrame.unrollstack() Message-ID: <20130819211450.DC2BC1C10AB@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66223:bf5bae7631fa Date: 2013-08-18 19:33 +0100 http://bitbucket.org/pypy/pypy/changeset/bf5bae7631fa/ Log: Inline FSFrame.unrollstack() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -411,14 +411,6 @@ self.locals_stack_w[:len(items_w)] = items_w self.dropvaluesuntil(len(items_w)) - def unrollstack(self, unroller): - while self.blockstack: - block = self.blockstack.pop() - if isinstance(unroller, block.handles): - return block - block.cleanupstack(self) - return None - def getstate(self): # getfastscope() can return real None, for undefined locals data = self.save_locals_stack() @@ -1158,11 +1150,12 @@ WHY_YIELD not needed """ def unroll(self, frame): - block = frame.unrollstack(self) - if block is None: - return self.nomoreblocks() - else: - return block.handle(frame, self) + while frame.blockstack: + block = frame.blockstack.pop() + if isinstance(self, block.handles): + return block.handle(frame, self) + block.cleanupstack(frame) + return self.nomoreblocks() def nomoreblocks(self): raise BytecodeCorruption("misplaced bytecode - should not return") From noreply at buildbot.pypy.org Mon Aug 19 23:14:52 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:14:52 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: create class PureOperator Message-ID: <20130819211452.486F81C1356@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66224:5ee688b7ae5b Date: 2013-08-08 17:50 +0100 http://bitbucket.org/pypy/pypy/changeset/5ee688b7ae5b/ Log: create class PureOperator diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -16,13 +16,12 @@ func2op = {} class SpaceOperator(object): - def __init__(self, name, arity, symbol, pyfunc, pure=False, - can_overflow=False): + pure = False + def __init__(self, 
name, arity, symbol, pyfunc, can_overflow=False): self.name = name self.arity = arity self.symbol = symbol self.pyfunc = pyfunc - self.pure = pure self.can_overflow = can_overflow self.canraise = [] @@ -41,10 +40,14 @@ return getattr(space, self.name)(*args_w) return sc_operator +class PureOperator(SpaceOperator): + pure = True + def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) - oper = SpaceOperator(name, arity, symbol, pyfunc, pure, can_overflow=ovf) + cls = PureOperator if pure else SpaceOperator + oper = cls(name, arity, symbol, pyfunc, can_overflow=ovf) setattr(op, name, oper) if pyfunc is not None: func2op[pyfunc] = oper From noreply at buildbot.pypy.org Mon Aug 19 23:14:49 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:14:49 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Create SuspendedUnroller.unroll() and use it to simplify some code Message-ID: <20130819211449.8C6741C087E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66222:3070ec015774 Date: 2013-08-12 01:50 +0100 http://bitbucket.org/pypy/pypy/changeset/3070ec015774/ Log: Create SuspendedUnroller.unroll() and use it to simplify some code diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -419,12 +419,6 @@ block.cleanupstack(self) return None - def unrollstack_and_jump(self, unroller): - block = self.unrollstack(unroller) - if block is None: - raise BytecodeCorruption("misplaced bytecode - should not return") - return block.handle(self, unroller) - def getstate(self): # getfastscope() can return real None, for undefined locals data = self.save_locals_stack() @@ -586,12 +580,7 @@ def handle_operation_error(self, operr): unroller = SApplicationException(operr) - block = self.unrollstack(unroller) - if block is None: - raise operr - else: - next_instr = block.handle(self, unroller) - return next_instr + return unroller.unroll(self) def getlocalvarname(self, index): return self.pycode.co_varnames[index] @@ -609,11 +598,11 @@ raise FlowingError(self, "This operation is not RPython") def BREAK_LOOP(self, oparg, next_instr): - return self.unrollstack_and_jump(SBreakLoop.singleton) + return SBreakLoop.singleton.unroll(self) def CONTINUE_LOOP(self, startofloop, next_instr): unroller = SContinueLoop(startofloop) - return self.unrollstack_and_jump(unroller) + return unroller.unroll(self) def cmp_lt(self, w_1, w_2): return self.space.lt(w_1, w_2) @@ -695,12 +684,7 @@ def RETURN_VALUE(self, oparg, next_instr): w_returnvalue = self.popvalue() unroller = SReturnValue(w_returnvalue) - block = self.unrollstack(unroller) - if block is None: - raise Return(w_returnvalue) - else: - next_instr = block.handle(self, unroller) - return next_instr # now inside a 'finally' block + return unroller.unroll(self) def END_FINALLY(self, oparg, next_instr): # unlike CPython, there are two statically distinct cases: the @@ -718,20 +702,12 @@ return elif isinstance(w_top, SuspendedUnroller): # case of a finally: block - return self.unroll_finally(w_top) + return w_top.unroll(self) else: # case of an except: block. 
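The pattern these flowcontext changesets converge on: each kind of
SuspendedUnroller walks the frame's block stack itself instead of the
bytecode handlers doing it by hand.  A condensed standalone model of that
control flow (simplified names, not the real classes; 'handled_by' plays the
role of the 'handles' attribute from the "kill unroller.kind" changeset):

    class Unroller(object):
        # subclasses list the block types that may catch them
        handled_by = ()

        def nomoreblocks(self):
            # e.g. SReturnValue raises Return(w_result) at this point
            raise RuntimeError("no handler block found")

        def unroll(self, frame):
            while frame.blockstack:
                block = frame.blockstack.pop()
                if isinstance(self, block.handled_by):
                    return block.handle(frame, self)
                block.cleanup(frame)
            return self.nomoreblocks()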
We popped the exception type self.popvalue() # Now we pop the exception value unroller = self.popvalue() - return self.unroll_finally(unroller) - - def unroll_finally(self, unroller): - # go on unrolling the stack - block = self.unrollstack(unroller) - if block is None: - unroller.nomoreblocks() - else: - return block.handle(self, unroller) + return unroller.unroll(self) def POP_BLOCK(self, oparg, next_instr): block = self.blockstack.pop() @@ -1181,6 +1157,13 @@ WHY_CONTINUE, SContinueLoop WHY_YIELD not needed """ + def unroll(self, frame): + block = frame.unrollstack(self) + if block is None: + return self.nomoreblocks() + else: + return block.handle(frame, self) + def nomoreblocks(self): raise BytecodeCorruption("misplaced bytecode - should not return") From noreply at buildbot.pypy.org Mon Aug 19 23:14:47 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:14:47 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: kill unroller.kind Message-ID: <20130819211447.F38E41C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66221:c953a3ba4181 Date: 2013-08-12 01:07 +0100 http://bitbucket.org/pypy/pypy/changeset/c953a3ba4181/ Log: kill unroller.kind diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -411,16 +411,16 @@ self.locals_stack_w[:len(items_w)] = items_w self.dropvaluesuntil(len(items_w)) - def unrollstack(self, unroller_kind): + def unrollstack(self, unroller): while self.blockstack: block = self.blockstack.pop() - if (block.handling_mask & unroller_kind) != 0: + if isinstance(unroller, block.handles): return block block.cleanupstack(self) return None def unrollstack_and_jump(self, unroller): - block = self.unrollstack(unroller.kind) + block = self.unrollstack(unroller) if block is None: raise BytecodeCorruption("misplaced bytecode - should not return") return block.handle(self, unroller) @@ -585,11 +585,11 @@ return self.handle_operation_error(operr) def handle_operation_error(self, operr): - block = self.unrollstack(SApplicationException.kind) + unroller = SApplicationException(operr) + block = self.unrollstack(unroller) if block is None: raise operr else: - unroller = SApplicationException(operr) next_instr = block.handle(self, unroller) return next_instr @@ -694,11 +694,11 @@ def RETURN_VALUE(self, oparg, next_instr): w_returnvalue = self.popvalue() - block = self.unrollstack(SReturnValue.kind) + unroller = SReturnValue(w_returnvalue) + block = self.unrollstack(unroller) if block is None: raise Return(w_returnvalue) else: - unroller = SReturnValue(w_returnvalue) next_instr = block.handle(self, unroller) return next_instr # now inside a 'finally' block @@ -727,7 +727,7 @@ def unroll_finally(self, unroller): # go on unrolling the stack - block = self.unrollstack(unroller.kind) + block = self.unrollstack(unroller) if block is None: unroller.nomoreblocks() else: @@ -1191,7 +1191,6 @@ class SReturnValue(SuspendedUnroller): """Signals a 'return' statement. Argument is the wrapped object to return.""" - kind = 0x01 def __init__(self, w_returnvalue): self.w_returnvalue = w_returnvalue @@ -1209,7 +1208,6 @@ class SApplicationException(SuspendedUnroller): """Signals an application-level exception (i.e. 
an OperationException).""" - kind = 0x02 def __init__(self, operr): self.operr = operr @@ -1226,7 +1224,6 @@ class SBreakLoop(SuspendedUnroller): """Signals a 'break' statement.""" - kind = 0x04 def state_unpack_variables(self, space): return [] @@ -1240,7 +1237,6 @@ class SContinueLoop(SuspendedUnroller): """Signals a 'continue' statement. Argument is the bytecode position of the beginning of the loop.""" - kind = 0x08 def __init__(self, jump_to): self.jump_to = jump_to @@ -1281,8 +1277,7 @@ class LoopBlock(FrameBlock): """A loop block. Stores the end-of-loop pointer in case of 'break'.""" - _opname = 'SETUP_LOOP' - handling_mask = SBreakLoop.kind | SContinueLoop.kind + handles = (SBreakLoop, SContinueLoop) def handle(self, frame, unroller): if isinstance(unroller, SContinueLoop): @@ -1299,8 +1294,7 @@ class ExceptBlock(FrameBlock): """An try:except: block. Stores the position of the exception handler.""" - _opname = 'SETUP_EXCEPT' - handling_mask = SApplicationException.kind + handles = SApplicationException def handle(self, frame, unroller): # push the exception to the value stack for inspection by the @@ -1320,8 +1314,7 @@ class FinallyBlock(FrameBlock): """A try:finally: block. Stores the position of the exception handler.""" - _opname = 'SETUP_FINALLY' - handling_mask = -1 # handles every kind of SuspendedUnroller + handles = SuspendedUnroller def handle(self, frame, unroller): # any abnormal reason for unrolling a finally: triggers the end of From noreply at buildbot.pypy.org Mon Aug 19 23:14:56 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:14:56 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Create a decorator to register flowspace special-cases Message-ID: <20130819211456.3FDDA1C300E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66227:2e539dd83216 Date: 2013-08-08 21:14 +0100 http://bitbucket.org/pypy/pypy/changeset/2e539dd83216/ Log: Create a decorator to register flowspace special-cases diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -1,8 +1,22 @@ from rpython.flowspace.model import Constant -from rpython.flowspace.operation import func2op, op +from rpython.flowspace.operation import func2op from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated +SPECIAL_CASES = {} + +def register_flow_sc(func): + """Decorator triggering special-case handling of ``func``. + + When the flow graph builder sees ``func``, it calls the decorated function + with ``decorated_func(space, *args_w)``, where ``args_w`` is a sequence of + flow objects (Constants or Variables). 
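Registering a special case then looks like the registrations further down in
this file; a new one would be written along these lines (my_helper and
sc_my_helper are made-up names, shown only as a sketch):

    from rpython.flowspace.model import Constant
    from rpython.flowspace.specialcase import register_flow_sc

    def my_helper():            # hypothetical RPython-level helper
        return 42

    @register_flow_sc(my_helper)
    def sc_my_helper(space, args_w):
        # the flow-graph builder calls this instead of flowing into
        # my_helper(); args_w holds Constants/Variables and the return
        # value must again be a flow object
        return Constant(42)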
+ """ + def decorate(sc_func): + SPECIAL_CASES[func] = sc_func + return decorate + + at register_flow_sc(__import__) def sc_import(space, args_w): assert len(args_w) > 0 and len(args_w) <= 5, 'import needs 1 to 5 arguments' args = [space.unwrap(arg) for arg in args_w] @@ -33,6 +47,7 @@ # _________________________________________________________________________ + at register_flow_sc(r_uint) def sc_r_uint(space, args_w): # special case to constant-fold r_uint(32-bit-constant) # (normally, the 32-bit constant is a long, and is not allowed to @@ -42,9 +57,11 @@ return Constant(r_uint(w_value.value)) return space.frame.do_operation('simple_call', space.wrap(r_uint), w_value) + at register_flow_sc(we_are_translated) def sc_we_are_translated(space, args_w): return Constant(True) + at register_flow_sc(locals) def sc_locals(space, args): raise Exception( "A function calling locals() is not RPython. " @@ -54,8 +71,5 @@ "pytest.ini from the root of the PyPy repository into your " "own project.") -SPECIAL_CASES = {__import__: sc_import, r_uint: sc_r_uint, - we_are_translated: sc_we_are_translated, - locals: sc_locals} for fn, oper in func2op.items(): - SPECIAL_CASES[fn] = oper.make_sc() + register_flow_sc(fn)(oper.make_sc()) From noreply at buildbot.pypy.org Mon Aug 19 23:14:54 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:14:54 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Turn make_op() into the .eval method of SpaceOperator Message-ID: <20130819211454.CC3231C146E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66226:ef6bddfa2279 Date: 2013-08-09 05:57 +0100 http://bitbucket.org/pypy/pypy/changeset/ef6bddfa2279/ Log: Turn make_op() into the .eval method of SpaceOperator diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -386,61 +386,14 @@ raise FlowingError(self.frame, const(message)) return const(value) -def make_impure_op(oper): - def generic_operator(self, *args_w): - if len(args_w) != oper.arity: - raise TypeError(oper.name + " got the wrong number of arguments") - w_result = self.frame.do_operation_with_implicit_exceptions(oper.name, *args_w) - return w_result - return generic_operator - def make_op(oper): - """Add function operation to the flow space.""" - name = oper.name - func = oper.pyfunc - - def generic_operator(self, *args_w): - assert len(args_w) == oper.arity, name + " got the wrong number of arguments" - args = [] - if all(w_arg.foldable() for w_arg in args_w): - args = [w_arg.value for w_arg in args_w] - # All arguments are constants: call the operator now - try: - result = func(*args) - except Exception, e: - etype = e.__class__ - msg = "%s%r always raises %s: %s" % ( - name, tuple(args), etype, e) - raise FlowingError(self.frame, msg) - else: - # don't try to constant-fold operations giving a 'long' - # result. The result is probably meant to be sent to - # an intmask(), but the 'long' constant confuses the - # annotator a lot. 
- if oper.can_overflow and type(result) is long: - pass - # don't constant-fold getslice on lists, either - elif name == 'getslice' and type(result) is list: - pass - # otherwise, fine - else: - try: - return self.wrap(result) - except WrapException: - # type cannot sanely appear in flow graph, - # store operation with variable result instead - pass - w_result = self.frame.do_operation_with_implicit_exceptions(name, *args_w) - return w_result + def generic_operator(self, *args): + return oper.eval(self.frame, *args) return generic_operator for oper in operation.op.__dict__.values(): if getattr(FlowObjSpace, oper.name, None) is None: - if oper.pure: - op_method = make_op(oper) - else: - op_method = make_impure_op(oper) - setattr(FlowObjSpace, oper.name, op_method) + setattr(FlowObjSpace, oper.name, make_op(oper)) def build_flow(func, space=FlowObjSpace()): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -8,7 +8,7 @@ import operator from rpython.tool.sourcetools import compile2 from rpython.rlib.rarithmetic import ovfcheck -from rpython.flowspace.model import Constant, const +from rpython.flowspace.model import Constant, WrapException, const class _OpHolder(object): pass op = _OpHolder() @@ -40,9 +40,50 @@ return getattr(space, self.name)(*args_w) return sc_operator + def eval(self, frame, *args_w): + if len(args_w) != self.arity: + raise TypeError(self.name + " got the wrong number of arguments") + w_result = frame.do_operation_with_implicit_exceptions(self.name, *args_w) + return w_result + class PureOperator(SpaceOperator): pure = True + def eval(self, frame, *args_w): + if len(args_w) != self.arity: + raise TypeError(self.name + " got the wrong number of arguments") + args = [] + if all(w_arg.foldable() for w_arg in args_w): + args = [w_arg.value for w_arg in args_w] + # All arguments are constants: call the operator now + try: + result = self.pyfunc(*args) + except Exception as e: + from rpython.flowspace.flowcontext import FlowingError + msg = "%s%r always raises %s: %s" % ( + self.name, tuple(args), type(e), e) + raise FlowingError(frame, msg) + else: + # don't try to constant-fold operations giving a 'long' + # result. The result is probably meant to be sent to + # an intmask(), but the 'long' constant confuses the + # annotator a lot. 
+ if self.can_overflow and type(result) is long: + pass + # don't constant-fold getslice on lists, either + elif self.name == 'getslice' and type(result) is list: + pass + # otherwise, fine + else: + try: + return const(result) + except WrapException: + # type cannot sanely appear in flow graph, + # store operation with variable result instead + pass + w_result = frame.do_operation_with_implicit_exceptions(self.name, *args_w) + return w_result + def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) From noreply at buildbot.pypy.org Mon Aug 19 23:14:58 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:14:58 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Move special-case registrations next to the relevant definitions Message-ID: <20130819211458.8263E1C3046@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66228:f8b3a9be2076 Date: 2013-08-19 18:02 +0100 http://bitbucket.org/pypy/pypy/changeset/f8b3a9be2076/ Log: Move special-case registrations next to the relevant definitions diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -8,7 +8,6 @@ from rpython.tool.uid import uid, Hashable from rpython.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func -from rpython.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong, r_uint """ @@ -504,7 +503,8 @@ if not __debug__: return try: - + from rpython.rlib.rarithmetic import (is_valid_int, r_longlong, + r_ulonglong, r_uint) vars_previous_blocks = {} exitblocks = {graph.returnblock: 1, # retval diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -7,8 +7,8 @@ import __future__ import operator from rpython.tool.sourcetools import compile2 -from rpython.rlib.rarithmetic import ovfcheck from rpython.flowspace.model import Constant, WrapException, const +from rpython.flowspace.specialcase import register_flow_sc class _OpHolder(object): pass op = _OpHolder() @@ -97,6 +97,7 @@ if pyfunc is None: oper.pyfunc = operator_func if ovf: + from rpython.rlib.rarithmetic import ovfcheck ovf_func = lambda *args: ovfcheck(oper.pyfunc(*args)) add_operator(name + '_ovf', arity, symbol, pyfunc=ovf_func) @@ -288,6 +289,9 @@ if hasattr(__builtin__, 'next'): func2op[__builtin__.next] = op.next +for fn, oper in func2op.items(): + register_flow_sc(fn)(oper.make_sc()) + op_appendices = { OverflowError: 'ovf', diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -1,8 +1,3 @@ -from rpython.flowspace.model import Constant -from rpython.flowspace.operation import func2op -from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated - SPECIAL_CASES = {} def register_flow_sc(func): @@ -22,6 +17,16 @@ args = [space.unwrap(arg) for arg in args_w] return space.import_name(*args) + at register_flow_sc(locals) +def sc_locals(space, args): + raise Exception( + "A function calling locals() is not RPython. " + "Note that if you're translating code outside the PyPy " + "repository, a likely cause is that py.test's --assert=rewrite " + "mode is getting in the way. 
You should copy the file " + "pytest.ini from the root of the PyPy repository into your " + "own project.") + # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: @@ -44,32 +49,3 @@ s = '\n' import os os.write(1, s) - -# _________________________________________________________________________ - - at register_flow_sc(r_uint) -def sc_r_uint(space, args_w): - # special case to constant-fold r_uint(32-bit-constant) - # (normally, the 32-bit constant is a long, and is not allowed to - # show up in the flow graphs at all) - [w_value] = args_w - if isinstance(w_value, Constant): - return Constant(r_uint(w_value.value)) - return space.frame.do_operation('simple_call', space.wrap(r_uint), w_value) - - at register_flow_sc(we_are_translated) -def sc_we_are_translated(space, args_w): - return Constant(True) - - at register_flow_sc(locals) -def sc_locals(space, args): - raise Exception( - "A function calling locals() is not RPython. " - "Note that if you're translating code outside the PyPy " - "repository, a likely cause is that py.test's --assert=rewrite " - "mode is getting in the way. You should copy the file " - "pytest.ini from the root of the PyPy repository into your " - "own project.") - -for fn, oper in func2op.items(): - register_flow_sc(fn)(oper.make_sc()) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -10,6 +10,9 @@ import math import inspect from rpython.tool.sourcetools import rpython_wrapper +from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.flowspace.specialcase import register_flow_sc +from rpython.flowspace.model import Constant # specialize is a decorator factory for attaching _annspecialcase_ # attributes to functions: for example @@ -23,7 +26,6 @@ # def f(... 
# -from rpython.rtyper.extregistry import ExtRegistryEntry class _Specialize(object): def memo(self): @@ -278,7 +280,11 @@ def we_are_translated(): return False -# annotation -> True (replaced by the flow objspace) + + at register_flow_sc(we_are_translated) +def sc_we_are_translated(space, args_w): + return Constant(True) + def keepalive_until_here(*values): pass diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -33,6 +33,8 @@ import sys, struct from rpython.rtyper import extregistry from rpython.rlib import objectmodel +from rpython.flowspace.model import Constant, const +from rpython.flowspace.specialcase import register_flow_sc """ Long-term target: @@ -513,6 +515,16 @@ r_int = build_int('r_int', True, LONG_BIT) r_uint = build_int('r_uint', False, LONG_BIT) + at register_flow_sc(r_uint) +def sc_r_uint(space, args_w): + # (normally, the 32-bit constant is a long, and is not allowed to + # show up in the flow graphs at all) + [w_value] = args_w + if isinstance(w_value, Constant): + return Constant(r_uint(w_value.value)) + return space.frame.do_operation('simple_call', const(r_uint), w_value) + + r_longlong = build_int('r_longlong', True, 64) r_ulonglong = build_int('r_ulonglong', False, 64) From noreply at buildbot.pypy.org Mon Aug 19 23:14:53 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:14:53 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Replace FlowObjSpace.wrap() with a const() function defined in flowspace.model Message-ID: <20130819211453.914521C142B@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66225:f9a887280c0a Date: 2013-08-09 04:23 +0100 http://bitbucket.org/pypy/pypy/changeset/f9a887280c0a/ Log: Replace FlowObjSpace.wrap() with a const() function defined in flowspace.model diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -9,7 +9,7 @@ from rpython.tool.stdlib_opcode import host_bytecode_spec from rpython.flowspace.argument import CallSpec from rpython.flowspace.model import (Constant, Variable, Block, Link, - c_last_exception, SpaceOperation) + c_last_exception, SpaceOperation, const) from rpython.flowspace.framestate import (FrameState, recursively_unflatten, recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, @@ -350,7 +350,7 @@ if closure is None: self.closure = [] else: - self.closure = [self.space.wrap(c.cell_contents) for c in closure] + self.closure = [const(c.cell_contents) for c in closure] assert len(self.closure) == len(self.pycode.co_freevars) def init_locals_stack(self, code): @@ -420,13 +420,13 @@ else: data.append(self.last_exception.w_type) data.append(self.last_exception.w_value) - recursively_flatten(self.space, data) + recursively_flatten(data) return FrameState(data, self.blockstack[:], self.last_instr) def setstate(self, state): """ Reset the frame to the given state. 
""" data = state.mergeable[:] - recursively_unflatten(self.space, data) + recursively_unflatten(data) self.restore_locals_stack(data[:-2]) # Nones == undefined locals if data[-2] == Constant(None): assert data[-1] == Constant(None) @@ -578,7 +578,7 @@ return self.pycode.co_varnames[index] def getconstant_w(self, index): - return self.space.wrap(self.pycode.consts[index]) + return const(self.pycode.consts[index]) def getname_u(self, index): return self.pycode.names[index] @@ -811,7 +811,7 @@ # directly call manager.__enter__(), don't use special lookup functions # which don't make sense on the RPython type system. w_manager = self.peekvalue() - w_exit = self.space.getattr(w_manager, self.space.wrap("__exit__")) + w_exit = self.space.getattr(w_manager, const("__exit__")) self.settopvalue(w_exit) w_result = self.space.call_method(w_manager, "__enter__") block = WithBlock(self, next_instr + offsettoend) @@ -1174,11 +1174,11 @@ def nomoreblocks(self): raise Return(self.w_returnvalue) - def state_unpack_variables(self, space): + def state_unpack_variables(self): return [self.w_returnvalue] @staticmethod - def state_pack_variables(space, w_returnvalue): + def state_pack_variables(w_returnvalue): return SReturnValue(w_returnvalue) class SApplicationException(SuspendedUnroller): @@ -1191,21 +1191,21 @@ def nomoreblocks(self): raise self.operr - def state_unpack_variables(self, space): + def state_unpack_variables(self): return [self.operr.w_type, self.operr.w_value] @staticmethod - def state_pack_variables(space, w_type, w_value): + def state_pack_variables(w_type, w_value): return SApplicationException(FSException(w_type, w_value)) class SBreakLoop(SuspendedUnroller): """Signals a 'break' statement.""" - def state_unpack_variables(self, space): + def state_unpack_variables(self): return [] @staticmethod - def state_pack_variables(space): + def state_pack_variables(): return SBreakLoop.singleton SBreakLoop.singleton = SBreakLoop() @@ -1217,12 +1217,12 @@ def __init__(self, jump_to): self.jump_to = jump_to - def state_unpack_variables(self, space): - return [space.wrap(self.jump_to)] + def state_unpack_variables(self): + return [const(self.jump_to)] @staticmethod - def state_pack_variables(space, w_jump_to): - return SContinueLoop(space.int_w(w_jump_to)) + def state_pack_variables(w_jump_to): + return SContinueLoop(w_jump_to.value) class FrameBlock(object): diff --git a/rpython/flowspace/framestate.py b/rpython/flowspace/framestate.py --- a/rpython/flowspace/framestate.py +++ b/rpython/flowspace/framestate.py @@ -106,7 +106,7 @@ UNPICKLE_TAGS = {} -def recursively_flatten(space, lst): +def recursively_flatten(lst): from rpython.flowspace.flowcontext import SuspendedUnroller i = 0 while i < len(lst): @@ -114,7 +114,7 @@ if not isinstance(unroller, SuspendedUnroller): i += 1 else: - vars = unroller.state_unpack_variables(space) + vars = unroller.state_unpack_variables() key = unroller.__class__, len(vars) try: tag = PICKLE_TAGS[key] @@ -124,12 +124,12 @@ lst[i:i + 1] = [tag] + vars -def recursively_unflatten(space, lst): +def recursively_unflatten(lst): for i in xrange(len(lst) - 1, -1, -1): item = lst[i] if item in UNPICKLE_TAGS: unrollerclass, argcount = UNPICKLE_TAGS[item] arguments = lst[i + 1:i + 1 + argcount] del lst[i + 1:i + 1 + argcount] - unroller = unrollerclass.state_pack_variables(space, *arguments) + unroller = unrollerclass.state_pack_variables(*arguments) lst[i] = unroller diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ 
b/rpython/flowspace/model.py @@ -355,6 +355,21 @@ during its construction""" +# method-wrappers have not enough introspection in CPython +if hasattr(complex.real.__get__, 'im_self'): + type_with_bad_introspection = None # on top of PyPy +else: + type_with_bad_introspection = type(complex.real.__get__) + +def const(obj): + if isinstance(obj, (Variable, Constant)): + raise TypeError("already wrapped: " + repr(obj)) + # method-wrapper have ill-defined comparison and introspection + # to appear in a flow graph + if type(obj) is type_with_bad_introspection: + raise WrapException + return Constant(obj) + class SpaceOperation(object): __slots__ = "opname args result offset".split() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -9,7 +9,7 @@ from rpython.flowspace.argument import CallSpec from rpython.flowspace.model import (Constant, Variable, WrapException, - UnwrapException, checkgraph) + UnwrapException, checkgraph, const) from rpython.flowspace.bytecode import HostCode from rpython.flowspace import operation from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks, @@ -23,12 +23,6 @@ from rpython.rlib.rarithmetic import is_valid_int -# method-wrappers have not enough introspection in CPython -if hasattr(complex.real.__get__, 'im_self'): - type_with_bad_introspection = None # on top of PyPy -else: - type_with_bad_introspection = type(complex.real.__get__) - # the following gives us easy access to declare more for applications: NOT_REALLY_CONST = { Constant(sys): { @@ -136,18 +130,9 @@ fn = types.FunctionType(code, globals, code.co_name, defaults) return Constant(fn) - def wrap(self, obj): - if isinstance(obj, (Variable, Constant)): - raise TypeError("already wrapped: " + repr(obj)) - # method-wrapper have ill-defined comparison and introspection - # to appear in a flow graph - if type(obj) is type_with_bad_introspection: - raise WrapException - return Constant(obj) - def exc_wrap(self, exc): - w_value = self.wrap(exc) - w_type = self.wrap(type(exc)) + w_value = const(exc) + w_type = const(type(exc)) return FSException(w_type, w_value) def int_w(self, w_obj): @@ -191,7 +176,7 @@ return self.exception_issubclass_w(w_exc_type, w_check_class) # special case for StackOverflow (see rlib/rstackovf.py) if check_class == rstackovf.StackOverflow: - w_real_class = self.wrap(rstackovf._StackOverflow) + w_real_class = const(rstackovf._StackOverflow) return self.exception_issubclass_w(w_exc_type, w_real_class) # checking a tuple of classes for w_klass in self.unpackiterable(w_check_class): @@ -230,7 +215,7 @@ def unpackiterable(self, w_iterable): if isinstance(w_iterable, Constant): l = w_iterable.value - return [self.wrap(x) for x in l] + return [const(x) for x in l] else: raise UnwrapException("cannot unpack a Variable iterable ") @@ -239,19 +224,19 @@ l = list(self.unwrap(w_iterable)) if len(l) != expected_length: raise ValueError - return [self.wrap(x) for x in l] + return [const(x) for x in l] else: w_len = self.len(w_iterable) - w_correct = self.eq(w_len, self.wrap(expected_length)) + w_correct = self.eq(w_len, const(expected_length)) if not self.is_true(w_correct): e = self.exc_from_raise(self.w_ValueError, self.w_None) raise e - return [self.frame.do_operation('getitem', w_iterable, self.wrap(i)) + return [self.frame.do_operation('getitem', w_iterable, const(i)) for i in range(expected_length)] # ____________________________________________________________ def not_(self, w_obj): - 
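# --- minimal usage sketch of the new const() helper (illustrative) ---
# const() is a plain function in flowspace.model and replaces the old
# FlowObjSpace.wrap() method: it turns a host-level value into a Constant
# and refuses values that are already part of a flow graph.
from rpython.flowspace.model import Constant, Variable, const

c = const(42)
assert isinstance(c, Constant) and c.value == 42
try:
    const(c)                   # already wrapped -> TypeError
except TypeError:
    pass
try:
    const(Variable())          # Variables cannot be wrapped either
except TypeError:
    pass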
return self.wrap(not self.is_true(w_obj)) + return const(not self.is_true(w_obj)) def is_true(self, w_obj): if w_obj.foldable(): @@ -263,7 +248,7 @@ if isinstance(w_iterable, Constant): iterable = w_iterable.value if isinstance(iterable, unrolling_iterable): - return self.wrap(iterable.get_unroller()) + return const(iterable.get_unroller()) w_iter = self.frame.do_operation("iter", w_iterable) return w_iter @@ -278,7 +263,7 @@ raise self.exc_wrap(StopIteration()) else: frame.replace_in_stack(it, next_unroller) - return self.wrap(v) + return const(v) w_item = frame.do_operation("next", w_iter) frame.handle_implicit_exceptions([StopIteration, RuntimeError]) return w_item @@ -302,7 +287,7 @@ obj, name, etype, e) raise FlowingError(self.frame, msg) try: - return self.wrap(result) + return const(result) except WrapException: pass return self.frame.do_operation_with_implicit_exceptions('getattr', @@ -316,7 +301,7 @@ mod = __import__(name, glob, loc, frm, level) except ImportError as e: raise self.exc_wrap(e) - return self.wrap(mod) + return const(mod) def import_from(self, w_module, w_name): assert isinstance(w_module, Constant) @@ -328,13 +313,13 @@ return self.frame.do_operation_with_implicit_exceptions('getattr', w_module, w_name) try: - return self.wrap(getattr(w_module.value, w_name.value)) + return const(getattr(w_module.value, w_name.value)) except AttributeError: raise self.exc_wrap(ImportError( "cannot import name '%s'" % w_name.value)) def call_method(self, w_obj, methname, *arg_w): - w_meth = self.getattr(w_obj, self.wrap(methname)) + w_meth = self.getattr(w_obj, const(methname)) return self.call_function(w_meth, *arg_w) def call_function(self, w_func, *args_w): @@ -343,7 +328,7 @@ def appcall(self, func, *args_w): """Call an app-level RPython function directly""" - w_func = self.wrap(func) + w_func = const(func) return self.frame.do_operation('simple_call', w_func, *args_w) def call_args(self, w_callable, args): @@ -351,7 +336,7 @@ fn = w_callable.value if hasattr(fn, "_flowspace_rewrite_directly_as_"): fn = fn._flowspace_rewrite_directly_as_ - w_callable = self.wrap(fn) + w_callable = const(fn) try: sc = self.specialcases[fn] # TypeError if 'fn' not hashable except (KeyError, TypeError): @@ -398,8 +383,8 @@ value = getattr(self.unwrap(self.builtin), varname) except AttributeError: message = "global name '%s' is not defined" % varname - raise FlowingError(self.frame, self.wrap(message)) - return self.wrap(value) + raise FlowingError(self.frame, const(message)) + return const(value) def make_impure_op(oper): def generic_operator(self, *args_w): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -8,7 +8,7 @@ import operator from rpython.tool.sourcetools import compile2 from rpython.rlib.rarithmetic import ovfcheck -from rpython.flowspace.model import Constant +from rpython.flowspace.model import Constant, const class _OpHolder(object): pass op = _OpHolder() From noreply at buildbot.pypy.org Mon Aug 19 23:15:00 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:00 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Use operator in FSFrame.do_operation_with...; shorten its name Message-ID: <20130819211500.82A2A1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66229:a3a24731c83d Date: 2013-08-09 02:06 +0100 http://bitbucket.org/pypy/pypy/changeset/a3a24731c83d/ Log: Use operator in FSFrame.do_operation_with...; 
shorten its name diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -14,7 +14,6 @@ recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, rpython_print_newline) -from rpython.flowspace.operation import op class FlowingError(Exception): @@ -449,10 +448,9 @@ recorder.append(spaceop) return spaceop.result - def do_operation_with_implicit_exceptions(self, name, *args_w): - w_result = self.do_operation(name, *args_w) - oper = getattr(op, name) - self.handle_implicit_exceptions(oper.canraise) + def do_op(self, operator, *args_w): + w_result = self.do_operation(operator.name, *args_w) + self.handle_implicit_exceptions(operator.canraise) return w_result def handle_implicit_exceptions(self, exceptions): diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -11,7 +11,7 @@ from rpython.flowspace.model import (Constant, Variable, WrapException, UnwrapException, checkgraph, const) from rpython.flowspace.bytecode import HostCode -from rpython.flowspace import operation +from rpython.flowspace.operation import op from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks, FSException, FlowingError) from rpython.flowspace.generator import (tweak_generator_graph, @@ -275,8 +275,7 @@ if w_obj in self.not_really_const: const_w = self.not_really_const[w_obj] if w_name not in const_w: - return self.frame.do_operation_with_implicit_exceptions('getattr', - w_obj, w_name) + return self.frame.do_op(op.getattr, w_obj, w_name) if w_obj.foldable() and w_name.foldable(): obj, name = w_obj.value, w_name.value try: @@ -290,8 +289,7 @@ return const(result) except WrapException: pass - return self.frame.do_operation_with_implicit_exceptions('getattr', - w_obj, w_name) + return self.frame.do_op(op.getattr, w_obj, w_name) def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) @@ -310,8 +308,7 @@ if w_module in self.not_really_const: const_w = self.not_really_const[w_module] if w_name not in const_w: - return self.frame.do_operation_with_implicit_exceptions('getattr', - w_module, w_name) + return self.frame.do_op(op.getattr, w_module, w_name) try: return const(getattr(w_module.value, w_name.value)) except AttributeError: @@ -391,7 +388,7 @@ return oper.eval(self.frame, *args) return generic_operator -for oper in operation.op.__dict__.values(): +for oper in op.__dict__.values(): if getattr(FlowObjSpace, oper.name, None) is None: setattr(FlowObjSpace, oper.name, make_op(oper)) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -43,8 +43,7 @@ def eval(self, frame, *args_w): if len(args_w) != self.arity: raise TypeError(self.name + " got the wrong number of arguments") - w_result = frame.do_operation_with_implicit_exceptions(self.name, *args_w) - return w_result + return frame.do_op(self, *args_w) class PureOperator(SpaceOperator): pure = True @@ -81,8 +80,7 @@ # type cannot sanely appear in flow graph, # store operation with variable result instead pass - w_result = frame.do_operation_with_implicit_exceptions(self.name, *args_w) - return w_result + return frame.do_op(self, *args_w) def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): From noreply at buildbot.pypy.org Mon Aug 19 23:15:01 2013 From: noreply at buildbot.pypy.org 
(rlamy) Date: Mon, 19 Aug 2013 23:15:01 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Extract record() from FSFrame.do_operation() Message-ID: <20130819211501.BE6A11C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66230:5766800b98b6 Date: 2013-08-09 02:20 +0100 http://bitbucket.org/pypy/pypy/changeset/5766800b98b6/ Log: Extract record() from FSFrame.do_operation() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -439,14 +439,17 @@ return self.recorder.guessbool(self, w_condition, **kwds) def do_operation(self, name, *args_w): + spaceop = SpaceOperation(name, args_w, Variable()) + self.record(spaceop) + return spaceop.result + + def record(self, spaceop): recorder = self.recorder if getattr(recorder, 'final_state', None) is not None: self.mergeblock(recorder.crnt_block, recorder.final_state) raise StopFlowing - spaceop = SpaceOperation(name, args_w, Variable()) spaceop.offset = self.last_instr recorder.append(spaceop) - return spaceop.result def do_op(self, operator, *args_w): w_result = self.do_operation(operator.name, *args_w) From noreply at buildbot.pypy.org Mon Aug 19 23:15:03 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:03 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: refactor and rename FSFrame.handle_implicit_exceptions Message-ID: <20130819211503.16D3D1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66231:91df4586840d Date: 2013-08-09 03:02 +0100 http://bitbucket.org/pypy/pypy/changeset/91df4586840d/ Log: refactor and rename FSFrame.handle_implicit_exceptions diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -453,10 +453,11 @@ def do_op(self, operator, *args_w): w_result = self.do_operation(operator.name, *args_w) - self.handle_implicit_exceptions(operator.canraise) + if operator.canraise: + self.guessexception(operator.canraise) return w_result - def handle_implicit_exceptions(self, exceptions): + def guessexception(self, exceptions): """ Catch possible exceptions implicitly. @@ -465,9 +466,7 @@ even if the interpreter re-raises the exception, it will not be the same ImplicitOperationError instance internally. 
""" - if not exceptions: - return - return self.recorder.guessexception(self, *exceptions) + self.recorder.guessexception(self, *exceptions) def build_flow(self): graph = self.graph diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -265,7 +265,7 @@ frame.replace_in_stack(it, next_unroller) return const(v) w_item = frame.do_operation("next", w_iter) - frame.handle_implicit_exceptions([StopIteration, RuntimeError]) + frame.guessexception([StopIteration, RuntimeError]) return w_item @@ -365,10 +365,10 @@ types.TypeType)) and c.__module__ in ['__builtin__', 'exceptions']): if c in builtins_exceptions: - self.frame.handle_implicit_exceptions(builtins_exceptions[c]) + self.frame.guessexception(builtins_exceptions[c]) return w_res # *any* exception for non-builtins - self.frame.handle_implicit_exceptions([Exception]) + self.frame.guessexception([Exception]) return w_res def find_global(self, w_globals, varname): From noreply at buildbot.pypy.org Mon Aug 19 23:15:04 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:04 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: kill FlowObjSpace.unwrap(), .int_w(), .str_w() Message-ID: <20130819211504.512C01C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66232:d8b2304b850b Date: 2013-08-09 04:48 +0100 http://bitbucket.org/pypy/pypy/changeset/d8b2304b850b/ Log: kill FlowObjSpace.unwrap(), .int_w(), .str_w() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -662,8 +662,8 @@ def IMPORT_NAME(self, nameindex, next_instr): space = self.space modulename = self.getname_u(nameindex) - glob = space.unwrap(self.w_globals) - fromlist = space.unwrap(self.popvalue()) + glob = self.w_globals.value + fromlist = self.popvalue().value level = self.popvalue().value w_obj = space.import_name(modulename, glob, None, fromlist, level) self.pushvalue(w_obj) @@ -941,7 +941,7 @@ for _ in range(n_keywords): w_value = self.popvalue() w_key = self.popvalue() - key = self.space.str_w(w_key) + key = w_key.value keywords[key] = w_value arguments = self.popvalues(n_arguments) args = CallSpec(arguments, keywords, w_star, w_starstar) diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -20,7 +20,6 @@ from rpython.flowspace.specialcase import SPECIAL_CASES from rpython.rlib.unroll import unrolling_iterable, _unroller from rpython.rlib import rstackovf -from rpython.rlib.rarithmetic import is_valid_int # the following gives us easy access to declare more for applications: @@ -68,7 +67,6 @@ (the bytecode of) some function. 
""" w_None = Constant(None) - builtin = Constant(__builtin__) sys = Constant(sys) w_False = Constant(False) w_True = Constant(True) @@ -120,13 +118,12 @@ return self.w_False def newfunction(self, w_code, w_globals, defaults_w): - try: - code = self.unwrap(w_code) - globals = self.unwrap(w_globals) - defaults = tuple([self.unwrap(value) for value in defaults_w]) - except UnwrapException: + if not all(isinstance(value, Constant) for value in defaults_w): raise FlowingError(self.frame, "Dynamically created function must" " have constant default values.") + code = w_code.value + globals = w_globals.value + defaults = tuple([default.value for default in defaults_w]) fn = types.FunctionType(code, globals, code.co_name, defaults) return Constant(fn) @@ -135,39 +132,14 @@ w_type = const(type(exc)) return FSException(w_type, w_value) - def int_w(self, w_obj): - if isinstance(w_obj, Constant): - val = w_obj.value - if not is_valid_int(val): - raise TypeError("expected integer: " + repr(w_obj)) - return val - return self.unwrap(w_obj) - - def str_w(self, w_obj): - if isinstance(w_obj, Constant): - val = w_obj.value - if type(val) is not str: - raise TypeError("expected string: " + repr(w_obj)) - return val - return self.unwrap(w_obj) - - def unwrap(self, w_obj): - if isinstance(w_obj, Variable): - raise UnwrapException - elif isinstance(w_obj, Constant): - return w_obj.value - else: - raise TypeError("not wrapped: " + repr(w_obj)) - def exception_issubclass_w(self, w_cls1, w_cls2): return self.is_true(self.issubtype(w_cls1, w_cls2)) def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" - try: - check_class = self.unwrap(w_check_class) - except UnwrapException: + if not isinstance(w_check_class, Constant): raise FlowingError(self.frame, "Non-constant except guard.") + check_class = w_check_class.value if check_class in (NotImplementedError, AssertionError): raise FlowingError(self.frame, "Catching %s is not valid in RPython" % check_class.__name__) @@ -221,7 +193,7 @@ def unpack_sequence(self, w_iterable, expected_length): if isinstance(w_iterable, Constant): - l = list(self.unwrap(w_iterable)) + l = list(w_iterable.value) if len(l) != expected_length: raise ValueError return [const(x) for x in l] @@ -373,11 +345,11 @@ def find_global(self, w_globals, varname): try: - value = self.unwrap(w_globals)[varname] + value = w_globals.value[varname] except KeyError: # not in the globals, now look in the built-ins try: - value = getattr(self.unwrap(self.builtin), varname) + value = getattr(__builtin__, varname) except AttributeError: message = "global name '%s' is not defined" % varname raise FlowingError(self.frame, const(message)) diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -1,3 +1,5 @@ +from rpython.flowspace.model import Constant + SPECIAL_CASES = {} def register_flow_sc(func): @@ -14,7 +16,8 @@ @register_flow_sc(__import__) def sc_import(space, args_w): assert len(args_w) > 0 and len(args_w) <= 5, 'import needs 1 to 5 arguments' - args = [space.unwrap(arg) for arg in args_w] + assert all(isinstance(arg, Constant) for arg in args_w) + args = [arg.value for arg in args_w] return space.import_name(*args) @register_flow_sc(locals) From noreply at buildbot.pypy.org Mon Aug 19 23:15:07 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:07 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Kill 
next_instr argument in FSFrame.OPCODE methods Message-ID: <20130819211507.721211C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66233:b3bd24e7ea22 Date: 2013-08-11 17:36 +0100 http://bitbucket.org/pypy/pypy/changeset/b3bd24e7ea22/ Log: Kill next_instr argument in FSFrame.OPCODE methods diff --git a/rpython/flowspace/bytecode.py b/rpython/flowspace/bytecode.py --- a/rpython/flowspace/bytecode.py +++ b/rpython/flowspace/bytecode.py @@ -3,6 +3,7 @@ """ from rpython.tool.stdlib_opcode import host_bytecode_spec from opcode import EXTENDED_ARG, HAVE_ARGUMENT +import opcode from rpython.flowspace.argument import Signature from rpython.flowspace.flowcontext import BytecodeCorruption @@ -83,10 +84,10 @@ Returns (next_instr, opname, oparg). """ co_code = self.co_code - opcode = ord(co_code[pos]) + opnum = ord(co_code[pos]) next_instr = pos + 1 - if opcode >= HAVE_ARGUMENT: + if opnum >= HAVE_ARGUMENT: lo = ord(co_code[next_instr]) hi = ord(co_code[next_instr+1]) next_instr += 2 @@ -94,16 +95,18 @@ else: oparg = 0 - while opcode == EXTENDED_ARG: - opcode = ord(co_code[next_instr]) - if opcode < HAVE_ARGUMENT: + while opnum == EXTENDED_ARG: + opnum = ord(co_code[next_instr]) + if opnum < HAVE_ARGUMENT: raise BytecodeCorruption lo = ord(co_code[next_instr+1]) hi = ord(co_code[next_instr+2]) next_instr += 3 oparg = (oparg * 65536) | (hi * 256) | lo - opname = self.opnames[opcode] + if opnum in opcode.hasjrel: + oparg += next_instr + opname = self.opnames[opnum] return next_instr, opname, oparg @property diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -565,7 +565,7 @@ def handle_bytecode(self, next_instr): next_instr, methodname, oparg = self.pycode.read(next_instr) try: - res = getattr(self, methodname)(oparg, next_instr) + res = getattr(self, methodname)(oparg) return res if res is not None else next_instr except FSException, operr: return self.handle_operation_error(operr) @@ -586,13 +586,13 @@ def getname_w(self, index): return Constant(self.pycode.names[index]) - def BAD_OPCODE(self, _, next_instr): + def BAD_OPCODE(self, _): raise FlowingError(self, "This operation is not RPython") - def BREAK_LOOP(self, oparg, next_instr): + def BREAK_LOOP(self, oparg): return SBreakLoop.singleton.unroll(self) - def CONTINUE_LOOP(self, startofloop, next_instr): + def CONTINUE_LOOP(self, startofloop): unroller = SContinueLoop(startofloop) return unroller.unroll(self) @@ -629,13 +629,13 @@ def cmp_exc_match(self, w_1, w_2): return self.space.newbool(self.space.exception_match(w_1, w_2)) - def COMPARE_OP(self, testnum, next_instr): + def COMPARE_OP(self, testnum): w_2 = self.popvalue() w_1 = self.popvalue() w_result = getattr(self, compare_method[testnum])(w_1, w_2) self.pushvalue(w_result) - def RAISE_VARARGS(self, nbargs, next_instr): + def RAISE_VARARGS(self, nbargs): space = self.space if nbargs == 0: if self.last_exception is not None: @@ -659,7 +659,7 @@ operror = space.exc_from_raise(w_type, w_value) raise operror - def IMPORT_NAME(self, nameindex, next_instr): + def IMPORT_NAME(self, nameindex): space = self.space modulename = self.getname_u(nameindex) glob = self.w_globals.value @@ -668,17 +668,17 @@ w_obj = space.import_name(modulename, glob, None, fromlist, level) self.pushvalue(w_obj) - def IMPORT_FROM(self, nameindex, next_instr): + def IMPORT_FROM(self, nameindex): w_name = self.getname_w(nameindex) w_module = self.peekvalue() 
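# --- standalone illustration of the bytecode-decoding change above ---
# (plain CPython 2 stdlib only, EXTENDED_ARG left out for brevity):
# relative-jump opcodes now get their argument converted to an absolute
# bytecode position while reading, so handlers like JUMP_FORWARD and
# FOR_ITER can simply `return target`.
import opcode

def decode(co_code, pos):
    opnum = ord(co_code[pos])
    next_instr = pos + 1
    oparg = 0
    if opnum >= opcode.HAVE_ARGUMENT:
        oparg = ord(co_code[pos + 1]) | (ord(co_code[pos + 2]) << 8)
        next_instr = pos + 3
    if opnum in opcode.hasjrel:            # e.g. JUMP_FORWARD, SETUP_LOOP
        oparg += next_instr                # relative offset -> absolute target
    return next_instr, opcode.opname[opnum], oparg

# e.g. decode(some_function.func_code.co_code, 0)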
self.pushvalue(self.space.import_from(w_module, w_name)) - def RETURN_VALUE(self, oparg, next_instr): + def RETURN_VALUE(self, oparg): w_returnvalue = self.popvalue() unroller = SReturnValue(w_returnvalue) return unroller.unroll(self) - def END_FINALLY(self, oparg, next_instr): + def END_FINALLY(self, oparg): # unlike CPython, there are two statically distinct cases: the # END_FINALLY might be closing an 'except' block or a 'finally' # block. In the first case, the stack contains three items: @@ -701,14 +701,14 @@ unroller = self.popvalue() return unroller.unroll(self) - def POP_BLOCK(self, oparg, next_instr): + def POP_BLOCK(self, oparg): block = self.blockstack.pop() block.cleanupstack(self) # the block knows how to clean up the value stack - def JUMP_ABSOLUTE(self, jumpto, next_instr): + def JUMP_ABSOLUTE(self, jumpto): return jumpto - def YIELD_VALUE(self, _, next_instr): + def YIELD_VALUE(self, _): assert self.pycode.is_generator w_result = self.popvalue() self.do_operation('yield', w_result) @@ -720,67 +720,60 @@ PRINT_ITEM_TO = BAD_OPCODE PRINT_NEWLINE_TO = BAD_OPCODE - def PRINT_ITEM(self, oparg, next_instr): + def PRINT_ITEM(self, oparg): w_item = self.popvalue() w_s = self.do_operation('str', w_item) self.space.appcall(rpython_print_item, w_s) - def PRINT_NEWLINE(self, oparg, next_instr): + def PRINT_NEWLINE(self, oparg): self.space.appcall(rpython_print_newline) - def JUMP_FORWARD(self, jumpby, next_instr): - next_instr += jumpby - return next_instr + def JUMP_FORWARD(self, target): + return target - def JUMP_IF_FALSE(self, stepby, next_instr): + def JUMP_IF_FALSE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() if not self.space.is_true(w_cond): - next_instr += stepby - return next_instr + return target - def JUMP_IF_TRUE(self, stepby, next_instr): + def JUMP_IF_TRUE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() if self.space.is_true(w_cond): - next_instr += stepby - return next_instr + return target - def POP_JUMP_IF_FALSE(self, target, next_instr): + def POP_JUMP_IF_FALSE(self, target): w_value = self.popvalue() if not self.space.is_true(w_value): return target - return next_instr - def POP_JUMP_IF_TRUE(self, target, next_instr): + def POP_JUMP_IF_TRUE(self, target): w_value = self.popvalue() if self.space.is_true(w_value): return target - return next_instr - def JUMP_IF_FALSE_OR_POP(self, target, next_instr): + def JUMP_IF_FALSE_OR_POP(self, target): w_value = self.peekvalue() if not self.space.is_true(w_value): return target self.popvalue() - return next_instr - def JUMP_IF_TRUE_OR_POP(self, target, next_instr): + def JUMP_IF_TRUE_OR_POP(self, target): w_value = self.peekvalue() if self.space.is_true(w_value): return target self.popvalue() - return next_instr - def JUMP_IF_NOT_DEBUG(self, target, next_instr): - return next_instr + def JUMP_IF_NOT_DEBUG(self, target): + pass - def GET_ITER(self, oparg, next_instr): + def GET_ITER(self, oparg): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) self.pushvalue(w_iterator) - def FOR_ITER(self, jumpby, next_instr): + def FOR_ITER(self, target): w_iterator = self.peekvalue() try: w_nextitem = self.space.next(w_iterator) @@ -789,24 +782,23 @@ raise # iterator exhausted self.popvalue() - next_instr += jumpby + return target else: self.pushvalue(w_nextitem) - return next_instr - def SETUP_LOOP(self, offsettoend, next_instr): - block = LoopBlock(self, next_instr + offsettoend) + def SETUP_LOOP(self, target): + block = LoopBlock(self, target) self.blockstack.append(block) - def 
SETUP_EXCEPT(self, offsettoend, next_instr): - block = ExceptBlock(self, next_instr + offsettoend) + def SETUP_EXCEPT(self, target): + block = ExceptBlock(self, target) self.blockstack.append(block) - def SETUP_FINALLY(self, offsettoend, next_instr): - block = FinallyBlock(self, next_instr + offsettoend) + def SETUP_FINALLY(self, target): + block = FinallyBlock(self, target) self.blockstack.append(block) - def SETUP_WITH(self, offsettoend, next_instr): + def SETUP_WITH(self, target): # A simpler version than the 'real' 2.7 one: # directly call manager.__enter__(), don't use special lookup functions # which don't make sense on the RPython type system. @@ -814,11 +806,11 @@ w_exit = self.space.getattr(w_manager, const("__exit__")) self.settopvalue(w_exit) w_result = self.space.call_method(w_manager, "__enter__") - block = WithBlock(self, next_instr + offsettoend) + block = WithBlock(self, target) self.blockstack.append(block) self.pushvalue(w_result) - def WITH_CLEANUP(self, oparg, next_instr): + def WITH_CLEANUP(self, oparg): # Note: RPython context managers receive None in lieu of tracebacks # and cannot suppress the exception. # This opcode changed a lot between CPython versions @@ -840,22 +832,22 @@ else: self.space.call_function(w_exitfunc, w_None, w_None, w_None) - def LOAD_FAST(self, varindex, next_instr): + def LOAD_FAST(self, varindex): w_value = self.locals_stack_w[varindex] if w_value is None: raise FlowingError(self, "Local variable referenced before assignment") self.pushvalue(w_value) - def LOAD_CONST(self, constindex, next_instr): + def LOAD_CONST(self, constindex): w_const = self.getconstant_w(constindex) self.pushvalue(w_const) - def LOAD_GLOBAL(self, nameindex, next_instr): + def LOAD_GLOBAL(self, nameindex): w_result = self.space.find_global(self.w_globals, self.getname_u(nameindex)) self.pushvalue(w_result) LOAD_NAME = LOAD_GLOBAL - def LOAD_ATTR(self, nameindex, next_instr): + def LOAD_ATTR(self, nameindex): "obj.attributename" w_obj = self.popvalue() w_attributename = self.getname_w(nameindex) @@ -863,29 +855,29 @@ self.pushvalue(w_value) LOOKUP_METHOD = LOAD_ATTR - def LOAD_DEREF(self, varindex, next_instr): + def LOAD_DEREF(self, varindex): self.pushvalue(self.closure[varindex]) - def STORE_FAST(self, varindex, next_instr): + def STORE_FAST(self, varindex): w_newvalue = self.popvalue() assert w_newvalue is not None self.locals_stack_w[varindex] = w_newvalue - def STORE_GLOBAL(self, nameindex, next_instr): + def STORE_GLOBAL(self, nameindex): varname = self.getname_u(nameindex) raise FlowingError(self, "Attempting to modify global variable %r." 
% (varname)) - def POP_TOP(self, oparg, next_instr): + def POP_TOP(self, oparg): self.popvalue() - def ROT_TWO(self, oparg, next_instr): + def ROT_TWO(self, oparg): w_1 = self.popvalue() w_2 = self.popvalue() self.pushvalue(w_1) self.pushvalue(w_2) - def ROT_THREE(self, oparg, next_instr): + def ROT_THREE(self, oparg): w_1 = self.popvalue() w_2 = self.popvalue() w_3 = self.popvalue() @@ -893,7 +885,7 @@ self.pushvalue(w_3) self.pushvalue(w_2) - def ROT_FOUR(self, oparg, next_instr): + def ROT_FOUR(self, oparg): w_1 = self.popvalue() w_2 = self.popvalue() w_3 = self.popvalue() @@ -903,11 +895,11 @@ self.pushvalue(w_3) self.pushvalue(w_2) - def DUP_TOP(self, oparg, next_instr): + def DUP_TOP(self, oparg): w_1 = self.peekvalue() self.pushvalue(w_1) - def DUP_TOPX(self, itemcount, next_instr): + def DUP_TOPX(self, itemcount): delta = itemcount - 1 while True: itemcount -= 1 @@ -925,7 +917,7 @@ for OPCODE, op in _unsupported_ops: locals()[OPCODE] = unsupportedoperation(OPCODE, op) - def BUILD_LIST_FROM_ARG(self, _, next_instr): + def BUILD_LIST_FROM_ARG(self, _): # This opcode was added with pypy-1.8. Here is a simpler # version, enough for annotation. last_val = self.popvalue() @@ -949,37 +941,37 @@ w_result = self.space.call_args(w_function, args) self.pushvalue(w_result) - def CALL_FUNCTION(self, oparg, next_instr): + def CALL_FUNCTION(self, oparg): self.call_function(oparg) CALL_METHOD = CALL_FUNCTION - def CALL_FUNCTION_VAR(self, oparg, next_instr): + def CALL_FUNCTION_VAR(self, oparg): w_varargs = self.popvalue() self.call_function(oparg, w_varargs) - def CALL_FUNCTION_KW(self, oparg, next_instr): + def CALL_FUNCTION_KW(self, oparg): w_varkw = self.popvalue() self.call_function(oparg, None, w_varkw) - def CALL_FUNCTION_VAR_KW(self, oparg, next_instr): + def CALL_FUNCTION_VAR_KW(self, oparg): w_varkw = self.popvalue() w_varargs = self.popvalue() self.call_function(oparg, w_varargs, w_varkw) - def MAKE_FUNCTION(self, numdefaults, next_instr): + def MAKE_FUNCTION(self, numdefaults): w_codeobj = self.popvalue() defaults = self.popvalues(numdefaults) fn = self.space.newfunction(w_codeobj, self.w_globals, defaults) self.pushvalue(fn) - def STORE_ATTR(self, nameindex, next_instr): + def STORE_ATTR(self, nameindex): "obj.attributename = newvalue" w_attributename = self.getname_w(nameindex) w_obj = self.popvalue() w_newvalue = self.popvalue() self.space.setattr(w_obj, w_attributename, w_newvalue) - def UNPACK_SEQUENCE(self, itemcount, next_instr): + def UNPACK_SEQUENCE(self, itemcount): w_iterable = self.popvalue() items = self.space.unpack_sequence(w_iterable, itemcount) for w_item in reversed(items): @@ -990,18 +982,18 @@ w_result = self.space.getslice(w_obj, w_start, w_end) self.pushvalue(w_result) - def SLICE_0(self, oparg, next_instr): + def SLICE_0(self, oparg): self.slice(self.space.w_None, self.space.w_None) - def SLICE_1(self, oparg, next_instr): + def SLICE_1(self, oparg): w_start = self.popvalue() self.slice(w_start, self.space.w_None) - def SLICE_2(self, oparg, next_instr): + def SLICE_2(self, oparg): w_end = self.popvalue() self.slice(self.space.w_None, w_end) - def SLICE_3(self, oparg, next_instr): + def SLICE_3(self, oparg): w_end = self.popvalue() w_start = self.popvalue() self.slice(w_start, w_end) @@ -1011,18 +1003,18 @@ w_newvalue = self.popvalue() self.space.setslice(w_obj, w_start, w_end, w_newvalue) - def STORE_SLICE_0(self, oparg, next_instr): + def STORE_SLICE_0(self, oparg): self.storeslice(self.space.w_None, self.space.w_None) - def STORE_SLICE_1(self, oparg, next_instr): 
+ def STORE_SLICE_1(self, oparg): w_start = self.popvalue() self.storeslice(w_start, self.space.w_None) - def STORE_SLICE_2(self, oparg, next_instr): + def STORE_SLICE_2(self, oparg): w_end = self.popvalue() self.storeslice(self.space.w_None, w_end) - def STORE_SLICE_3(self, oparg, next_instr): + def STORE_SLICE_3(self, oparg): w_end = self.popvalue() w_start = self.popvalue() self.storeslice(w_start, w_end) @@ -1031,23 +1023,23 @@ w_obj = self.popvalue() self.space.delslice(w_obj, w_start, w_end) - def DELETE_SLICE_0(self, oparg, next_instr): + def DELETE_SLICE_0(self, oparg): self.deleteslice(self.space.w_None, self.space.w_None) - def DELETE_SLICE_1(self, oparg, next_instr): + def DELETE_SLICE_1(self, oparg): w_start = self.popvalue() self.deleteslice(w_start, self.space.w_None) - def DELETE_SLICE_2(self, oparg, next_instr): + def DELETE_SLICE_2(self, oparg): w_end = self.popvalue() self.deleteslice(self.space.w_None, w_end) - def DELETE_SLICE_3(self, oparg, next_instr): + def DELETE_SLICE_3(self, oparg): w_end = self.popvalue() w_start = self.popvalue() self.deleteslice(w_start, w_end) - def LIST_APPEND(self, oparg, next_instr): + def LIST_APPEND(self, oparg): w = self.popvalue() if sys.version_info < (2, 7): v = self.popvalue() @@ -1055,27 +1047,27 @@ v = self.peekvalue(oparg - 1) self.space.call_method(v, 'append', w) - def DELETE_FAST(self, varindex, next_instr): + def DELETE_FAST(self, varindex): if self.locals_stack_w[varindex] is None: varname = self.getlocalvarname(varindex) message = "local variable '%s' referenced before assignment" raise UnboundLocalError(message, varname) self.locals_stack_w[varindex] = None - def STORE_MAP(self, oparg, next_instr): + def STORE_MAP(self, oparg): w_key = self.popvalue() w_value = self.popvalue() w_dict = self.peekvalue() self.space.setitem(w_dict, w_key, w_value) - def STORE_SUBSCR(self, oparg, next_instr): + def STORE_SUBSCR(self, oparg): "obj[subscr] = newvalue" w_subscr = self.popvalue() w_obj = self.popvalue() w_newvalue = self.popvalue() self.space.setitem(w_obj, w_subscr, w_newvalue) - def BUILD_SLICE(self, numargs, next_instr): + def BUILD_SLICE(self, numargs): if numargs == 3: w_step = self.popvalue() elif numargs == 2: @@ -1087,23 +1079,23 @@ w_slice = self.space.newslice(w_start, w_end, w_step) self.pushvalue(w_slice) - def DELETE_SUBSCR(self, oparg, next_instr): + def DELETE_SUBSCR(self, oparg): "del obj[subscr]" w_subscr = self.popvalue() w_obj = self.popvalue() self.space.delitem(w_obj, w_subscr) - def BUILD_TUPLE(self, itemcount, next_instr): + def BUILD_TUPLE(self, itemcount): items = self.popvalues(itemcount) w_tuple = self.space.newtuple(items) self.pushvalue(w_tuple) - def BUILD_LIST(self, itemcount, next_instr): + def BUILD_LIST(self, itemcount): items = self.popvalues(itemcount) w_list = self.space.newlist(items) self.pushvalue(w_list) - def BUILD_MAP(self, itemcount, next_instr): + def BUILD_MAP(self, itemcount): w_dict = self.space.newdict() self.pushvalue(w_dict) @@ -1114,15 +1106,15 @@ # Set literals, set comprehensions - def BUILD_SET(self, oparg, next_instr): + def BUILD_SET(self, oparg): raise NotImplementedError("BUILD_SET") - def SET_ADD(self, oparg, next_instr): + def SET_ADD(self, oparg): raise NotImplementedError("SET_ADD") # Dict comprehensions - def MAP_ADD(self, oparg, next_instr): + def MAP_ADD(self, oparg): raise NotImplementedError("MAP_ADD") # Closures From noreply at buildbot.pypy.org Mon Aug 19 23:15:08 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:08 +0200 (CEST) 
Subject: [pypy-commit] pypy less-stringly-ops: Allow creating the SpaceOperation directly from the SpaceOperator Message-ID: <20130819211508.AC1151C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66234:1a725470629d Date: 2013-08-09 06:30 +0100 http://bitbucket.org/pypy/pypy/changeset/1a725470629d/ Log: Allow creating the SpaceOperation directly from the SpaceOperator diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -452,10 +452,11 @@ recorder.append(spaceop) def do_op(self, operator, *args_w): - w_result = self.do_operation(operator.name, *args_w) + op = operator(*args_w) + self.record(op) if operator.canraise: self.guessexception(operator.canraise) - return w_result + return op.result def guessexception(self, exceptions): """ diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -7,7 +7,7 @@ import __future__ import operator from rpython.tool.sourcetools import compile2 -from rpython.flowspace.model import Constant, WrapException, const +from rpython.flowspace.model import Constant, WrapException, const, Variable from rpython.flowspace.specialcase import register_flow_sc class _OpHolder(object): pass @@ -45,6 +45,9 @@ raise TypeError(self.name + " got the wrong number of arguments") return frame.do_op(self, *args_w) + def __call__(self, *args_w): + return SpaceOperation(self.name, args_w, Variable()) + class PureOperator(SpaceOperator): pure = True From noreply at buildbot.pypy.org Mon Aug 19 23:15:09 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:09 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: replace SpaceOperators with subclasses of SpaceOperation Message-ID: <20130819211509.ED6F21C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66235:5a1ff23cec22 Date: 2013-08-09 10:08 +0100 http://bitbucket.org/pypy/pypy/changeset/5a1ff23cec22/ Log: replace SpaceOperators with subclasses of SpaceOperation diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -451,11 +451,10 @@ spaceop.offset = self.last_instr recorder.append(spaceop) - def do_op(self, operator, *args_w): - op = operator(*args_w) + def do_op(self, op): self.record(op) - if operator.canraise: - self.guessexception(operator.canraise) + if op.canraise: + self.guessexception(op.canraise) return op.result def guessexception(self, exceptions): diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -247,7 +247,7 @@ if w_obj in self.not_really_const: const_w = self.not_really_const[w_obj] if w_name not in const_w: - return self.frame.do_op(op.getattr, w_obj, w_name) + return self.frame.do_op(op.getattr(w_obj, w_name)) if w_obj.foldable() and w_name.foldable(): obj, name = w_obj.value, w_name.value try: @@ -261,7 +261,7 @@ return const(result) except WrapException: pass - return self.frame.do_op(op.getattr, w_obj, w_name) + return self.frame.do_op(op.getattr(w_obj, w_name)) def isinstance_w(self, w_obj, w_type): return self.is_true(self.isinstance(w_obj, w_type)) @@ -280,7 +280,7 @@ if w_module in self.not_really_const: const_w = self.not_really_const[w_module] if w_name not in const_w: - return 
self.frame.do_op(op.getattr, w_module, w_name) + return self.frame.do_op(op.getattr(w_module, w_name)) try: return const(getattr(w_module.value, w_name.value)) except AttributeError: @@ -355,14 +355,14 @@ raise FlowingError(self.frame, const(message)) return const(value) -def make_op(oper): +def make_op(cls): def generic_operator(self, *args): - return oper.eval(self.frame, *args) + return cls(*args).eval(self.frame) return generic_operator -for oper in op.__dict__.values(): - if getattr(FlowObjSpace, oper.name, None) is None: - setattr(FlowObjSpace, oper.name, make_op(oper)) +for cls in op.__dict__.values(): + if getattr(FlowObjSpace, cls.opname, None) is None: + setattr(FlowObjSpace, cls.opname, make_op(cls)) def build_flow(func, space=FlowObjSpace()): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -7,7 +7,8 @@ import __future__ import operator from rpython.tool.sourcetools import compile2 -from rpython.flowspace.model import Constant, WrapException, const, Variable +from rpython.flowspace.model import (Constant, WrapException, const, Variable, + SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc class _OpHolder(object): pass @@ -15,55 +16,50 @@ func2op = {} -class SpaceOperator(object): +class HLOperation(SpaceOperation): pure = False - def __init__(self, name, arity, symbol, pyfunc, can_overflow=False): - self.name = name - self.arity = arity - self.symbol = symbol - self.pyfunc = pyfunc - self.can_overflow = can_overflow - self.canraise = [] + def __init__(self, *args): + self.args = list(args) + self.result = Variable() + self.offset = -1 - def make_sc(self): + @classmethod + def make_sc(cls): def sc_operator(space, args_w): - if len(args_w) != self.arity: - if self is op.pow and len(args_w) == 2: + if len(args_w) != cls.arity: + if cls is op.pow and len(args_w) == 2: args_w = args_w + [Constant(None)] - elif self is op.getattr and len(args_w) == 3: + elif cls is op.getattr and len(args_w) == 3: return space.frame.do_operation('simple_call', Constant(getattr), *args_w) else: raise Exception("should call %r with exactly %d arguments" % ( - self.name, self.arity)) + cls.opname, cls.arity)) # completely replace the call with the underlying # operation and its limited implicit exceptions semantic - return getattr(space, self.name)(*args_w) + return getattr(space, cls.opname)(*args_w) return sc_operator - def eval(self, frame, *args_w): - if len(args_w) != self.arity: - raise TypeError(self.name + " got the wrong number of arguments") - return frame.do_op(self, *args_w) + def eval(self, frame): + if len(self.args) != self.arity: + raise TypeError(self.opname + " got the wrong number of arguments") + return frame.do_op(self) - def __call__(self, *args_w): - return SpaceOperation(self.name, args_w, Variable()) - -class PureOperator(SpaceOperator): +class PureOperation(HLOperation): pure = True - def eval(self, frame, *args_w): - if len(args_w) != self.arity: - raise TypeError(self.name + " got the wrong number of arguments") + def eval(self, frame): + if len(self.args) != self.arity: + raise TypeError(self.opname + " got the wrong number of arguments") args = [] - if all(w_arg.foldable() for w_arg in args_w): - args = [w_arg.value for w_arg in args_w] + if all(w_arg.foldable() for w_arg in self.args): + args = [w_arg.value for w_arg in self.args] # All arguments are constants: call the operator now try: result = self.pyfunc(*args) except Exception as e: from 
rpython.flowspace.flowcontext import FlowingError msg = "%s%r always raises %s: %s" % ( - self.name, tuple(args), type(e), e) + self.opname, tuple(args), type(e), e) raise FlowingError(frame, msg) else: # don't try to constant-fold operations giving a 'long' @@ -73,7 +69,7 @@ if self.can_overflow and type(result) is long: pass # don't constant-fold getslice on lists, either - elif self.name == 'getslice' and type(result) is list: + elif self.opname == 'getslice' and type(result) is list: pass # otherwise, fine else: @@ -83,23 +79,26 @@ # type cannot sanely appear in flow graph, # store operation with variable result instead pass - return frame.do_op(self, *args_w) + return frame.do_op(self) def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) - cls = PureOperator if pure else SpaceOperator - oper = cls(name, arity, symbol, pyfunc, can_overflow=ovf) - setattr(op, name, oper) + base_cls = PureOperation if pure else HLOperation + cls = type(name, (base_cls,), {'opname': name, 'arity': arity, + 'can_overflow': ovf, 'canraise': []}) + setattr(op, name, cls) if pyfunc is not None: - func2op[pyfunc] = oper + func2op[pyfunc] = cls if operator_func: - func2op[operator_func] = oper - if pyfunc is None: - oper.pyfunc = operator_func + func2op[operator_func] = cls + if pyfunc is not None: + cls.pyfunc = staticmethod(pyfunc) + elif operator_func is not None: + cls.pyfunc = staticmethod(operator_func) if ovf: from rpython.rlib.rarithmetic import ovfcheck - ovf_func = lambda *args: ovfcheck(oper.pyfunc(*args)) + ovf_func = lambda *args: ovfcheck(cls.pyfunc(*args)) add_operator(name + '_ovf', arity, symbol, pyfunc=ovf_func) # ____________________________________________________________ @@ -315,7 +314,7 @@ oper = getattr(op, name) lis = oper.canraise if exc in lis: - raise ValueError, "your list is causing duplication!" 
+ raise ValueError("your list is causing duplication!") lis.append(exc) assert exc in op_appendices From noreply at buildbot.pypy.org Mon Aug 19 23:15:11 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:11 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: move arity check to HLOperation ctor Message-ID: <20130819211511.34D821C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66236:0ce93a50de7c Date: 2013-08-09 11:19 +0100 http://bitbucket.org/pypy/pypy/changeset/0ce93a50de7c/ Log: move arity check to HLOperation ctor diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -19,6 +19,8 @@ class HLOperation(SpaceOperation): pure = False def __init__(self, *args): + if len(args) != self.arity: + raise TypeError(self.opname + " got the wrong number of arguments") self.args = list(args) self.result = Variable() self.offset = -1 @@ -40,16 +42,12 @@ return sc_operator def eval(self, frame): - if len(self.args) != self.arity: - raise TypeError(self.opname + " got the wrong number of arguments") return frame.do_op(self) class PureOperation(HLOperation): pure = True def eval(self, frame): - if len(self.args) != self.arity: - raise TypeError(self.opname + " got the wrong number of arguments") args = [] if all(w_arg.foldable() for w_arg in self.args): args = [w_arg.value for w_arg in self.args] From noreply at buildbot.pypy.org Mon Aug 19 23:15:12 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:12 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Deal with FSFrame.last_instr a bit more explicitly Message-ID: <20130819211512.E39051C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66237:019245b1068b Date: 2013-04-30 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/019245b1068b/ Log: Deal with FSFrame.last_instr a bit more explicitly diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -410,7 +410,7 @@ self.locals_stack_w[:len(items_w)] = items_w self.dropvaluesuntil(len(items_w)) - def getstate(self): + def getstate(self, next_pos): # getfastscope() can return real None, for undefined locals data = self.save_locals_stack() if self.last_exception is None: @@ -420,7 +420,7 @@ data.append(self.last_exception.w_type) data.append(self.last_exception.w_value) recursively_flatten(data) - return FrameState(data, self.blockstack[:], self.last_instr) + return FrameState(data, self.blockstack[:], next_pos) def setstate(self, state): """ Reset the frame to the given state. 
""" @@ -432,7 +432,6 @@ self.last_exception = None else: self.last_exception = FSException(data[-2], data[-1]) - self.last_instr = state.next_instr self.blockstack = state.blocklist[:] def guessbool(self, w_condition, **kwds): @@ -478,11 +477,12 @@ def record_block(self, block): self.setstate(block.framestate) + next_pos = block.framestate.next_instr self.recorder = block.make_recorder() try: while True: - self.last_instr = self.handle_bytecode(self.last_instr) - self.recorder.final_state = self.getstate() + next_pos = self.handle_bytecode(next_pos) + self.recorder.final_state = self.getstate(next_pos) except ImplicitOperationError, e: if isinstance(e.w_type, Constant): @@ -563,6 +563,7 @@ break def handle_bytecode(self, next_instr): + self.last_instr = next_instr next_instr, methodname, oparg = self.pycode.read(next_instr) try: res = getattr(self, methodname)(oparg) diff --git a/rpython/flowspace/test/test_framestate.py b/rpython/flowspace/test/test_framestate.py --- a/rpython/flowspace/test/test_framestate.py +++ b/rpython/flowspace/test/test_framestate.py @@ -25,55 +25,55 @@ def test_eq_framestate(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() - fs2 = frame.getstate() + fs1 = frame.getstate(0) + fs2 = frame.getstate(0) assert fs1 == fs2 def test_neq_hacked_framestate(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() + fs1 = frame.getstate(0) frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() - fs2 = frame.getstate() + fs2 = frame.getstate(0) assert fs1 != fs2 def test_union_on_equal_framestates(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() - fs2 = frame.getstate() + fs1 = frame.getstate(0) + fs2 = frame.getstate(0) assert fs1.union(fs2) == fs1 def test_union_on_hacked_framestates(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() + fs1 = frame.getstate(0) frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() - fs2 = frame.getstate() + fs2 = frame.getstate(0) assert fs1.union(fs2) == fs2 # fs2 is more general assert fs2.union(fs1) == fs2 # fs2 is more general def test_restore_frame(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() + fs1 = frame.getstate(0) frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() frame.setstate(fs1) - assert fs1 == frame.getstate() + assert fs1 == frame.getstate(0) def test_copy(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() + fs1 = frame.getstate(0) fs2 = fs1.copy() assert fs1 == fs2 def test_getvariables(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() + fs1 = frame.getstate(0) vars = fs1.getvariables() assert len(vars) == 1 def test_getoutputargs(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() + fs1 = frame.getstate(0) frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() - fs2 = frame.getstate() + fs2 = frame.getstate(0) outputargs = fs1.getoutputargs(fs2) # 'x' -> 'x' is a Variable # locals_w[n-1] -> locals_w[n-1] is Constant(None) @@ -81,9 +81,9 @@ def test_union_different_constants(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() + fs1 = frame.getstate(0) frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(42) - fs2 = frame.getstate() + fs2 = frame.getstate(0) fs3 = fs1.union(fs2) frame.setstate(fs3) assert isinstance(frame.locals_stack_w[frame.pycode.co_nlocals-1], @@ -91,7 +91,7 @@ def test_union_spectag(self): frame = self.getframe(self.func_simple) - fs1 = frame.getstate() + fs1 = frame.getstate(0) 
frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(SpecTag()) - fs2 = frame.getstate() + fs2 = frame.getstate(0) assert fs1.union(fs2) is None # UnionError From noreply at buildbot.pypy.org Mon Aug 19 23:15:14 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:14 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Intercept FlowingError in FSFrame.record_block() and add frame info there Message-ID: <20130819211514.228591C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66238:9f35203d09ba Date: 2013-08-09 11:46 +0100 http://bitbucket.org/pypy/pypy/changeset/9f35203d09ba/ Log: Intercept FlowingError in FSFrame.record_block() and add frame info there diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -18,9 +18,7 @@ class FlowingError(Exception): """ Signals invalid RPython in the function being analysed""" - def __init__(self, frame, msg): - super(FlowingError, self).__init__(msg) - self.frame = frame + frame = None def __str__(self): msg = ['-+' * 30] @@ -307,7 +305,7 @@ def unsupportedoperation(OPCODE, msg): def UNSUPPORTED(self, *ignored): - raise FlowingError(self, "%s is not RPython" % (msg,)) + raise FlowingError("%s is not RPython" % (msg,)) UNSUPPORTED.func_name = OPCODE return UNSUPPORTED @@ -510,6 +508,11 @@ link = Link([w_result], self.graph.returnblock) self.recorder.crnt_block.closeblock(link) + except FlowingError as exc: + if exc.frame is None: + exc.frame = self + raise + self.recorder = None def mergeblock(self, currentblock, currentstate): @@ -588,7 +591,7 @@ return Constant(self.pycode.names[index]) def BAD_OPCODE(self, _): - raise FlowingError(self, "This operation is not RPython") + raise FlowingError("This operation is not RPython") def BREAK_LOOP(self, oparg): return SBreakLoop.singleton.unroll(self) @@ -836,7 +839,7 @@ def LOAD_FAST(self, varindex): w_value = self.locals_stack_w[varindex] if w_value is None: - raise FlowingError(self, "Local variable referenced before assignment") + raise FlowingError("Local variable referenced before assignment") self.pushvalue(w_value) def LOAD_CONST(self, constindex): @@ -866,8 +869,8 @@ def STORE_GLOBAL(self, nameindex): varname = self.getname_u(nameindex) - raise FlowingError(self, - "Attempting to modify global variable %r." % (varname)) + raise FlowingError( + "Attempting to modify global variable %r." 
% (varname)) def POP_TOP(self, oparg): self.popvalue() @@ -927,7 +930,7 @@ def call_function(self, oparg, w_star=None, w_starstar=None): if w_starstar is not None: - raise FlowingError(self, "Dict-unpacking is not RPython") + raise FlowingError("Dict-unpacking is not RPython") n_arguments = oparg & 0xff n_keywords = (oparg >> 8) & 0xff keywords = {} diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -119,7 +119,7 @@ def newfunction(self, w_code, w_globals, defaults_w): if not all(isinstance(value, Constant) for value in defaults_w): - raise FlowingError(self.frame, "Dynamically created function must" + raise FlowingError("Dynamically created function must" " have constant default values.") code = w_code.value globals = w_globals.value @@ -138,10 +138,10 @@ def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" if not isinstance(w_check_class, Constant): - raise FlowingError(self.frame, "Non-constant except guard.") + raise FlowingError("Non-constant except guard.") check_class = w_check_class.value if check_class in (NotImplementedError, AssertionError): - raise FlowingError(self.frame, + raise FlowingError( "Catching %s is not valid in RPython" % check_class.__name__) if not isinstance(check_class, tuple): # the simple case @@ -256,7 +256,7 @@ etype = e.__class__ msg = "getattr(%s, %s) always raises %s: %s" % ( obj, name, etype, e) - raise FlowingError(self.frame, msg) + raise FlowingError(msg) try: return const(result) except WrapException: @@ -351,8 +351,7 @@ try: value = getattr(__builtin__, varname) except AttributeError: - message = "global name '%s' is not defined" % varname - raise FlowingError(self.frame, const(message)) + raise FlowingError("global name '%s' is not defined" % varname) return const(value) def make_op(cls): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -58,7 +58,7 @@ from rpython.flowspace.flowcontext import FlowingError msg = "%s%r always raises %s: %s" % ( self.opname, tuple(args), type(e), e) - raise FlowingError(frame, msg) + raise FlowingError(msg) else: # don't try to constant-fold operations giving a 'long' # result. 
The result is probably meant to be sent to From noreply at buildbot.pypy.org Mon Aug 19 23:15:15 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:15 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Add HLOperation.constfold() Message-ID: <20130819211515.72D541C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66239:c97160d9f9cc Date: 2013-08-09 19:14 +0100 http://bitbucket.org/pypy/pypy/changeset/c97160d9f9cc/ Log: Add HLOperation.constfold() diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -42,12 +42,18 @@ return sc_operator def eval(self, frame): + result = self.constfold() + if result is not None: + return result return frame.do_op(self) + def constfold(self): + return None + class PureOperation(HLOperation): pure = True - def eval(self, frame): + def constfold(self): args = [] if all(w_arg.foldable() for w_arg in self.args): args = [w_arg.value for w_arg in self.args] @@ -77,7 +83,6 @@ # type cannot sanely appear in flow graph, # store operation with variable result instead pass - return frame.do_op(self) def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): From noreply at buildbot.pypy.org Mon Aug 19 23:15:16 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:16 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Fix duplication between 'nonzero' and 'is_true' ops. Message-ID: <20130819211516.CB7861C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66240:7cb877b832f4 Date: 2013-08-19 17:42 +0100 http://bitbucket.org/pypy/pypy/changeset/7cb877b832f4/ Log: Fix duplication between 'nonzero' and 'is_true' ops. Call the merged operation 'bool'. 
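A quick way to see what this renaming means in practice (illustrative sketch,
assuming an RPython checkout at this changeset): truth-testing now shows up
in flow graphs as a single 'bool' operation instead of the old
'is_true'/'nonzero' pair.

from rpython.flowspace.objspace import build_flow

def f(x):
    return not x

graph = build_flow(f)
opnames = [hlop.opname for block in graph.iterblocks()
                       for hlop in block.operations]
assert 'bool' in opnames and 'is_true' not in opnames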
diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -25,7 +25,7 @@ return [Ellipsis] raise CallPatternTooComplex("'*' argument must be SomeTuple") - def is_true(self, s_tup): + def bool(self, s_tup): assert isinstance(s_tup, SomeTuple) return bool(s_tup.items) @@ -210,7 +210,7 @@ args_w = data_args_w[:need_cnt] for argname, w_arg in zip(argnames[need_cnt:], data_args_w[need_cnt:]): unfiltered_kwds_w[argname] = w_arg - assert not space.is_true(data_w_stararg) + assert not space.bool(data_w_stararg) else: stararg_w = space.unpackiterable(data_w_stararg) args_w = data_args_w + stararg_w diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -688,7 +688,7 @@ fn, block, i = self.position_key op = block.operations[i] if opname is not None: - assert op.opname == opname or op.opname in opname + assert op.opname == opname if arity is not None: assert len(op.args) == arity if pos is not None: diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -94,7 +94,7 @@ def builtin_bool(s_obj): - return s_obj.is_true() + return s_obj.bool() def builtin_int(s_obj, s_base=None): if isinstance(s_obj, SomeInteger): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1490,7 +1490,7 @@ s = a.build_types(snippet.prime, [int]) assert s.knowntype == bool - def test_and_is_true_coalesce(self): + def test_and_bool_coalesce(self): def f(a,b,c,d,e): x = a and b if x: @@ -1500,7 +1500,7 @@ s = a.build_types(f, [int, str, a.bookkeeper.immutablevalue(1.0), a.bookkeeper.immutablevalue('d'), a.bookkeeper.immutablevalue('e')]) assert s == annmodel.SomeTuple([annmodel.SomeChar(), a.bookkeeper.immutablevalue(1.0)]) - def test_is_true_coalesce2(self): + def test_bool_coalesce2(self): def f(a,b,a1,b1,c,d,e): x = (a or b) and (a1 or b1) if x: @@ -1514,7 +1514,7 @@ assert s == annmodel.SomeTuple([annmodel.SomeChar(), a.bookkeeper.immutablevalue(1.0)]) - def test_is_true_coalesce_sanity(self): + def test_bool_coalesce_sanity(self): def f(a): while a: pass diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -7,7 +7,7 @@ def newtuple(self, items): return tuple(items) - def is_true(self, obj): + def bool(self, obj): return bool(obj) def unpackiterable(self, it): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -20,10 +20,10 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -UNARY_OPERATIONS = set(['len', 'is_true', 'getattr', 'setattr', 'delattr', +UNARY_OPERATIONS = set(['len', 'bool', 'getattr', 'setattr', 'delattr', 'simple_call', 'call_args', 'str', 'repr', 'iter', 'next', 'invert', 'type', 'issubtype', - 'pos', 'neg', 'nonzero', 'abs', 'hex', 'oct', + 'pos', 'neg', 'abs', 'hex', 'oct', 'ord', 'int', 'float', 'long', 'hash', 'id', # <== not supported any more 'getslice', 'setslice', 'delslice', @@ -57,7 +57,7 @@ def len(obj): return SomeInteger(nonneg=True) - def is_true_behavior(obj, s): + def bool_behavior(obj, s): if obj.is_immutable_constant(): 
s.const = bool(obj.const) else: @@ -65,13 +65,13 @@ if s_len.is_immutable_constant(): s.const = s_len.const > 0 - def is_true(s_obj): + def bool(s_obj): r = SomeBool() - s_obj.is_true_behavior(r) + s_obj.bool_behavior(r) bk = getbookkeeper() knowntypedata = {} - op = bk._find_current_op(opname=("is_true", "nonzero"), arity=1) + op = bk._find_current_op(opname="bool", arity=1) arg = op.args[0] s_nonnone_obj = s_obj if s_obj.can_be_none(): @@ -80,9 +80,6 @@ r.set_knowntypedata(knowntypedata) return r - def nonzero(obj): - return obj.is_true() - def hash(obj): raise TypeError, ("cannot use hash() in RPython; " "see objectmodel.compute_xxx()") @@ -179,7 +176,7 @@ abs = neg - def is_true(self): + def bool(self): if self.is_immutable_constant(): return getbookkeeper().immutablevalue(bool(self.const)) return s_Bool @@ -211,7 +208,7 @@ abs_ovf = _clone(abs, [OverflowError]) class __extend__(SomeBool): - def is_true(self): + def bool(self): return self def invert(self): @@ -670,7 +667,7 @@ # create or update the attribute in clsdef clsdef.generalize_attr(attr, s_value) - def is_true_behavior(ins, s): + def bool_behavior(ins, s): if not ins.can_be_None: s.const = True @@ -739,7 +736,7 @@ d = [desc.bind_under(classdef, name) for desc in pbc.descriptions] return SomePBC(d, can_be_None=pbc.can_be_None) - def is_true_behavior(pbc, s): + def bool_behavior(pbc, s): if pbc.isNone(): s.const = False elif not pbc.can_be_None: @@ -799,7 +796,7 @@ v = p.ll_ptrtype._example()(*llargs) return ll_to_annotation(v) - def is_true(p): + def bool(p): return s_Bool class __extend__(SomeLLADTMeth): @@ -833,5 +830,5 @@ llmemory.supported_access_types[s_attr.const]) getattr.can_only_throw = [] - def is_true(s_addr): + def bool(s_addr): return s_Bool diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -213,7 +213,7 @@ def is_true(self, w_obj): if w_obj.foldable(): return bool(w_obj.value) - w_truthvalue = self.frame.do_operation('is_true', w_obj) + w_truthvalue = self.frame.do_operation('bool', w_obj) return self.frame.guessbool(w_truthvalue) def iter(self, w_iterable): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -230,8 +230,8 @@ add_operator('trunc', 1, 'trunc', pyfunc=unsupported) add_operator('pos', 1, 'pos', pure=True) add_operator('neg', 1, 'neg', pure=True, ovf=True) -add_operator('nonzero', 1, 'truth', pyfunc=bool, pure=True) -op.is_true = op.nonzero +add_operator('bool', 1, 'truth', pyfunc=bool, pure=True) +op.is_true = op.nonzero = op.bool # for llinterp add_operator('abs' , 1, 'abs', pyfunc=abs, pure=True, ovf=True) add_operator('hex', 1, 'hex', pyfunc=hex, pure=True) add_operator('oct', 1, 'oct', pyfunc=oct, pure=True) @@ -288,7 +288,7 @@ # Other functions that get directly translated to SpaceOperators func2op[type] = op.type -func2op[operator.truth] = op.nonzero +func2op[operator.truth] = op.bool if hasattr(__builtin__, 'next'): func2op[__builtin__.next] = op.next diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -104,7 +104,7 @@ def test_loop(self): graph = self.codetest(self.loop) assert self.all_operations(graph) == {'abs': 1, - 'is_true': 1, + 'bool': 1, 'sub': 1} #__________________________________________________________ @@ -532,7 +532,7 @@ def f(x): return not 
~-x graph = self.codetest(f) - assert self.all_operations(graph) == {'is_true': 1, 'invert': 1, 'neg': 1} + assert self.all_operations(graph) == {'bool': 1, 'invert': 1, 'neg': 1} #__________________________________________________________ diff --git a/rpython/flowspace/test/test_unroll.py b/rpython/flowspace/test/test_unroll.py --- a/rpython/flowspace/test/test_unroll.py +++ b/rpython/flowspace/test/test_unroll.py @@ -57,7 +57,7 @@ graph = self.codetest(f) ops = self.all_operations(graph) assert ops == {'simple_call': 6, - 'is_true': 6, + 'bool': 6, 'lt': 1, 'le': 1, 'eq': 1, diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -126,12 +126,12 @@ from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.delitem, hop) - def ctrl_is_true(self, s_obj): - return delegate(self.is_true, s_obj) + def ctrl_bool(self, s_obj): + return delegate(self.bool, s_obj) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.is_true, hop) + return rtypedelegate(self.bool, hop) def ctrl_call(self, s_obj, *args_s): return delegate(self.call, s_obj, *args_s) @@ -236,7 +236,7 @@ assert s_attr.is_constant() s_cin.controller.ctrl_setattr(s_cin.s_real_obj, s_attr, s_value) - def is_true(s_cin): + def bool(s_cin): return s_cin.controller.ctrl_is_true(s_cin.s_real_obj) def simple_call(s_cin, *args_s): diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -12,7 +12,7 @@ ops_returning_a_bool = {'gt': True, 'ge': True, 'lt': True, 'le': True, 'eq': True, 'ne': True, - 'is_true': True} + 'bool': True, 'is_true':True} # global synonyms for some types from rpython.rlib.rarithmetic import intmask diff --git a/rpython/rtyper/lltypesystem/rbuilder.py b/rpython/rtyper/lltypesystem/rbuilder.py --- a/rpython/rtyper/lltypesystem/rbuilder.py +++ b/rpython/rtyper/lltypesystem/rbuilder.py @@ -128,7 +128,7 @@ return ll_builder.buf @classmethod - def ll_is_true(cls, ll_builder): + def ll_bool(cls, ll_builder): return ll_builder != nullptr(cls.lowleveltype.TO) class StringBuilderRepr(BaseStringBuilderRepr): diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -574,7 +574,7 @@ self.setfield(vinst, attr, vvalue, hop.llops, flags=hop.args_s[0].flags) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): vinst, = hop.inputargs(self) return hop.genop('ptr_nonzero', [vinst], resulttype=Bool) diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -252,9 +252,9 @@ v_dict, = hop.inputargs(self) return hop.gendirectcall(ll_dict_len, v_dict) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): v_dict, = hop.inputargs(self) - return hop.gendirectcall(ll_dict_is_true, v_dict) + return hop.gendirectcall(ll_dict_bool, v_dict) def make_iterator_repr(self, *variant): return DictIteratorRepr(self, *variant) @@ -440,7 +440,7 @@ def ll_dict_len(d): return d.num_items -def ll_dict_is_true(d): +def ll_dict_bool(d): # check if a dict is True, allowing for None return bool(d) and d.num_items != 0 diff --git a/rpython/rtyper/lltypesystem/rpbc.py 
b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -241,7 +241,7 @@ resulttype=rresult) return hop.llops.convertvar(v_result, rresult, hop.r_result) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): if not self.s_pbc.can_be_None: return inputconst(Bool, True) else: diff --git a/rpython/rtyper/raddress.py b/rpython/rtyper/raddress.py --- a/rpython/rtyper/raddress.py +++ b/rpython/rtyper/raddress.py @@ -41,7 +41,7 @@ v_access = hop.inputarg(address_repr, 0) return v_access - def rtype_is_true(self, hop): + def rtype_bool(self, hop): v_addr, = hop.inputargs(address_repr) c_null = hop.inputconst(address_repr, NULL) return hop.genop('adr_ne', [v_addr, c_null], diff --git a/rpython/rtyper/rbool.py b/rpython/rtyper/rbool.py --- a/rpython/rtyper/rbool.py +++ b/rpython/rtyper/rbool.py @@ -22,7 +22,7 @@ raise TyperError("not a bool: %r" % (value,)) return value - def rtype_is_true(_, hop): + def rtype_bool(_, hop): vlist = hop.inputargs(Bool) return vlist[0] diff --git a/rpython/rtyper/rbuilder.py b/rpython/rtyper/rbuilder.py --- a/rpython/rtyper/rbuilder.py +++ b/rpython/rtyper/rbuilder.py @@ -49,10 +49,10 @@ hop.exception_cannot_occur() return hop.gendirectcall(self.ll_build, *vlist) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): vlist = hop.inputargs(self) hop.exception_cannot_occur() - return hop.gendirectcall(self.ll_is_true, *vlist) + return hop.gendirectcall(self.ll_bool, *vlist) def convert_const(self, value): if not value is None: diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -195,7 +195,7 @@ def rtype_builtin_bool(hop): # not called any more? assert hop.nb_args == 1 - return hop.args_r[0].rtype_is_true(hop) + return hop.args_r[0].rtype_bool(hop) def rtype_builtin_int(hop): if isinstance(hop.args_s[0], annmodel.SomeString): diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -385,7 +385,7 @@ def rtype_setattr(self, hop): raise NotImplementedError - def rtype_is_true(self, hop): + def rtype_bool(self, hop): raise NotImplementedError def _emulate_call(self, hop, meth_name): diff --git a/rpython/rtyper/rcontrollerentry.py b/rpython/rtyper/rcontrollerentry.py --- a/rpython/rtyper/rcontrollerentry.py +++ b/rpython/rtyper/rcontrollerentry.py @@ -28,8 +28,8 @@ def rtype_setattr(self, hop): return self.controller.rtype_setattr(hop) - def rtype_is_true(self, hop): - return self.controller.rtype_is_true(hop) + def rtype_bool(self, hop): + return self.controller.rtype_bool(hop) def rtype_simple_call(self, hop): return self.controller.rtype_call(hop) diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -108,7 +108,7 @@ def get_ll_hash_function(self): return _hash_float - def rtype_is_true(_, hop): + def rtype_bool(_, hop): vlist = hop.inputargs(Float) return hop.genop('float_is_true', vlist, resulttype=Bool) diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -295,7 +295,7 @@ hop.exception_cannot_occur() return hop.genop('cast_int_to_unichar', vlist, resulttype=UniChar) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): assert self is self.as_int # rtype_is_true() is overridden in BoolRepr vlist = hop.inputargs(self) return hop.genop(self.opprefix + 'is_true', vlist, resulttype=Bool) 
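The rtyper hunks above all follow one mechanical pattern: each representation renames rtype_is_true() to rtype_bool(), while still lowering the truth test to the low-level operation that fits its type (int_is_true for integers, ptr_nonzero for instances and pointers, float_is_true for floats). A self-contained sketch of that dispatch follows; the HopSketch stand-in and the *ReprSketch classes are invented for illustration and are not the real rtyper classes.

    class HopSketch(object):
        # stands in for the rtyper's HighLevelOp; just records the op it would emit
        def genop(self, opname, args, resulttype=None):
            return (opname, args, resulttype)

    class IntReprSketch(object):
        def rtype_bool(self, hop, v_int):
            # integers: a direct truth test on the value
            return hop.genop('int_is_true', [v_int], resulttype='Bool')

    class InstanceReprSketch(object):
        def rtype_bool(self, hop, v_inst):
            # instances and raw pointers reduce to a non-null check
            return hop.genop('ptr_nonzero', [v_inst], resulttype='Bool')

    hop = HopSketch()
    assert IntReprSketch().rtype_bool(hop, 'v0')[0] == 'int_is_true'
    assert InstanceReprSketch().rtype_bool(hop, 'v1')[0] == 'ptr_nonzero'

Only the method name changes; the low-level operations being generated stay the same, which is why most of these hunks are one-line renames.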
diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -129,7 +129,7 @@ ll_func = ll_len_foldable return hop.gendirectcall(ll_func, v_lst) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): v_lst, = hop.inputargs(self) if hop.args_s[0].listdef.listitem.resized: ll_func = ll_list_is_true diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -199,15 +199,12 @@ [v_self] = hop.inputargs(self) return hop.gendirectcall(self.ll_str, v_self) - def rtype_nonzero(self, hop): - return self.rtype_is_true(hop) # can call a subclass' rtype_is_true() - - def rtype_is_true(self, hop): + def rtype_bool(self, hop): try: vlen = self.rtype_len(hop) except MissingRTypeOperation: if not hop.s_result.is_constant(): - raise TyperError("rtype_is_true(%r) not implemented" % (self,)) + raise TyperError("rtype_bool(%r) not implemented" % (self,)) return hop.inputconst(Bool, hop.s_result.const) else: return hop.genop('int_is_true', [vlen], resulttype=Bool) @@ -243,7 +240,7 @@ """A mix-in base class for subclasses of Repr that represent None as 'null' and true values as non-'null'. """ - def rtype_is_true(self, hop): + def rtype_bool(self, hop): if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) else: diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -587,7 +587,7 @@ class NoneFrozenPBCRepr(Repr): lowleveltype = Void - def rtype_is_true(self, hop): + def rtype_bool(self, hop): return Constant(False, Bool) def none_call(self, hop): diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -79,7 +79,7 @@ return hop.genop('getarraysize', vlist, resulttype = hop.r_result.lowleveltype) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): vlist = hop.inputargs(self) return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool) diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -137,7 +137,7 @@ v_str, = hop.inputargs(string_repr) return hop.gendirectcall(self.ll.ll_strlen, v_str) - def rtype_is_true(self, hop): + def rtype_bool(self, hop): s_str = hop.args_s[0] if s_str.can_be_None: string_repr = hop.args_r[0].repr @@ -145,7 +145,7 @@ return hop.gendirectcall(self.ll.ll_str_is_true, v_str) else: # defaults to checking the length - return super(AbstractStringRepr, self).rtype_is_true(hop) + return super(AbstractStringRepr, self).rtype_bool(hop) def rtype_method_startswith(self, hop): str1_repr = hop.args_r[0].repr @@ -595,7 +595,7 @@ def rtype_len(_, hop): return hop.inputconst(Signed, 1) - def rtype_is_true(_, hop): + def rtype_bool(_, hop): assert not hop.args_s[0].can_be_None return hop.inputconst(Bool, True) diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -91,7 +91,7 @@ res = self.interpret(func, [6]) assert res == 1 - def test_dict_is_true(self): + def test_dict_bool(self): def func(i): if i: d = {} diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -389,9 +389,9 @@ # (they have no side effects, at least in R-Python) CanRemove = {} for _op in ''' - newtuple newlist newdict is_true + newtuple newlist newdict bool 
is_ id type issubtype repr str len hash getattr getitem - pos neg nonzero abs hex oct ord invert add sub mul + pos neg abs hex oct ord invert add sub mul truediv floordiv div mod divmod pow lshift rshift and_ or_ xor int float long lt le eq ne gt ge cmp coerce contains iter get'''.split(): @@ -580,14 +580,14 @@ del link.args[i] -def coalesce_is_true(graph): - """coalesce paths that go through an is_true and a directly successive - is_true both on the same value, transforming the link into the - second is_true from the first to directly jump to the correct +def coalesce_bool(graph): + """coalesce paths that go through an bool and a directly successive + bool both on the same value, transforming the link into the + second bool from the first to directly jump to the correct target out of the second.""" candidates = [] - def has_is_true_exitpath(block): + def has_bool_exitpath(block): tgts = [] start_op = block.operations[-1] cond_v = start_op.args[0] @@ -597,15 +597,15 @@ if tgt == block: continue rrenaming = dict(zip(tgt.inputargs,exit.args)) - if len(tgt.operations) == 1 and tgt.operations[0].opname == 'is_true': + if len(tgt.operations) == 1 and tgt.operations[0].opname == 'bool': tgt_op = tgt.operations[0] if tgt.exitswitch == tgt_op.result and rrenaming.get(tgt_op.args[0]) == cond_v: tgts.append((exit.exitcase, tgt)) return tgts for block in graph.iterblocks(): - if block.operations and block.operations[-1].opname == 'is_true': - tgts = has_is_true_exitpath(block) + if block.operations and block.operations[-1].opname == 'bool': + tgts = has_bool_exitpath(block) if tgts: candidates.append((block, tgts)) @@ -621,7 +621,7 @@ newlink = tgt.exits[case].copy(rename) newexits[case] = newlink cand.recloseblock(*newexits) - newtgts = has_is_true_exitpath(cand) + newtgts = has_bool_exitpath(cand) if newtgts: candidates.append((cand, newtgts)) @@ -979,7 +979,7 @@ eliminate_empty_blocks, remove_assertion_errors, join_blocks, - coalesce_is_true, + coalesce_bool, transform_dead_op_vars, remove_identical_vars, transform_ovfcheck, From noreply at buildbot.pypy.org Mon Aug 19 23:15:18 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:18 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Move guessbool() call out of space.is_true() Message-ID: <20130819211518.0A97B1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66241:ce7a9656676d Date: 2013-05-05 04:15 +0100 http://bitbucket.org/pypy/pypy/changeset/ce7a9656676d/ Log: Move guessbool() call out of space.is_true() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -129,7 +129,7 @@ def append(self, operation): raise NotImplementedError - def guessbool(self, frame, w_condition, **kwds): + def guessbool(self, frame, w_condition): raise AssertionError("cannot guessbool(%s)" % (w_condition,)) @@ -211,7 +211,7 @@ [str(s) for s in self.listtoreplay[self.index:]])) self.index += 1 - def guessbool(self, frame, w_condition, **kwds): + def guessbool(self, frame, w_condition): assert self.index == len(self.listtoreplay) frame.recorder = self.nextreplayer return self.booloutcome @@ -432,8 +432,10 @@ self.last_exception = FSException(data[-2], data[-1]) self.blockstack = state.blocklist[:] - def guessbool(self, w_condition, **kwds): - return self.recorder.guessbool(self, w_condition, **kwds) + def guessbool(self, w_condition): + if isinstance(w_condition, Constant): + return 
w_condition.value + return self.recorder.guessbool(self, w_condition) def do_operation(self, name, *args_w): spaceop = SpaceOperation(name, args_w, Variable()) @@ -738,34 +740,34 @@ def JUMP_IF_FALSE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() - if not self.space.is_true(w_cond): + if not self.guessbool(self.space.is_true(w_cond)): return target def JUMP_IF_TRUE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() - if self.space.is_true(w_cond): + if self.guessbool(self.space.is_true(w_cond)): return target def POP_JUMP_IF_FALSE(self, target): w_value = self.popvalue() - if not self.space.is_true(w_value): + if not self.guessbool(self.space.is_true(w_value)): return target def POP_JUMP_IF_TRUE(self, target): w_value = self.popvalue() - if self.space.is_true(w_value): + if self.guessbool(self.space.is_true(w_value)): return target def JUMP_IF_FALSE_OR_POP(self, target): w_value = self.peekvalue() - if not self.space.is_true(w_value): + if not self.guessbool(self.space.is_true(w_value)): return target self.popvalue() def JUMP_IF_TRUE_OR_POP(self, target): w_value = self.peekvalue() - if self.space.is_true(w_value): + if self.guessbool(self.space.is_true(w_value)): return target self.popvalue() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -90,7 +90,7 @@ return build_flow(func, self) def is_w(self, w_one, w_two): - return self.is_true(self.is_(w_one, w_two)) + return self.frame.guessbool(self.is_true(self.is_(w_one, w_two))) is_ = None # real version added by add_operations() id = None # real version added by add_operations() @@ -133,7 +133,7 @@ return FSException(w_type, w_value) def exception_issubclass_w(self, w_cls1, w_cls2): - return self.is_true(self.issubtype(w_cls1, w_cls2)) + return self.frame.guessbool(self.is_true(self.issubtype(w_cls1, w_cls2))) def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" @@ -200,7 +200,7 @@ else: w_len = self.len(w_iterable) w_correct = self.eq(w_len, const(expected_length)) - if not self.is_true(w_correct): + if not self.frame.guessbool(self.is_true(w_correct)): e = self.exc_from_raise(self.w_ValueError, self.w_None) raise e return [self.frame.do_operation('getitem', w_iterable, const(i)) @@ -208,13 +208,13 @@ # ____________________________________________________________ def not_(self, w_obj): - return const(not self.is_true(w_obj)) + return const(not self.frame.guessbool(self.is_true(w_obj))) def is_true(self, w_obj): if w_obj.foldable(): - return bool(w_obj.value) + return const(bool(w_obj.value)) w_truthvalue = self.frame.do_operation('bool', w_obj) - return self.frame.guessbool(w_truthvalue) + return w_truthvalue def iter(self, w_iterable): if isinstance(w_iterable, Constant): @@ -264,7 +264,7 @@ return self.frame.do_op(op.getattr(w_obj, w_name)) def isinstance_w(self, w_obj, w_type): - return self.is_true(self.isinstance(w_obj, w_type)) + return self.frame.guessbool(self.is_true(self.isinstance(w_obj, w_type))) def import_name(self, name, glob=None, loc=None, frm=None, level=-1): try: From noreply at buildbot.pypy.org Mon Aug 19 23:15:19 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:19 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Remove redundant space.is_true() around functions that always return a bool Message-ID: <20130819211519.446DC1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: 
less-stringly-ops Changeset: r66242:df03b38e6a11 Date: 2013-05-05 17:28 +0100 http://bitbucket.org/pypy/pypy/changeset/df03b38e6a11/ Log: Remove redundant space.is_true() around functions that always return a bool diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -90,7 +90,7 @@ return build_flow(func, self) def is_w(self, w_one, w_two): - return self.frame.guessbool(self.is_true(self.is_(w_one, w_two))) + return self.frame.guessbool(self.is_(w_one, w_two)) is_ = None # real version added by add_operations() id = None # real version added by add_operations() @@ -133,7 +133,7 @@ return FSException(w_type, w_value) def exception_issubclass_w(self, w_cls1, w_cls2): - return self.frame.guessbool(self.is_true(self.issubtype(w_cls1, w_cls2))) + return self.frame.guessbool(self.issubtype(w_cls1, w_cls2)) def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" @@ -264,7 +264,7 @@ return self.frame.do_op(op.getattr(w_obj, w_name)) def isinstance_w(self, w_obj, w_type): - return self.frame.guessbool(self.is_true(self.isinstance(w_obj, w_type))) + return self.frame.guessbool(self.isinstance(w_obj, w_type)) def import_name(self, name, glob=None, loc=None, frm=None, level=-1): try: From noreply at buildbot.pypy.org Mon Aug 19 23:15:20 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:20 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: pull guessbool out of unnecessary utility methods Message-ID: <20130819211520.798591C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66243:5083ac174a41 Date: 2013-05-05 19:02 +0100 http://bitbucket.org/pypy/pypy/changeset/5083ac174a41/ Log: pull guessbool out of unnecessary utility methods diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -89,9 +89,6 @@ def build_flow(self, func): return build_flow(func, self) - def is_w(self, w_one, w_two): - return self.frame.guessbool(self.is_(w_one, w_two)) - is_ = None # real version added by add_operations() id = None # real version added by add_operations() @@ -132,11 +129,9 @@ w_type = const(type(exc)) return FSException(w_type, w_value) - def exception_issubclass_w(self, w_cls1, w_cls2): - return self.frame.guessbool(self.issubtype(w_cls1, w_cls2)) - def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" + frame = self.frame if not isinstance(w_check_class, Constant): raise FlowingError("Non-constant except guard.") check_class = w_check_class.value @@ -145,11 +140,11 @@ "Catching %s is not valid in RPython" % check_class.__name__) if not isinstance(check_class, tuple): # the simple case - return self.exception_issubclass_w(w_exc_type, w_check_class) + return frame.guessbool(self.issubtype(w_exc_type, w_check_class)) # special case for StackOverflow (see rlib/rstackovf.py) if check_class == rstackovf.StackOverflow: w_real_class = const(rstackovf._StackOverflow) - return self.exception_issubclass_w(w_exc_type, w_real_class) + return frame.guessbool(self.issubtype(w_exc_type, w_real_class)) # checking a tuple of classes for w_klass in self.unpackiterable(w_check_class): if self.exception_match(w_exc_type, w_klass): @@ -162,14 +157,15 @@ Returns an FSException object whose w_value is an instance of w_type. 
""" - if self.isinstance_w(w_arg1, self.w_type): + frame = self.frame + if frame.guessbool(self.isinstance(w_arg1, self.w_type)): # this is for all cases of the form (Class, something) - if self.is_w(w_arg2, self.w_None): + if frame.guessbool(self.is_(w_arg2, self.w_None)): # raise Type: we assume we have to instantiate Type w_value = self.call_function(w_arg1) else: w_valuetype = self.type(w_arg2) - if self.exception_issubclass_w(w_valuetype, w_arg1): + if frame.guessbool(self.issubtype(w_valuetype, w_arg1)): # raise Type, Instance: let etype be the exact type of value w_value = w_arg2 else: @@ -177,7 +173,7 @@ w_value = self.call_function(w_arg1, w_arg2) else: # the only case left here is (inst, None), from a 'raise inst'. - if not self.is_w(w_arg2, self.w_None): + if not frame.guessbool(self.is_(w_arg2, self.w_None)): raise self.exc_wrap(TypeError( "instance exception may not have a separate value")) w_value = w_arg1 @@ -263,9 +259,6 @@ pass return self.frame.do_op(op.getattr(w_obj, w_name)) - def isinstance_w(self, w_obj, w_type): - return self.frame.guessbool(self.isinstance(w_obj, w_type)) - def import_name(self, name, glob=None, loc=None, frm=None, level=-1): try: mod = __import__(name, glob, loc, frm, level) From noreply at buildbot.pypy.org Mon Aug 19 23:15:21 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:21 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: kill FlowObjSpace.is_true() (it's now identical to FlowObjSpace.bool()) Message-ID: <20130819211521.E0D1F1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66244:885db39402bc Date: 2013-08-10 16:50 +0100 http://bitbucket.org/pypy/pypy/changeset/885db39402bc/ Log: kill FlowObjSpace.is_true() (it's now identical to FlowObjSpace.bool()) diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -740,34 +740,34 @@ def JUMP_IF_FALSE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() - if not self.guessbool(self.space.is_true(w_cond)): + if not self.guessbool(self.space.bool(w_cond)): return target def JUMP_IF_TRUE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() - if self.guessbool(self.space.is_true(w_cond)): + if self.guessbool(self.space.bool(w_cond)): return target def POP_JUMP_IF_FALSE(self, target): w_value = self.popvalue() - if not self.guessbool(self.space.is_true(w_value)): + if not self.guessbool(self.space.bool(w_value)): return target def POP_JUMP_IF_TRUE(self, target): w_value = self.popvalue() - if self.guessbool(self.space.is_true(w_value)): + if self.guessbool(self.space.bool(w_value)): return target def JUMP_IF_FALSE_OR_POP(self, target): w_value = self.peekvalue() - if not self.guessbool(self.space.is_true(w_value)): + if not self.guessbool(self.space.bool(w_value)): return target self.popvalue() def JUMP_IF_TRUE_OR_POP(self, target): w_value = self.peekvalue() - if self.guessbool(self.space.is_true(w_value)): + if self.guessbool(self.space.bool(w_value)): return target self.popvalue() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -196,7 +196,7 @@ else: w_len = self.len(w_iterable) w_correct = self.eq(w_len, const(expected_length)) - if not self.frame.guessbool(self.is_true(w_correct)): + if not self.frame.guessbool(self.bool(w_correct)): e = self.exc_from_raise(self.w_ValueError, self.w_None) raise e 
return [self.frame.do_operation('getitem', w_iterable, const(i)) @@ -204,13 +204,7 @@ # ____________________________________________________________ def not_(self, w_obj): - return const(not self.frame.guessbool(self.is_true(w_obj))) - - def is_true(self, w_obj): - if w_obj.foldable(): - return const(bool(w_obj.value)) - w_truthvalue = self.frame.do_operation('bool', w_obj) - return w_truthvalue + return const(not self.frame.guessbool(self.bool(w_obj))) def iter(self, w_iterable): if isinstance(w_iterable, Constant): From noreply at buildbot.pypy.org Mon Aug 19 23:15:23 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:23 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Don't sugar isinstance in the first place Message-ID: <20130819211523.20E921C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66245:a8a1f2f496f1 Date: 2013-08-12 20:18 +0100 http://bitbucket.org/pypy/pypy/changeset/a8a1f2f496f1/ Log: Don't sugar isinstance in the first place diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -158,7 +158,8 @@ Returns an FSException object whose w_value is an instance of w_type. """ frame = self.frame - if frame.guessbool(self.isinstance(w_arg1, self.w_type)): + if frame.guessbool(self.call_function(const(isinstance), w_arg1, + self.w_type)): # this is for all cases of the form (Class, something) if frame.guessbool(self.is_(w_arg2, self.w_None)): # raise Type: we assume we have to instantiate Type diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -211,7 +211,6 @@ add_operator('is_', 2, 'is', pure=True) add_operator('id', 1, 'id', pyfunc=id) add_operator('type', 1, 'type', pyfunc=new_style_type, pure=True) -add_operator('isinstance', 2, 'isinstance', pyfunc=isinstance, pure=True) add_operator('issubtype', 2, 'issubtype', pyfunc=issubclass, pure=True) # not for old-style classes add_operator('repr', 1, 'repr', pyfunc=repr, pure=True) add_operator('str', 1, 'str', pyfunc=str, pure=True) @@ -232,7 +231,7 @@ add_operator('neg', 1, 'neg', pure=True, ovf=True) add_operator('bool', 1, 'truth', pyfunc=bool, pure=True) op.is_true = op.nonzero = op.bool # for llinterp -add_operator('abs' , 1, 'abs', pyfunc=abs, pure=True, ovf=True) +add_operator('abs', 1, 'abs', pyfunc=abs, pure=True, ovf=True) add_operator('hex', 1, 'hex', pyfunc=hex, pure=True) add_operator('oct', 1, 'oct', pyfunc=oct, pure=True) add_operator('ord', 1, 'ord', pyfunc=ord, pure=True) diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -1,4 +1,4 @@ -from rpython.flowspace.model import Constant +from rpython.flowspace.model import Constant, const SPECIAL_CASES = {} @@ -30,6 +30,14 @@ "pytest.ini from the root of the PyPy repository into your " "own project.") + at register_flow_sc(isinstance) +def sc_isinstance(space, args): + w_instance, w_type = args + if w_instance.foldable() and w_type.foldable(): + return const(isinstance(w_instance.value, w_type.value)) + return space.frame.do_operation('simple_call', const(isinstance), + w_instance, w_type) + # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: diff --git 
a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -57,17 +57,6 @@ # ____________________________________________________________ -def desugar_isinstance(graph): - """Replace isinstance operation with a call to isinstance.""" - constant_isinstance = Constant(isinstance) - for block in graph.iterblocks(): - for i in range(len(block.operations) - 1, -1, -1): - op = block.operations[i] - if op.opname == "isinstance": - args = [constant_isinstance, op.args[0], op.args[1]] - new_op = SpaceOperation("simple_call", args, op.result) - block.operations[i] = new_op - def eliminate_empty_blocks(graph): """Eliminate basic blocks that do not contain any operations. When this happens, we need to replace the preceeding link with the @@ -975,7 +964,6 @@ # ____ all passes & simplify_graph all_passes = [ - desugar_isinstance, eliminate_empty_blocks, remove_assertion_errors, join_blocks, From noreply at buildbot.pypy.org Mon Aug 19 23:15:24 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 Aug 2013 23:15:24 +0200 (CEST) Subject: [pypy-commit] pypy less-stringly-ops: Do not create so many useless implicit exception blocks Message-ID: <20130819211524.52D2F1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: less-stringly-ops Changeset: r66246:67521a7fb22d Date: 2013-08-12 16:23 +0100 http://bitbucket.org/pypy/pypy/changeset/67521a7fb22d/ Log: Do not create so many useless implicit exception blocks diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -456,7 +456,7 @@ self.guessexception(op.canraise) return op.result - def guessexception(self, exceptions): + def guessexception(self, exceptions, force=False): """ Catch possible exceptions implicitly. @@ -465,6 +465,11 @@ even if the interpreter re-raises the exception, it will not be the same ImplicitOperationError instance internally. """ + if not force and not any(isinstance(block, (ExceptBlock, FinallyBlock)) + for block in self.blockstack): + # The implicit exception wouldn't be caught and would later get + # removed, so don't bother creating it. 
+ return self.recorder.guessexception(self, *exceptions) def build_flow(self): diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -228,7 +228,7 @@ frame.replace_in_stack(it, next_unroller) return const(v) w_item = frame.do_operation("next", w_iter) - frame.guessexception([StopIteration, RuntimeError]) + frame.guessexception([StopIteration, RuntimeError], force=True) return w_item From noreply at buildbot.pypy.org Tue Aug 20 01:43:50 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 20 Aug 2013 01:43:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a test for the character dtype (PyPy issue 1546) Message-ID: <20130819234350.D142F1C36E6@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66247:9a5d859c4562 Date: 2013-08-20 01:16 +0200 http://bitbucket.org/pypy/pypy/changeset/9a5d859c4562/ Log: Add a test for the character dtype (PyPy issue 1546) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -778,6 +778,11 @@ from numpypy import unicode_ assert isinstance(unicode_(3), unicode) + def test_character_dtype(self): + from numpypy import array, character + x = array([["A", "B"], ["C", "D"]], character) + assert x == [["A", "B"], ["C", "D"]] + class AppTestRecordDtypes(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def test_create(self): From noreply at buildbot.pypy.org Tue Aug 20 01:43:54 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 20 Aug 2013 01:43:54 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge heads Message-ID: <20130819234354.B28D31C36E6@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66248:14996cd68e47 Date: 2013-08-20 01:22 +0200 http://bitbucket.org/pypy/pypy/changeset/14996cd68e47/ Log: Merge heads diff too long, truncating to 2000 out of 2931 lines diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -34,7 +34,7 @@ thread.interrupt_main() for i in range(10): print('x') - time.sleep(0.1) + time.sleep(0.25) except BaseException, e: interrupted.append(e) finally: @@ -59,7 +59,7 @@ for j in range(10): if len(done): break print('.') - time.sleep(0.1) + time.sleep(0.25) print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 @@ -117,7 +117,7 @@ def subthread(): try: - time.sleep(0.25) + time.sleep(0.5) with __pypy__.thread.signals_enabled: thread.interrupt_main() except BaseException, e: diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -437,14 +437,14 @@ return self.getrepr(self.space, info) def getdisplayname(self): + space = self.space w_name = self.w_name if w_name is None: return '?' - elif self.space.is_true(self.space.isinstance(w_name, - self.space.w_str)): - return "'%s'" % self.space.str_w(w_name) + elif space.isinstance_w(w_name, space.w_str): + return "'%s'" % space.str_w(w_name) else: - return self.space.str_w(self.space.repr(w_name)) + return space.str_w(space.repr(w_name)) def file_writelines(self, w_lines): """writelines(sequence_of_strings) -> None. Write the strings to the file. 
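The interp_file.py hunk above applies the same shortcut that the less-stringly-ops changesets use inside the flow space: space.isinstance_w() already yields an interpreter-level bool, so wrapping space.isinstance() in space.is_true() is redundant. A small sketch with a made-up FakeSpace (not the real object space) showing why the two spellings agree:

    class W_BoolSketch(object):
        def __init__(self, value):
            self.value = value

    class FakeSpace(object):
        # isinstance() answers with a wrapped (app-level) bool ...
        def isinstance(self, w_obj, w_cls):
            return W_BoolSketch(isinstance(w_obj, w_cls))
        # ... which is_true() then has to unwrap again ...
        def is_true(self, w_obj):
            return bool(w_obj.value)
        # ... while isinstance_w() returns the interp-level bool directly.
        def isinstance_w(self, w_obj, w_cls):
            return isinstance(w_obj, w_cls)

    space = FakeSpace()
    assert space.is_true(space.isinstance("abc", str)) == space.isinstance_w("abc", str)
    assert not space.isinstance_w(42, str)

The '_w' suffix is PyPy's convention for methods that take wrapped arguments but return an unwrapped (RPython-level) result, which is what makes the one-call form both shorter and cheaper.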
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -11,7 +11,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi - +from pypy.objspace.std.floatobject import W_FloatObject @unwrap_spec(typecode=str) def w_array(space, w_cls, typecode, __args__): @@ -532,7 +532,7 @@ class TypeCode(object): - def __init__(self, itemtype, unwrap, canoverflow=False, signed=False): + def __init__(self, itemtype, unwrap, canoverflow=False, signed=False, method='__int__'): self.itemtype = itemtype self.bytes = rffi.sizeof(itemtype) self.arraytype = lltype.Array(itemtype, hints={'nolength': True}) @@ -540,6 +540,7 @@ self.signed = signed self.canoverflow = canoverflow self.w_class = None + self.method = method if self.canoverflow: assert self.bytes <= rffi.sizeof(rffi.ULONG) @@ -554,8 +555,8 @@ return True types = { - 'c': TypeCode(lltype.Char, 'str_w'), - 'u': TypeCode(lltype.UniChar, 'unicode_w'), + 'c': TypeCode(lltype.Char, 'str_w', method=''), + 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), 'b': TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True), 'B': TypeCode(rffi.UCHAR, 'int_w', True), 'h': TypeCode(rffi.SHORT, 'int_w', True, True), @@ -567,8 +568,8 @@ # rbigint.touint() which # corresponds to the # C-type unsigned long - 'f': TypeCode(lltype.SingleFloat, 'float_w'), - 'd': TypeCode(lltype.Float, 'float_w'), + 'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'), + 'd': TypeCode(lltype.Float, 'float_w', method='__float__'), } for k, v in types.items(): v.typecode = k @@ -613,7 +614,19 @@ def item_w(self, w_item): space = self.space unwrap = getattr(space, mytype.unwrap) - item = unwrap(w_item) + try: + item = unwrap(w_item) + except OperationError, e: + if isinstance(w_item, W_FloatObject): # Odd special case from cpython + raise + if mytype.method != '' and e.match(space, space.w_TypeError): + try: + item = unwrap(space.call_method(w_item, mytype.method)) + except OperationError: + msg = 'array item must be ' + mytype.unwrap[:-2] + raise OperationError(space.w_TypeError, space.wrap(msg)) + else: + raise if mytype.unwrap == 'bigint_w': try: item = item.touint() diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -937,6 +937,13 @@ raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) + a = array('c', 'hi') + a[0] = 'b' + assert a[0] == 'b' + + a = array('u', u'hi') + a[0] = u'b' + assert a[0] == u'b' class TestCPythonsOwnArray(BaseArrayTests): diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -7,7 +7,6 @@ from pypy.objspace.std.bytearraytype import ( getbytevalue, makebytearraydata_w, new_bytearray) from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.noneobject import W_NoneObject @@ -76,7 +75,7 @@ def len__Bytearray(space, w_bytearray): result = len(w_bytearray.data) - return wrapint(space, result) + return space.newint(result) def ord__Bytearray(space, w_bytearray): if 
len(w_bytearray.data) != 1: diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -214,7 +214,7 @@ # # no '__complex__' method, so we assume it is a float, # unless it is an instance of some subclass of complex. - if space.is_true(space.isinstance(w_complex, space.gettypefor(W_ComplexObject))): + if space.isinstance_w(w_complex, space.gettypefor(W_ComplexObject)): real = space.float(space.getattr(w_complex, space.wrap("real"))) imag = space.float(space.getattr(w_complex, space.wrap("imag"))) return (space.float_w(real), space.float_w(imag)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat -from pypy.objspace.std.inttype import wrapint, W_AbstractIntObject +from pypy.objspace.std.inttype import W_AbstractIntObject from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.noneobject import W_NoneObject @@ -55,7 +55,7 @@ if space.is_w(space.type(self), space.w_int): return self a = self.intval - return wrapint(space, a) + return space.newint(a) registerimplementation(W_IntObject) @@ -104,7 +104,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer addition")) - return wrapint(space, z) + return space.newint(z) def sub__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -114,7 +114,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer substraction")) - return wrapint(space, z) + return space.newint(z) def mul__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -124,7 +124,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer multiplication")) - return wrapint(space, z) + return space.newint(z) def floordiv__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -137,7 +137,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer division")) - return wrapint(space, z) + return space.newint(z) div__Int_Int = floordiv__Int_Int def truediv__Int_Int(space, w_int1, w_int2): @@ -158,7 +158,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer modulo")) - return wrapint(space, z) + return space.newint(z) def divmod__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -231,7 +231,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer negation")) - return wrapint(space, x) + return space.newint(x) get_negint = neg__Int @@ -247,7 +247,7 @@ def invert__Int(space, w_int1): x = w_int1.intval a = ~x - return wrapint(space, a) + return space.newint(a) def lshift__Int_Int(space, w_int1, w_int2): a = w_int1.intval @@ -258,7 +258,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer left shift")) - return wrapint(space, c) + return space.newint(c) if b < 0: raise OperationError(space.w_ValueError, space.wrap("negative shift count")) @@ -284,25 +284,25 @@ a = 0 else: a = a >> b - return wrapint(space, a) + return space.newint(a) def and__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a & b - return wrapint(space, res) + return space.newint(res) def xor__Int_Int(space, w_int1, 
w_int2): a = w_int1.intval b = w_int2.intval res = a ^ b - return wrapint(space, res) + return space.newint(res) def or__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a | b - return wrapint(space, res) + return space.newint(res) def pos__Int(self, space): return self.int(space) @@ -323,7 +323,7 @@ return space.wrap(hex(w_int1.intval)) def getnewargs__Int(space, w_int1): - return space.newtuple([wrapint(space, w_int1.intval)]) + return space.newtuple([space.newint(w_int1.intval)]) register_all(vars()) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -19,7 +19,6 @@ from pypy.objspace.std import slicetype from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.iterobject import (W_FastListIterObject, W_ReverseSeqIterObject) from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice @@ -427,7 +426,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): return W_FastListIterObject(self) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -4,7 +4,6 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std import newformat, slicetype from pypy.objspace.std.formatting import mod_format -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.noneobject import W_NoneObject @@ -589,7 +588,7 @@ def str_count__String_String_ANY_ANY(space, w_self, w_arg, w_start, w_end): u_self, u_start, u_end = _convert_idx_params(space, w_self, w_start, w_end) - return wrapint(space, u_self.count(w_arg._value, u_start, u_end)) + return space.newint(u_self.count(w_arg._value, u_start, u_end)) def str_endswith__String_String_ANY_ANY(space, w_self, w_suffix, w_start, w_end): (u_self, start, end) = _convert_idx_params(space, w_self, w_start, @@ -709,7 +708,7 @@ def hash__String(space, w_str): s = w_str._value x = compute_hash(s) - return wrapint(space, x) + return space.newint(x) def lt__String_String(space, w_str1, w_str2): s1 = w_str1._value diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -7,7 +7,6 @@ from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import slicetype -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate @@ -56,7 +55,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): from pypy.objspace.std import iterobject diff --git a/pypy/tool/gcdump.py b/pypy/tool/gcdump.py --- a/pypy/tool/gcdump.py +++ b/pypy/tool/gcdump.py @@ -20,9 +20,13 @@ for obj in self.walk(a): self.add_object_summary(obj[2], obj[3]) - def load_typeids(self, filename): + def load_typeids(self, filename_or_iter): self.typeids = Stat.typeids.copy() - for 
num, line in enumerate(open(filename)): + if isinstance(filename_or_iter, str): + iter = open(filename_or_iter) + else: + iter = filename_or_iter + for num, line in enumerate(iter): if num == 0: continue words = line.split() @@ -92,5 +96,8 @@ typeid_name = os.path.join(os.path.dirname(sys.argv[1]), 'typeids.txt') if os.path.isfile(typeid_name): stat.load_typeids(typeid_name) + else: + import zlib, gc + stat.load_typeids(zlib.decompress(gc.get_typeids_z()).split("\n")) # stat.print_summary() diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -382,7 +382,7 @@ from rpython.jit.backend.tool.viewcode import World world = World() for entry in extract_category(log, 'jit-backend-dump'): - world.parse(entry.splitlines(True), truncate_addr=False) + world.parse(entry.splitlines(True)) dumps = {} for r in world.ranges: if r.addr in addrs and addrs[r.addr]: @@ -390,7 +390,12 @@ data = r.data.encode('hex') # backward compatibility dumps[name] = (world.backend_name, r.addr, data) loops = [] - for entry in extract_category(log, 'jit-log-opt'): + cat = extract_category(log, 'jit-log-opt') + if not cat: + extract_category(log, 'jit-log-rewritten') + if not cat: + extract_category(log, 'jit-log-noopt') + for entry in cat: parser = ParserCls(entry, None, {}, 'lltype', None, nonstrict=True) loop = parser.parse() diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -571,7 +571,8 @@ self.mc.BL(self.stack_check_slowpath, c=c.HI) # call if ip > lr # cpu interface - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, + log): clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt clt._debug_nbargs = len(inputargs) @@ -620,6 +621,9 @@ 'loop.asm') ops_offset = self.mc.ops_offset + if logger is not None: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) self.teardown() debug_start("jit-backend-addr") @@ -644,8 +648,8 @@ frame_depth = max(frame_depth, target_frame_depth) return frame_depth - def assemble_bridge(self, faildescr, inputargs, operations, - original_loop_token, log): + def assemble_bridge(self, logger, faildescr, inputargs, operations, + original_loop_token, log): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) @@ -694,6 +698,9 @@ frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) + if logger: + logger.log_bridge(inputargs, operations, "rewritten", + ops_offset=ops_offset) self.teardown() debug_bridge(descr_number, rawstart, codeendpos) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -452,7 +452,7 @@ # Write code equivalent to write_barrier() in the GC: it checks # a flag in the object at arglocs[0], and if set, it calls a # helper piece of assembler. The latter saves registers as needed - # and call the function jit_remember_young_pointer() from the GC. + # and call the function remember_young_pointer() from the GC. 
if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -56,17 +56,18 @@ def finish_once(self): self.assembler.finish_once() - def compile_loop(self, inputargs, operations, looptoken, + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, name=''): - return self.assembler.assemble_loop(name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(logger, name, inputargs, operations, + looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(faildescr, inputargs, operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(logger, faildescr, inputargs, + operations, + original_loop_token, log=log) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem diff --git a/rpython/jit/backend/arm/test/test_generated.py b/rpython/jit/backend/arm/test/test_generated.py --- a/rpython/jit/backend/arm/test/test_generated.py +++ b/rpython/jit/backend/arm/test/test_generated.py @@ -40,7 +40,7 @@ looptoken = JitCellToken() operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) operations[3].setfailargs([v9, v6, v10, v2, v8, v5, v1, v4]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-12 , -26 , -19 , 7 , -5 , -24 , -37 , 62 , 9 , 12] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -92,7 +92,7 @@ operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) operations[-1].setfailargs([v7, v1, v2]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [16 , 5 , 5 , 16 , 46 , 6 , 63 , 39 , 78 , 0] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 105 @@ -136,7 +136,7 @@ operations[-1].setfailargs([v5, v2, v1, v10, v3, v8, v4, v6]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-5 , 24 , 46 , -15 , 13 , -8 , 0 , -6 , 6 , 6] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -179,7 +179,7 @@ operations[5].setfailargs([]) operations[-1].setfailargs([v8, v2, v6, v5, v7, v1, v10]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [19 , -3 , -58 , -7 , 12 , 22 , -54 , -29 , -19 , -64] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == -29 @@ -223,7 +223,7 @@ looptoken = JitCellToken() operations[5].setfailargs([]) operations[-1].setfailargs([v1, v4, v10, v8, v7, v3]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [1073741824 , 95 , -16 , 5 , 92 , 12 , 32 , 17 , 37 , -63] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 1073741824 @@ -280,7 +280,7 @@ operations[9].setfailargs([v10, v13]) 
operations[-1].setfailargs([v8, v10, v6, v3, v2, v9]) args = [32 , 41 , -9 , 12 , -18 , 46 , 15 , 17 , 10 , 12] - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 assert cpu.get_int_value(deadframe, 0) == 12 @@ -328,7 +328,7 @@ operations[8].setfailargs([v5, v9]) operations[-1].setfailargs([v4, v10, v6, v5, v9, v7]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-8 , 0 , 62 , 35 , 16 , 9 , 30 , 581610154 , -1 , 738197503] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -378,7 +378,7 @@ operations[-2].setfailargs([v9, v4, v10, v11, v14]) operations[-1].setfailargs([v10, v8, v1, v6, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-39 , -18 , 1588243114 , -9 , -4 , 1252698794 , 0 , 715827882 , -15 , 536870912] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -433,7 +433,7 @@ operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) operations[-1].setfailargs([v1, v2, v9]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [0 , -2 , 24 , 1 , -4 , 13 , -95 , 33 , 2 , -44] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 @@ -475,7 +475,7 @@ operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) operations[-1].setfailargs([v8, v2, v10, v6, v7, v9, v5, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [3 , -5 , 1431655765 , 47 , 12 , 1789569706 , 15 , 939524096 , 16 , -43] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -524,7 +524,7 @@ operations[-1].setfailargs([v2, v3, v5, v7, v10, v8, v9]) operations[4].setfailargs([v14]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [14 , -20 , 18 , -2058005163 , 6 , 1 , -16 , 11 , 0 , 19] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 diff --git a/rpython/jit/backend/arm/test/test_regalloc2.py b/rpython/jit/backend/arm/test/test_regalloc2.py --- a/rpython/jit/backend/arm/test/test_regalloc2.py +++ b/rpython/jit/backend/arm/test/test_regalloc2.py @@ -24,7 +24,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 9) assert cpu.get_int_value(deadframe, 0) == (9 >> 3) assert cpu.get_int_value(deadframe, 1) == (~18) @@ -48,7 +48,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -10) assert cpu.get_int_value(deadframe, 0) == 0 assert cpu.get_int_value(deadframe, 1) == -1000 @@ -145,7 +145,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, 
operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-13 , 10 , 10 , 8 , -8 , -16 , -18 , 46 , -12 , 26] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -252,7 +252,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [17 , -20 , -6 , 6 , 1 , 13 , 13 , 9 , 49 , 8] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -75,7 +75,7 @@ ResOperation(rop.FINISH, [inp[1]], None, descr=BasicFinalDescr(1)), ] operations[-2].setfailargs(out) - cpu.compile_loop(inp, operations, looptoken) + cpu.compile_loop(None, inp, operations, looptoken) args = [i for i in range(1, 15)] deadframe = self.cpu.execute_token(looptoken, *args) output = [self.cpu.get_int_value(deadframe, i - 1) for i in range(1, 15)] @@ -117,9 +117,9 @@ i1 = int_sub(i0, 1) finish(i1) ''') - self.cpu.compile_loop(loop2.inputargs, loop2.operations, lt2) - self.cpu.compile_loop(loop3.inputargs, loop3.operations, lt3) - self.cpu.compile_loop(loop1.inputargs, loop1.operations, lt1) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, lt2) + self.cpu.compile_loop(None, loop3.inputargs, loop3.operations, lt3) + self.cpu.compile_loop(None, loop1.inputargs, loop1.operations, lt1) df = self.cpu.execute_token(lt1, 10) assert self.cpu.get_int_value(df, 0) == 7 @@ -214,7 +214,7 @@ ops = "".join(ops) loop = parse(ops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * numargs RES = lltype.Signed args = [i+1 for i in range(numargs)] @@ -246,7 +246,7 @@ try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() - self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.compile_loop(None, ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] @@ -280,7 +280,7 @@ faildescr = BasicFailDescr(2) loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ops2 = """ [i0, f1] i1 = same_as(i0) @@ -293,7 +293,7 @@ """ loop2 = parse(ops2, self.cpu, namespace=locals()) looptoken2 = JitCellToken() - info = self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + info = self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) deadframe = self.cpu.execute_token(looptoken, -9, longlong.getfloatstorage(-13.5)) res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -183,7 +183,8 @@ self.stats = stats or MiniStats() self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, + name=''): clt = 
model.CompiledLoopToken(self, looptoken.number) looptoken.compiled_loop_token = clt lltrace = LLTrace(inputargs, operations) @@ -191,7 +192,7 @@ clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() @@ -960,10 +961,10 @@ def execute_force_token(self, _): return self - def execute_cond_call_gc_wb(self, descr, a, b): + def execute_cond_call_gc_wb(self, descr, a): py.test.skip("cond_call_gc_wb not supported") - def execute_cond_call_gc_wb_array(self, descr, a, b, c): + def execute_cond_call_gc_wb_array(self, descr, a, b): py.test.skip("cond_call_gc_wb_array not supported") def execute_keepalive(self, descr, x): diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -136,7 +136,7 @@ """ Allocate a new frame, overwritten by tests """ frame = jitframe.JITFRAME.allocate(frame_info) - llop.gc_assume_young_pointers(lltype.Void, frame) + llop.gc_writebarrier(lltype.Void, frame) return frame class JitFrameDescrs: @@ -360,8 +360,7 @@ def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() - # to work. Additionally, 'hybrid' is missing some stuff like - # jit_remember_young_pointer() for now. + # to work. 'hybrid' could work but isn't tested with the JIT. if self.gcdescr.config.translation.gc not in ('minimark',): raise NotImplementedError("--gc=%s not implemented with the JIT" % (self.gcdescr.config.translation.gc,)) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -97,7 +97,7 @@ new_frame.jf_savedata = frame.jf_savedata new_frame.jf_guard_exc = frame.jf_guard_exc # all other fields are empty - llop.gc_assume_young_pointers(lltype.Void, new_frame) + llop.gc_writebarrier(lltype.Void, new_frame) return lltype.cast_opaque_ptr(llmemory.GCREF, new_frame) except Exception, e: print "Unhandled exception", e, "in realloc_frame" diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -26,10 +26,11 @@ - Add COND_CALLs to the write barrier before SETFIELD_GC and SETARRAYITEM_GC operations. - recent_mallocs contains a dictionary of variable -> None. If a variable - is in the dictionary, next setfields can be called without a write barrier, - because the variable got allocated after the last potentially collecting - resop + 'write_barrier_applied' contains a dictionary of variable -> None. + If a variable is in the dictionary, next setfields can be called without + a write barrier. The idea is that an object that was freshly allocated + or already write_barrier'd don't need another write_barrier if there + was no potentially collecting resop inbetween. 
""" _previous_size = -1 @@ -42,7 +43,7 @@ self.cpu = cpu self.newops = [] self.known_lengths = {} - self.recent_mallocs = {} + self.write_barrier_applied = {} def rewrite(self, operations): # we can only remember one malloc since the next malloc can possibly @@ -221,18 +222,18 @@ def emitting_an_operation_that_can_collect(self): # must be called whenever we emit an operation that can collect: # forgets the previous MALLOC_NURSERY, if any; and empty the - # set 'recent_mallocs', so that future SETFIELDs will generate + # set 'write_barrier_applied', so that future SETFIELDs will generate # a write barrier as usual. self._op_malloc_nursery = None - self.recent_mallocs.clear() + self.write_barrier_applied.clear() def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) self.newops.append(op) - # mark 'v_result' as freshly malloced - self.recent_mallocs[v_result] = None + # mark 'v_result' as freshly malloced, so not needing a write barrier + self.write_barrier_applied[v_result] = None def gen_malloc_fixedsize(self, size, typeid, v_result): """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). @@ -315,7 +316,7 @@ [ConstInt(kind), ConstInt(itemsize), v_length], v_result, descr=arraydescr) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_malloc_nursery_varsize_frame(self, sizebox, v_result): @@ -327,7 +328,7 @@ v_result) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. 
@@ -360,7 +361,7 @@ self.newops.append(op) self._previous_size = size self._v_last_malloced_nursery = v_result - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_initialize_tid(self, v_newgcobj, tid): @@ -382,45 +383,42 @@ def handle_write_barrier_setfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) self.newops.append(op) def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier_array(op.getarg(0), - op.getarg(1), v) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.gen_write_barrier_array(val, op.getarg(1)) + #op = op.copy_and_change(rop.SETARRAYITEM_RAW) self.newops.append(op) - def gen_write_barrier(self, v_base, v_value): + def gen_write_barrier(self, v_base): write_barrier_descr = self.gc_ll_descr.write_barrier_descr - args = [v_base, v_value] + args = [v_base] self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, descr=write_barrier_descr)) + self.write_barrier_applied[v_base] = None - def gen_write_barrier_array(self, v_base, v_index, v_value): + def gen_write_barrier_array(self, v_base, v_index): write_barrier_descr = self.gc_ll_descr.write_barrier_descr if write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too @@ -430,13 +428,15 @@ length = self.known_lengths.get(v_base, LARGE) if length >= LARGE: # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] + args = [v_base, v_index] self.newops.append( ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, descr=write_barrier_descr)) + # a WB_ARRAY is not enough to prevent any future write + # barriers, so don't add to 'write_barrier_applied'! 
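The intended consequence is that two stores into the same unknown-length array each keep their own cond_call_gc_wb_array, unlike the plain setfield case covered by test_multiple_writes below; a sketch of the expected rewriter output, reusing the descr names from the tests:

    [p1, i2, i3, p3, p4]
    cond_call_gc_wb_array(p1, i2, descr=wbdescr)
    setarrayitem_gc(p1, i2, p3, descr=cdescr)
    cond_call_gc_wb_array(p1, i3, descr=wbdescr)
    setarrayitem_gc(p1, i3, p4, descr=cdescr)
    jump()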
return # fall-back case: produce a write_barrier - self.gen_write_barrier(v_base, v_value) + self.gen_write_barrier(v_base) def round_up_for_allocation(self, size): if not self.gc_ll_descr.round_up: diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -202,13 +202,11 @@ rewriter = gc.GcRewriterAssembler(gc_ll_descr, None) newops = rewriter.newops v_base = BoxPtr() - v_value = BoxPtr() - rewriter.gen_write_barrier(v_base, v_value) + rewriter.gen_write_barrier(v_base) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB assert newops[0].getarg(0) == v_base - assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() assert is_valid_int(wbdescr.jit_wb_if_flag) diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -676,7 +676,7 @@ 'checkdescr': checkdescr, 'fielddescr': cpu.fielddescrof(S, 'x')}) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) p0 = lltype.malloc(S, zero=True) p1 = lltype.malloc(S) p2 = lltype.malloc(S) @@ -715,7 +715,7 @@ 'calldescr': checkdescr, }) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) S = self.S s = lltype.malloc(S) cpu.execute_token(token, 1, s) @@ -743,7 +743,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(20) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) arg = longlong.getfloatstorage(2.3) frame = cpu.execute_token(token, arg) ofs = cpu.get_baseofs_of_frame_field() @@ -770,7 +770,7 @@ cpu.gc_ll_descr.collections = [[0, sizeof.size]] cpu.gc_ll_descr.init_nursery(2 * sizeof.size) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = cpu.execute_token(token) # now we should be able to track everything from the frame frame = lltype.cast_opaque_ptr(JITFRAMEPTR, frame) @@ -821,7 +821,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) args = [lltype.nullptr(llmemory.GCREF.TO) for i in range(7)] frame = cpu.execute_token(token, 1, *args) frame = rffi.cast(JITFRAMEPTR, frame) @@ -867,7 +867,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) @@ -911,7 +911,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) assert getmap(frame).count('1') == 4 diff --git a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py 
b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py --- a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py @@ -97,7 +97,7 @@ loop = self.parse(ops, namespace=namespace) self.loop = loop looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) arguments = [] for arg in args: if isinstance(arg, int): @@ -147,7 +147,8 @@ assert ([box.type for box in bridge.inputargs] == [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() - self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, + bridge.operations, loop._jitcelltoken) return bridge @@ -335,7 +336,7 @@ ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] - + def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] @@ -409,7 +410,7 @@ class TestRegallocCompOps(BaseTestRegalloc): - + def test_cmp_op_0(self): ops = ''' [i0, i3] @@ -575,7 +576,7 @@ class TestRegAllocCallAndStackDepth(BaseTestRegalloc): def setup_class(cls): py.test.skip("skip for now, not sure what do we do") - + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if not self.cpu.IS_64_BIT: @@ -612,7 +613,7 @@ ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) + i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) guard_false(i5) [i11, i1, i2, i3, i4, i5, i6, i7, i8, i9] ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) @@ -649,7 +650,7 @@ ops = ''' [i2, i0, i1] - i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) + i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) guard_false(i0, descr=fdescr2) [i3, i0] ''' bridge = self.attach_bridge(ops, loop, -2) @@ -676,7 +677,7 @@ ops = ''' [i2] - i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) + i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) guard_false(i3, descr=fdescr2) [i3] ''' bridge = self.attach_bridge(ops, loop, -2) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -561,8 +561,8 @@ jump() """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setfield_raw(p1, p2, descr=tzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, p2, descr=tzdescr) jump() """) @@ -575,8 +575,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -595,8 +595,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 129, descr=clendescr) call(123456) - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -616,8 +616,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 130, descr=clendescr) call(123456) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -628,8 
+628,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -647,8 +647,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 5, descr=clendescr) label(p1, i2, p3) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -666,8 +666,8 @@ jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) jump(p1, p2) """, interiorzdescr=interiorzdescr) @@ -733,8 +733,8 @@ p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) setfield_gc(p1, i0, descr=strlendescr) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) jump() """) @@ -750,11 +750,25 @@ p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) label(p0, p1) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) jump() """) + def test_multiple_writes(self): + self.check_rewrite(""" + [p0, p1, p2] + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """, """ + [p0, p1, p2] + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """) + def test_rewrite_call_assembler(self): self.check_rewrite(""" [i0, f0] diff --git a/rpython/jit/backend/llsupport/test/test_runner.py b/rpython/jit/backend/llsupport/test/test_runner.py --- a/rpython/jit/backend/llsupport/test/test_runner.py +++ b/rpython/jit/backend/llsupport/test/test_runner.py @@ -14,7 +14,7 @@ def set_debug(flag): pass - def compile_loop(self, inputargs, operations, looptoken): + def compile_loop(self, logger, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -51,7 +51,8 @@ """ return False - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, + log=True, name=''): """Assemble the given loop. Should create and attach a fresh CompiledLoopToken to looptoken.compiled_loop_token and stick extra attributes @@ -67,7 +68,7 @@ """ raise NotImplementedError - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): """Assemble the bridge. The FailDescr is the descr of the original guard that failed. 
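At the call sites, the new signature just means one extra leading argument; a hedged sketch of driving a backend directly (the tests below simply pass None, and a real logger is any object providing the log_loop()/log_bridge() methods used by the ARM assembler above):

    looptoken = JitCellToken()
    cpu.compile_loop(logger, loop.inputargs, loop.operations, looptoken,
                     log=True, name='loop0')
    # later, when the guard described by 'faildescr' fails often enough:
    cpu.compile_bridge(logger, faildescr, bridge.inputargs,
                       bridge.operations, looptoken)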
diff --git a/rpython/jit/backend/test/calling_convention_test.py b/rpython/jit/backend/test/calling_convention_test.py --- a/rpython/jit/backend/test/calling_convention_test.py +++ b/rpython/jit/backend/test/calling_convention_test.py @@ -105,7 +105,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) @@ -249,7 +249,7 @@ called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_descr = called_loop.operations[-1].getdescr() - self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) + self.cpu.compile_loop(None, called_loop.inputargs, called_loop.operations, called_looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = cpu.execute_token(called_looptoken, *argvals) @@ -278,7 +278,7 @@ self.cpu.done_with_this_frame_descr_float = done_descr try: othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # prepare call to called_loop argvals, _ = self._prepare_args(args, floats, ints) @@ -424,7 +424,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -49,7 +49,7 @@ valueboxes, descr) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) args = [] for box in inputargs: if isinstance(box, BoxInt): @@ -127,7 +127,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) res = self.cpu.get_int_value(deadframe, 0) @@ -145,7 +145,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, longlong.getfloatstorage(2.8)) fail = self.cpu.get_latest_descr(deadframe) @@ -170,7 +170,7 @@ inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -195,7 +195,7 @@ inputargs = [i3] operations[4].setfailargs([None, None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 44) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -221,7 +221,7 @@ operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = 
weakref.ref(operations[2]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) if hasattr(looptoken, '_x86_ops_offset'): del looptoken._x86_ops_offset # else it's kept alive del i0, i1, i2 @@ -249,7 +249,7 @@ ] inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -260,7 +260,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -291,7 +291,7 @@ ] inputargs = [i3] operations[4].setfailargs([None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -302,7 +302,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -320,7 +320,7 @@ ] inputargs = [i0] operations[0].setfailargs([i0]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1list = [BoxInt() for i in range(150)] bridge = [] @@ -334,7 +334,7 @@ descr=BasicFinalDescr(4))) bridge[-2].setfailargs(i1list) - self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i0], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) fail = self.cpu.get_latest_descr(deadframe) @@ -358,7 +358,7 @@ operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] - self.cpu.compile_loop([i0], operations, looptoken) + self.cpu.compile_loop(None, [i0], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 99) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -369,7 +369,7 @@ operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -380,7 +380,7 @@ operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -391,7 +391,7 @@ operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] - self.cpu.compile_loop([f0], operations, looptoken) + self.cpu.compile_loop(None, [f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) deadframe = self.cpu.execute_token(looptoken, value) fail = self.cpu.get_latest_descr(deadframe) @@ -403,7 +403,7 @@ operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -429,7 +429,7 @@ ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] 
operations[-2].setfailargs([t, z]) - cpu.compile_loop([x, y], operations, looptoken) + cpu.compile_loop(None, [x, y], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_int_value(deadframe, 0) == 0 assert self.cpu.get_int_value(deadframe, 1) == 55 @@ -488,7 +488,7 @@ ops[1].setfailargs([v_res]) # looptoken = JitCellToken() - self.cpu.compile_loop([v1, v2], ops, looptoken) + self.cpu.compile_loop(None, [v1, v2], ops, looptoken) for x, y, z in testcases: deadframe = self.cpu.execute_token(looptoken, x, y) fail = self.cpu.get_latest_descr(deadframe) @@ -1238,7 +1238,7 @@ print inputargs for op in operations: print op - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # deadframe = self.cpu.execute_token(looptoken, *values) fail = self.cpu.get_latest_descr(deadframe) @@ -1305,7 +1305,7 @@ operations[3].setfailargs(inputargs[:]) operations[3].setdescr(faildescr) # - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # values = [] S = lltype.GcStruct('S') @@ -1366,7 +1366,7 @@ operations[-3].setfailargs(fboxes) operations[-2].setfailargs(fboxes) looptoken = JitCellToken() - self.cpu.compile_loop(fboxes, operations, looptoken) + self.cpu.compile_loop(None, fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() @@ -1375,7 +1375,7 @@ ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] - self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, fboxes2, bridge, looptoken) args = [] for i in range(len(fboxes)): @@ -1407,7 +1407,7 @@ finish()""" loop = parse(loopops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) args = [1] args.append(longlong.getfloatstorage(132.25)) args.append(longlong.getfloatstorage(0.75)) @@ -1428,7 +1428,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] bridgeops[-2].setfailargs(fboxes[:]) - self.cpu.compile_bridge(loop.operations[-2].getdescr(), fboxes, + self.cpu.compile_bridge(None, loop.operations[-2].getdescr(), fboxes, bridgeops, looptoken) args = [1, longlong.getfloatstorage(132.25), @@ -1463,7 +1463,7 @@ ] operations[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for value in [-42, 0, 1, 10]: deadframe = self.cpu.execute_token(looptoken, value) @@ -1508,7 +1508,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for test1 in [-65, -42, -11, 0, 1, 10]: if test1 == -42 or combinaison[0] == 'b': @@ -1560,7 +1560,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for test1 in [65, 42, 11, 0, 1]: if test1 == 42 or combinaison[0] == 'b': @@ -1616,7 +1616,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # nan = 1e200 * 1e200 nan /= nan @@ -1675,7 +1675,7 @@ descr=faildescr)) looptoken = JitCellToken() # - self.cpu.compile_loop(inputargs, 
operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # args = [] for box in inputargs: @@ -1748,7 +1748,7 @@ looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) - self.cpu.compile_loop(unique_testcase_list, operations, + self.cpu.compile_loop(None, unique_testcase_list, operations, looptoken) args = [box.getfloatstorage() for box in unique_testcase_list] @@ -2065,7 +2065,7 @@ exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_ref_value(deadframe, 0) == xptr excvalue = self.cpu.grab_exc_value(deadframe) @@ -2088,7 +2088,7 @@ exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_int_value(deadframe, 0) == 1 excvalue = self.cpu.grab_exc_value(deadframe) @@ -2105,7 +2105,7 @@ ''' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_int_value(deadframe, 0) == 1 excvalue = self.cpu.grab_exc_value(deadframe) @@ -2140,11 +2140,9 @@ s = lltype.malloc(S) s.tid = value sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - t = lltype.malloc(S) - tgcref = lltype.cast_opaque_ptr(llmemory.GCREF, t) del record[:] self.execute_operation(rop.COND_CALL_GC_WB, - [BoxPtr(sgcref), ConstPtr(tgcref)], + [BoxPtr(sgcref)], 'void', descr=WriteBarrierDescr()) if cond: assert record == [rffi.cast(lltype.Signed, sgcref)] @@ -2179,7 +2177,7 @@ sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) del record[:] self.execute_operation(rop.COND_CALL_GC_WB_ARRAY, - [BoxPtr(sgcref), ConstInt(123), BoxPtr(sgcref)], + [BoxPtr(sgcref), ConstInt(123)], 'void', descr=WriteBarrierDescr()) if cond: assert record == [rffi.cast(lltype.Signed, sgcref)] @@ -2244,7 +2242,7 @@ del record[:] box_index = BoxIndexCls((9<<7) + 17) self.execute_operation(rop.COND_CALL_GC_WB_ARRAY, - [BoxPtr(sgcref), box_index, BoxPtr(sgcref)], + [BoxPtr(sgcref), box_index], 'void', descr=WriteBarrierDescr()) if cond in [0, 1]: assert record == [rffi.cast(lltype.Signed, s.data)] @@ -2286,7 +2284,7 @@ 'func_ptr': func_ptr, 'calldescr': calldescr}) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) f1 = longlong.getfloatstorage(1.2) f2 = longlong.getfloatstorage(3.4) frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, f1, f2) @@ -2331,7 +2329,7 @@ ] ops[2].setfailargs([i1, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2377,7 +2375,7 @@ ] ops[2].setfailargs([i1, i2, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = 
self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2425,7 +2423,7 @@ ] ops[2].setfailargs([i1, f2, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2467,7 +2465,7 @@ ] ops[1].setfailargs([i1, i2]) looptoken = JitCellToken() - self.cpu.compile_loop([i1], ops, looptoken) + self.cpu.compile_loop(None, [i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, ord('G')) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2525,7 +2523,7 @@ ] ops[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1, i2, i3], ops, looptoken) args = [rffi.cast(lltype.Signed, raw), 2, 4, @@ -2582,7 +2580,7 @@ ResOperation(rop.FINISH, [i3], None, descr=BasicFinalDescr(0)) ] looptoken = JitCellToken() - self.cpu.compile_loop([i1, i2], ops, looptoken) + self.cpu.compile_loop(None, [i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') args = [buflen, rffi.cast(lltype.Signed, buffer)] @@ -2652,7 +2650,7 @@ ] ops[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([], ops, looptoken) + self.cpu.compile_loop(None, [], ops, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) @@ -2792,7 +2790,7 @@ ops.insert(-1, ResOperation(rop.SAME_AS, [b1], b1.clonebox())) looptoken = JitCellToken() - self.cpu.compile_loop(argboxes, ops, looptoken) + self.cpu.compile_loop(None, argboxes, ops, looptoken) # seen = [] deadframe = self.cpu.execute_token(looptoken, *argvalues_normal) @@ -2817,7 +2815,7 @@ ] ops[0].setfailargs([i1]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2844,7 +2842,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFinalDescr(3)) ] ops[0].setfailargs([]) - self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) + self.cpu.compile_bridge(None, faildescr, [i2], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2877,7 +2875,7 @@ ] ops[0].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i0], ops, looptoken) + self.cpu.compile_loop(None, [i0], ops, looptoken) # mark as failing self.cpu.invalidate_loop(looptoken) # attach a bridge @@ -2885,7 +2883,7 @@ ops2 = [ ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), ] - self.cpu.compile_bridge(faildescr, [], ops2, looptoken) + self.cpu.compile_bridge(None, faildescr, [], ops2, looptoken) # run: must not be caught in an infinite loop deadframe = self.cpu.execute_token(looptoken, 16) fail = self.cpu.get_latest_descr(deadframe) @@ -3093,7 +3091,7 @@ looptoken.outermost_jitdriver_sd = FakeJitDriverSD() finish_descr = loop.operations[-1].getdescr() self.cpu.done_with_this_frame_descr_int = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( @@ -3111,7 +3109,7 @@ ''' loop = parse(ops, 
namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_int_value(deadframe, 0) == 13 @@ -3121,7 +3119,7 @@ del called[:] self.cpu.done_with_this_frame_descr_int = finish_descr othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_int_value(deadframe, 0) == 97 @@ -3159,7 +3157,7 @@ loop = parse(ops) looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( @@ -3173,7 +3171,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) deadframe = self.cpu.execute_token(othertoken, sys.maxint - 1) assert self.cpu.get_int_value(deadframe, 0) == 3 @@ -3211,7 +3209,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.done_with_this_frame_descr_float = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(2.3)] deadframe = self.cpu.execute_token(looptoken, *args) @@ -3225,7 +3223,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(3.2)] deadframe = self.cpu.execute_token(othertoken, *args) @@ -3237,7 +3235,7 @@ del called[:] self.cpu.done_with_this_frame_descr_float = finish_descr othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(4.2)] deadframe = self.cpu.execute_token(othertoken, *args) @@ -3300,7 +3298,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.done_with_this_frame_descr_float = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) finish_descr = loop.operations[-1].getdescr() args = [longlong.getfloatstorage(1.25), longlong.getfloatstorage(2.35)] @@ -3317,7 +3315,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken args = [longlong.getfloatstorage(1.25), @@ -3336,7 +3334,7 @@ loop2 = parse(ops) looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() - self.cpu.compile_loop(loop2.inputargs, loop2.operations, 
looptoken2) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) finish_descr2 = loop2.operations[-1].getdescr() # install it @@ -3696,7 +3694,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # overflowing value: deadframe = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) fail = self.cpu.get_latest_descr(deadframe) @@ -3749,7 +3747,7 @@ operations[3].setfailargs([i1]) operations[6].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -3761,7 +3759,7 @@ ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] - self.cpu.compile_bridge(faildescr, inputargs2, operations2, looptoken) + self.cpu.compile_bridge(None, faildescr, inputargs2, operations2, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -3778,7 +3776,7 @@ descr = BasicFinalDescr() loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) for inp, outp in [(2,2), (-3, 0)]: deadframe = self.cpu.execute_token(looptoken, inp) assert outp == self.cpu.get_int_value(deadframe, 0) @@ -3807,8 +3805,8 @@ bridge = parse(bridge_ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.assembler.set_debug(False) - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - bridge_info = self.cpu.compile_bridge(faildescr, bridge.inputargs, + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) + bridge_info = self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) self.cpu.assembler.set_debug(True) # always on untranslated @@ -3852,7 +3850,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFinalDescr(1234)), ] operations[1].setfailargs([i0]) - self.cpu.compile_loop(inputargs, operations, looptoken1) + self.cpu.compile_loop(None, inputargs, operations, looptoken1) def func(a, b, c, d, e, f, g, h, i): assert a + 2 == b @@ -3906,14 +3904,14 @@ ResOperation(rop.JUMP, [i19], None, descr=targettoken1), ] operations2[-2].setfailargs([]) - self.cpu.compile_bridge(faildescr1, inputargs, operations2, looptoken1) + self.cpu.compile_bridge(None, faildescr1, inputargs, operations2, looptoken1) looptoken2 = JitCellToken() inputargs = [BoxInt()] operations3 = [ ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), ] - self.cpu.compile_loop(inputargs, operations3, looptoken2) + self.cpu.compile_loop(None, inputargs, operations3, looptoken2) deadframe = self.cpu.execute_token(looptoken2, -9) fail = self.cpu.get_latest_descr(deadframe) @@ -3930,11 +3928,11 @@ operations[0].setfailargs([]) looptoken = JitCellToken() inputargs = [t_box] - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) operations = [ ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(99)) ] - self.cpu.compile_bridge(faildescr, [], operations, looptoken) + self.cpu.compile_bridge(None, faildescr, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, null_box.getref_base()) fail = 
self.cpu.get_latest_descr(deadframe) assert fail.identifier == 99 @@ -3962,7 +3960,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_int_value(deadframe, 0) @@ -3992,7 +3990,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_float_value(deadframe, 0) @@ -4022,7 +4020,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_int_value(deadframe, 0) @@ -4054,7 +4052,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, value) result = rawstorage.raw_storage_getitem(T, p, 16) @@ -4086,7 +4084,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, longlong.getfloatstorage(value)) @@ -4120,7 +4118,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, longlong.singlefloat2int(value)) @@ -4155,7 +4153,7 @@ ] ops[2].setfailargs([i2]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 @@ -4189,7 +4187,7 @@ finish(i1, descr=finaldescr) """, namespace={'finaldescr': finaldescr, 'calldescr2': calldescr2, 'guarddescr': guarddescr, 'func2_ptr': func2_ptr}) - self.cpu.compile_bridge(faildescr, bridge.inputargs, From noreply at buildbot.pypy.org Tue Aug 20 09:13:39 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Aug 2013 09:13:39 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20130820071339.8A64D1C02B1@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66249:8e21ac4a4e52 Date: 2013-08-20 09:12 +0200 http://bitbucket.org/pypy/pypy/changeset/8e21ac4a4e52/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,4 @@ .. branch: dotviewer-linewidth .. branch: reflex-support .. branch: numpypy-inplace-op +.. 
branch: rewritten-loop-logging From noreply at buildbot.pypy.org Tue Aug 20 10:39:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 10:39:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Optimize int_xor(_, 0) Message-ID: <20130820083935.087F11C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66250:0a2651680d0a Date: 2013-08-20 10:31 +0200 http://bitbucket.org/pypy/pypy/changeset/0a2651680d0a/ Log: Optimize int_xor(_, 0) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -178,6 +178,17 @@ else: self.emit_operation(op) + def optimize_INT_XOR(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v1.is_constant() and v1.box.getint() == 0: + self.make_equal_to(op.result, v2) + elif v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_FLOAT_MUL(self, op): arg1 = op.getarg(0) arg2 = op.getarg(1) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -3263,6 +3263,20 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_xor(self): + ops = """ + [i0, i1] + i2 = int_xor(i0, 23) + i3 = int_xor(i1, 0) + jump(i2, i3) + """ + expected = """ + [i0, i1] + i2 = int_xor(i0, 23) + jump(i2, i1) + """ + self.optimize_loop(ops, expected) + # ---------- def test_residual_call_does_not_invalidate_caches(self): From noreply at buildbot.pypy.org Tue Aug 20 10:39:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 10:39:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the XXX Message-ID: <20130820083936.E490E1C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66251:ded906e02c44 Date: 2013-08-20 10:38 +0200 http://bitbucket.org/pypy/pypy/changeset/ded906e02c44/ Log: Fix the XXX diff --git a/rpython/rlib/rsre/rsre_char.py b/rpython/rlib/rsre/rsre_char.py --- a/rpython/rlib/rsre/rsre_char.py +++ b/rpython/rlib/rsre/rsre_char.py @@ -115,10 +115,7 @@ for function, negate in category_dispatch_unroll: if category_code == i: result = function(char_code) - if negate: - return not result # XXX this might lead to a guard - else: - return result + return result ^ negate i = i + 1 else: return False @@ -160,9 +157,7 @@ ppos += 1 else: return False - if negated: - return not result - return result + return result ^ negated def set_literal(pat, index, char_code): # From noreply at buildbot.pypy.org Tue Aug 20 13:49:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 13:49:21 +0200 (CEST) Subject: [pypy-commit] pypy default: A first version of import_from_mixin(), which really copies the Message-ID: <20130820114921.9A6CE1C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66252:51c5cb0ddc92 Date: 2013-08-20 13:48 +0200 http://bitbucket.org/pypy/pypy/changeset/51c5cb0ddc92/ Log: A first version of import_from_mixin(), which really copies the content of the source class into the target, completely before translation. 
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2539,6 +2539,27 @@ s = a.build_types(f, []) assert s.const == 2 + def test_import_from_mixin(self): + class M(object): + def f(self): + return self.a + class I(object): + objectmodel.import_from_mixin(M) + def __init__(self, i): + self.a = i + class S(object): + objectmodel.import_from_mixin(M) + def __init__(self, s): + self.a = s + def f(n): + return (I(n).f(), S("a" * n).f()) + + assert f(3) == (3, "aaa") + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s.items[0], annmodel.SomeInteger) + assert isinstance(s.items[1], annmodel.SomeString) + def test___class___attribute(self): class Base(object): pass class A(Base): pass diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -9,7 +9,7 @@ import types import math import inspect -from rpython.tool.sourcetools import rpython_wrapper +from rpython.tool.sourcetools import rpython_wrapper, func_with_new_name # specialize is a decorator factory for attaching _annspecialcase_ # attributes to functions: for example @@ -720,3 +720,32 @@ self.dic = dic self.key = key self.hash = hash + +# ____________________________________________________________ + +def import_from_mixin(M): + flatten = {} + for base in inspect.getmro(M): + for key, value in base.__dict__.items(): + if key in ('__module__', '__name__', '__dict__', + '__doc__', '__weakref__'): + continue + if key in flatten: + continue + if isinstance(value, types.FunctionType): + value = func_with_new_name(value, value.__name__) + elif isinstance(value, staticmethod): + func = value.__get__(42) + func = func_with_new_name(func, func.__name__) + value = staticmethod(func) + elif isinstance(value, classmethod): + raise AssertionError("classmethods not supported " + "in 'import_from_mixin'") + flatten[key] = value + # + target = sys._getframe(1).f_locals + for key, value in flatten.items(): + if key in target: + raise Exception("import_from_mixin: would overwrite the value " + "already defined locally for %r" % (key,)) + target[key] = value diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -548,3 +548,57 @@ r = interpret(f, [29]) assert r == 1 + +def test_import_from_mixin(): + class M: # old-style + def f(self): pass + class A: # old-style + import_from_mixin(M) + assert A.f.im_func is not M.f.im_func + + class M(object): + def f(self): pass + class A: # old-style + import_from_mixin(M) + assert A.f.im_func is not M.f.im_func + + class M: # old-style + def f(self): pass + class A(object): + import_from_mixin(M) + assert A.f.im_func is not M.f.im_func + + class M(object): + def f(self): pass + class A(object): + import_from_mixin(M) + assert A.f.im_func is not M.f.im_func + + class MBase(object): + a = 42; b = 43; c = 1000 + def f(self): return "hi" + def g(self): return self.c - 1 + class M(MBase): + a = 84 + def f(self): return "there" + class A(object): + import_from_mixin(M) + c = 88 + assert A.f.im_func is not M.f.im_func + assert A.f.im_func is not MBase.f.im_func + assert A.g.im_func is not MBase.g.im_func + assert A().f() == "there" + assert A.a == 84 + assert A.b == 43 + assert A.c == 88 + assert A().g() == 87 + + try: + class B(object): + a = 63 + 
import_from_mixin(M) + except Exception, e: + assert ("would overwrite the value already defined locally for 'a'" + in str(e)) + else: + raise AssertionError("failed to detect overwritten attribute") From noreply at buildbot.pypy.org Tue Aug 20 13:54:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 13:54:11 +0200 (CEST) Subject: [pypy-commit] pypy default: List of dependencies on the SLES11 platform (thanks Riccardo on pypy-dev) Message-ID: <20130820115411.28FDF1C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66253:39b9309d3050 Date: 2013-08-20 13:53 +0200 http://bitbucket.org/pypy/pypy/changeset/39b9309d3050/ Log: List of dependencies on the SLES11 platform (thanks Riccardo on pypy-dev) diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -57,6 +57,12 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + On SLES11: + + $ sudo zypper install gcc make python-devel pkg-config \ + zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ + libexpat-devel libffi-devel python-curses + The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. * ``pkg-config`` (to help us locate libffi files) From noreply at buildbot.pypy.org Tue Aug 20 14:12:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 14:12:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Add docstring, and add a "special_methods" parameter, which seems safer Message-ID: <20130820121223.34A1D1C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66254:5b980423ca91 Date: 2013-08-20 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/5b980423ca91/ Log: Add docstring, and add a "special_methods" parameter, which seems safer than filtering out a few random names. diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -723,13 +723,24 @@ # ____________________________________________________________ -def import_from_mixin(M): +def import_from_mixin(M, special_methods=['__init__', '__del__']): + """Copy all methods and class attributes from the class M into + the current scope. Should be called when defining a class body. + Function and staticmethod objects are duplicated, which means + that annotation will not consider them as identical to another + copy in another unrelated class. + + By default, "special" methods and class attributes, with a name + like "__xxx__", are not copied unless they are "__init__" or + "__del__". The list can be changed with the optional second + argument. + """ flatten = {} for base in inspect.getmro(M): for key, value in base.__dict__.items(): - if key in ('__module__', '__name__', '__dict__', - '__doc__', '__weakref__'): - continue + if key.startswith('__') and key.endswith('__'): + if key not in special_methods: + continue if key in flatten: continue if isinstance(value, types.FunctionType): diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -602,3 +602,13 @@ in str(e)) else: raise AssertionError("failed to detect overwritten attribute") + + class M(object): + def __str__(self): + return "m!" 
+ class A(object): + import_from_mixin(M) + class B(object): + import_from_mixin(M, special_methods=['__str__']) + assert str(A()).startswith('<') + assert str(B()) == "m!" From noreply at buildbot.pypy.org Tue Aug 20 14:16:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 14:16:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix: 'object' has an empty __init__ which we should not copy. Message-ID: <20130820121623.63E291C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66255:ee2311d4ba26 Date: 2013-08-20 14:15 +0200 http://bitbucket.org/pypy/pypy/changeset/ee2311d4ba26/ Log: Test and fix: 'object' has an empty __init__ which we should not copy. diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -19,7 +19,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.objectmodel import compute_hash, import_from_mixin from rpython.rlib.rstring import StringBuilder @@ -272,8 +272,6 @@ # ____________________________________________________________ class SubBufferMixin(object): - _mixin_ = True - def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -297,10 +295,11 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) -class SubBuffer(SubBufferMixin, Buffer): - pass +class SubBuffer(Buffer): + import_from_mixin(SubBufferMixin) -class RWSubBuffer(SubBufferMixin, RWBuffer): +class RWSubBuffer(RWBuffer): + import_from_mixin(SubBufferMixin) def setitem(self, index, char): self.buffer.setitem(self.offset + index, char) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -121,7 +121,7 @@ greens=['w_type'], reds='auto') class DescrOperation(object): - _mixin_ = True + # This is meant to be a *mixin*. def is_data_descr(space, w_obj): return space.lookup(w_obj, '__set__') is not None diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -10,7 +10,7 @@ from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rarithmetic import base_int, widen, is_valid_int -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, import_from_mixin from rpython.rlib import jit # Object imports @@ -37,9 +37,10 @@ from pypy.objspace.std.stringtype import wrapstr from pypy.objspace.std.unicodetype import wrapunicode -class StdObjSpace(ObjSpace, DescrOperation): +class StdObjSpace(ObjSpace): """The standard object space, implementing a general-purpose object library in Restricted Python.""" + import_from_mixin(DescrOperation) def initialize(self): "NOT_RPYTHON: only for initializing the space." 
diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -737,6 +737,8 @@ """ flatten = {} for base in inspect.getmro(M): + if base is object: + continue for key, value in base.__dict__.items(): if key.startswith('__') and key.endswith('__'): if key not in special_methods: diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -612,3 +612,12 @@ import_from_mixin(M, special_methods=['__str__']) assert str(A()).startswith('<') assert str(B()) == "m!" + + class M(object): + pass + class A(object): + def __init__(self): + self.foo = 42 + class B(A): + import_from_mixin(M) + assert B().foo == 42 From noreply at buildbot.pypy.org Tue Aug 20 14:21:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 14:21:25 +0200 (CEST) Subject: [pypy-commit] pypy default: Backed out changeset ee2311d4ba26 Message-ID: <20130820122125.788AE1C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66256:fb08a7c4671e Date: 2013-08-20 14:20 +0200 http://bitbucket.org/pypy/pypy/changeset/fb08a7c4671e/ Log: Backed out changeset ee2311d4ba26 diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -19,7 +19,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash, import_from_mixin +from rpython.rlib.objectmodel import compute_hash from rpython.rlib.rstring import StringBuilder @@ -272,6 +272,8 @@ # ____________________________________________________________ class SubBufferMixin(object): + _mixin_ = True + def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -295,11 +297,10 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) -class SubBuffer(Buffer): - import_from_mixin(SubBufferMixin) +class SubBuffer(SubBufferMixin, Buffer): + pass -class RWSubBuffer(RWBuffer): - import_from_mixin(SubBufferMixin) +class RWSubBuffer(SubBufferMixin, RWBuffer): def setitem(self, index, char): self.buffer.setitem(self.offset + index, char) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -121,7 +121,7 @@ greens=['w_type'], reds='auto') class DescrOperation(object): - # This is meant to be a *mixin*. 
+ _mixin_ = True def is_data_descr(space, w_obj): return space.lookup(w_obj, '__set__') is not None diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -10,7 +10,7 @@ from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rarithmetic import base_int, widen, is_valid_int -from rpython.rlib.objectmodel import we_are_translated, import_from_mixin +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib import jit # Object imports @@ -37,10 +37,9 @@ from pypy.objspace.std.stringtype import wrapstr from pypy.objspace.std.unicodetype import wrapunicode -class StdObjSpace(ObjSpace): +class StdObjSpace(ObjSpace, DescrOperation): """The standard object space, implementing a general-purpose object library in Restricted Python.""" - import_from_mixin(DescrOperation) def initialize(self): "NOT_RPYTHON: only for initializing the space." diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -737,8 +737,6 @@ """ flatten = {} for base in inspect.getmro(M): - if base is object: - continue for key, value in base.__dict__.items(): if key.startswith('__') and key.endswith('__'): if key not in special_methods: diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -612,12 +612,3 @@ import_from_mixin(M, special_methods=['__str__']) assert str(A()).startswith('<') assert str(B()) == "m!" - - class M(object): - pass - class A(object): - def __init__(self): - self.foo = 42 - class B(A): - import_from_mixin(M) - assert B().foo == 42 From noreply at buildbot.pypy.org Tue Aug 20 14:21:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 14:21:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix: 'object' has an empty __init__ which we should not copy. Message-ID: <20130820122157.A9B631C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66257:93521d0a62cf Date: 2013-08-20 14:21 +0200 http://bitbucket.org/pypy/pypy/changeset/93521d0a62cf/ Log: Test and fix: 'object' has an empty __init__ which we should not copy. (trying again) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -737,6 +737,8 @@ """ flatten = {} for base in inspect.getmro(M): + if base is object: + continue for key, value in base.__dict__.items(): if key.startswith('__') and key.endswith('__'): if key not in special_methods: diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -612,3 +612,12 @@ import_from_mixin(M, special_methods=['__str__']) assert str(A()).startswith('<') assert str(B()) == "m!" + + class M(object): + pass + class A(object): + def __init__(self): + self.foo = 42 + class B(A): + import_from_mixin(M) + assert B().foo == 42 From noreply at buildbot.pypy.org Tue Aug 20 14:24:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 14:24:41 +0200 (CEST) Subject: [pypy-commit] pypy default: Example of a simple case for import_from_mixin(). 
Message-ID: <20130820122441.06C581C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66258:75e2865eba64 Date: 2013-08-20 14:24 +0200 http://bitbucket.org/pypy/pypy/changeset/75e2865eba64/ Log: Example of a simple case for import_from_mixin(). diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -19,7 +19,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.objectmodel import compute_hash, import_from_mixin from rpython.rlib.rstring import StringBuilder @@ -272,8 +272,6 @@ # ____________________________________________________________ class SubBufferMixin(object): - _mixin_ = True - def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -297,10 +295,11 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) -class SubBuffer(SubBufferMixin, Buffer): - pass +class SubBuffer(Buffer): + import_from_mixin(SubBufferMixin) -class RWSubBuffer(SubBufferMixin, RWBuffer): +class RWSubBuffer(RWBuffer): + import_from_mixin(SubBufferMixin) def setitem(self, index, char): self.buffer.setitem(self.offset + index, char) From noreply at buildbot.pypy.org Tue Aug 20 14:41:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 14:41:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Change the _mixin_ class DescrOperation into an import_from_mixin() class. Message-ID: <20130820124144.4880D1C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66259:ae87826527de Date: 2013-08-20 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/ae87826527de/ Log: Change the _mixin_ class DescrOperation into an import_from_mixin() class. There are a couple of subtle possible differences (so I'll run all tests now) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -121,7 +121,7 @@ greens=['w_type'], reds='auto') class DescrOperation(object): - _mixin_ = True + # This is meant to be a *mixin*. 
def is_data_descr(space, w_obj): return space.lookup(w_obj, '__set__') is not None @@ -867,12 +867,12 @@ elif _arity == 2 and len(_specialnames) == 2: #print "binop", _specialnames _impl_maker = _make_binop_impl - elif _arity == 1 and len(_specialnames) == 1: + elif _arity == 1 and len(_specialnames) == 1 and _name != 'int': #print "unaryop", _specialnames _impl_maker = _make_unaryop_impl if _impl_maker: setattr(DescrOperation,_name,_impl_maker(_symbol,_specialnames)) - elif _name not in ['is_', 'id','type','issubtype', + elif _name not in ['is_', 'id','type','issubtype', 'int', # not really to be defined in DescrOperation 'ord', 'unichr', 'unicode']: raise Exception, "missing def for operation %s" % _name diff --git a/pypy/objspace/std/builtinshortcut.py b/pypy/objspace/std/builtinshortcut.py --- a/pypy/objspace/std/builtinshortcut.py +++ b/pypy/objspace/std/builtinshortcut.py @@ -131,6 +131,7 @@ w_obj = w_res # general case fallback - return DescrOperation.is_true(space, w_obj) + return _DescrOperation_is_true(space, w_obj) + _DescrOperation_is_true = DescrOperation.is_true.im_func space.is_true = is_true diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -10,7 +10,7 @@ from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rarithmetic import base_int, widen, is_valid_int -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, import_from_mixin from rpython.rlib import jit # Object imports @@ -37,9 +37,10 @@ from pypy.objspace.std.stringtype import wrapstr from pypy.objspace.std.unicodetype import wrapunicode -class StdObjSpace(ObjSpace, DescrOperation): +class StdObjSpace(ObjSpace): """The standard object space, implementing a general-purpose object library in Restricted Python.""" + import_from_mixin(DescrOperation) def initialize(self): "NOT_RPYTHON: only for initializing the space." @@ -492,16 +493,19 @@ self.wrap("Expected tuple of length 3")) return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) + _DescrOperation_is_true = is_true + _DescrOperation_getattr = getattr + def is_true(self, w_obj): # a shortcut for performance # NOTE! this method is typically overridden by builtinshortcut.py. 
if type(w_obj) is W_BoolObject: return w_obj.boolval - return DescrOperation.is_true(self, w_obj) + return self._DescrOperation_is_true(w_obj) def getattr(self, w_obj, w_name): if not self.config.objspace.std.getattributeshortcut: - return DescrOperation.getattr(self, w_obj, w_name) + return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance w_type = self.type(w_obj) From noreply at buildbot.pypy.org Tue Aug 20 15:51:18 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Aug 2013 15:51:18 +0200 (CEST) Subject: [pypy-commit] pypy default: crucial fix Message-ID: <20130820135118.5758F1C02B1@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66260:b6109e035541 Date: 2013-08-20 15:49 +0200 http://bitbucket.org/pypy/pypy/changeset/b6109e035541/ Log: crucial fix diff --git a/pypy/tool/gcdump.py b/pypy/tool/gcdump.py --- a/pypy/tool/gcdump.py +++ b/pypy/tool/gcdump.py @@ -29,6 +29,8 @@ for num, line in enumerate(iter): if num == 0: continue + if not line: + continue words = line.split() if words[0].startswith('member'): del words[0] From noreply at buildbot.pypy.org Tue Aug 20 15:51:20 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 20 Aug 2013 15:51:20 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130820135120.45AEF1C02EE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66261:c545818cef25 Date: 2013-08-20 15:50 +0200 http://bitbucket.org/pypy/pypy/changeset/c545818cef25/ Log: merge diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -57,6 +57,12 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + On SLES11: + + $ sudo zypper install gcc make python-devel pkg-config \ + zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ + libexpat-devel libffi-devel python-curses + The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. 
* ``pkg-config`` (to help us locate libffi files) diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -19,7 +19,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.objectmodel import compute_hash, import_from_mixin from rpython.rlib.rstring import StringBuilder @@ -272,8 +272,6 @@ # ____________________________________________________________ class SubBufferMixin(object): - _mixin_ = True - def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -297,10 +295,11 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) -class SubBuffer(SubBufferMixin, Buffer): - pass +class SubBuffer(Buffer): + import_from_mixin(SubBufferMixin) -class RWSubBuffer(SubBufferMixin, RWBuffer): +class RWSubBuffer(RWBuffer): + import_from_mixin(SubBufferMixin) def setitem(self, index, char): self.buffer.setitem(self.offset + index, char) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -121,7 +121,7 @@ greens=['w_type'], reds='auto') class DescrOperation(object): - _mixin_ = True + # This is meant to be a *mixin*. def is_data_descr(space, w_obj): return space.lookup(w_obj, '__set__') is not None @@ -867,12 +867,12 @@ elif _arity == 2 and len(_specialnames) == 2: #print "binop", _specialnames _impl_maker = _make_binop_impl - elif _arity == 1 and len(_specialnames) == 1: + elif _arity == 1 and len(_specialnames) == 1 and _name != 'int': #print "unaryop", _specialnames _impl_maker = _make_unaryop_impl if _impl_maker: setattr(DescrOperation,_name,_impl_maker(_symbol,_specialnames)) - elif _name not in ['is_', 'id','type','issubtype', + elif _name not in ['is_', 'id','type','issubtype', 'int', # not really to be defined in DescrOperation 'ord', 'unichr', 'unicode']: raise Exception, "missing def for operation %s" % _name diff --git a/pypy/objspace/std/builtinshortcut.py b/pypy/objspace/std/builtinshortcut.py --- a/pypy/objspace/std/builtinshortcut.py +++ b/pypy/objspace/std/builtinshortcut.py @@ -131,6 +131,7 @@ w_obj = w_res # general case fallback - return DescrOperation.is_true(space, w_obj) + return _DescrOperation_is_true(space, w_obj) + _DescrOperation_is_true = DescrOperation.is_true.im_func space.is_true = is_true diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -10,7 +10,7 @@ from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rarithmetic import base_int, widen, is_valid_int -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, import_from_mixin from rpython.rlib import jit # Object imports @@ -37,9 +37,10 @@ from pypy.objspace.std.stringtype import wrapstr from pypy.objspace.std.unicodetype import wrapunicode -class StdObjSpace(ObjSpace, DescrOperation): +class StdObjSpace(ObjSpace): """The standard object space, implementing a general-purpose object library in Restricted Python.""" + import_from_mixin(DescrOperation) def initialize(self): "NOT_RPYTHON: only for initializing the space." 
@@ -492,16 +493,19 @@ self.wrap("Expected tuple of length 3")) return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) + _DescrOperation_is_true = is_true + _DescrOperation_getattr = getattr + def is_true(self, w_obj): # a shortcut for performance # NOTE! this method is typically overridden by builtinshortcut.py. if type(w_obj) is W_BoolObject: return w_obj.boolval - return DescrOperation.is_true(self, w_obj) + return self._DescrOperation_is_true(w_obj) def getattr(self, w_obj, w_name): if not self.config.objspace.std.getattributeshortcut: - return DescrOperation.getattr(self, w_obj, w_name) + return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance w_type = self.type(w_obj) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2539,6 +2539,27 @@ s = a.build_types(f, []) assert s.const == 2 + def test_import_from_mixin(self): + class M(object): + def f(self): + return self.a + class I(object): + objectmodel.import_from_mixin(M) + def __init__(self, i): + self.a = i + class S(object): + objectmodel.import_from_mixin(M) + def __init__(self, s): + self.a = s + def f(n): + return (I(n).f(), S("a" * n).f()) + + assert f(3) == (3, "aaa") + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s.items[0], annmodel.SomeInteger) + assert isinstance(s.items[1], annmodel.SomeString) + def test___class___attribute(self): class Base(object): pass class A(Base): pass diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -178,6 +178,17 @@ else: self.emit_operation(op) + def optimize_INT_XOR(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v1.is_constant() and v1.box.getint() == 0: + self.make_equal_to(op.result, v2) + elif v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_FLOAT_MUL(self, op): arg1 = op.getarg(0) arg2 = op.getarg(1) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -3263,6 +3263,20 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_xor(self): + ops = """ + [i0, i1] + i2 = int_xor(i0, 23) + i3 = int_xor(i1, 0) + jump(i2, i3) + """ + expected = """ + [i0, i1] + i2 = int_xor(i0, 23) + jump(i2, i1) + """ + self.optimize_loop(ops, expected) + # ---------- def test_residual_call_does_not_invalidate_caches(self): diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -9,7 +9,7 @@ import types import math import inspect -from rpython.tool.sourcetools import rpython_wrapper +from rpython.tool.sourcetools import rpython_wrapper, func_with_new_name # specialize is a decorator factory for attaching _annspecialcase_ # attributes to functions: for example @@ -720,3 +720,45 @@ self.dic = dic self.key = key self.hash = hash + +# ____________________________________________________________ + +def import_from_mixin(M, special_methods=['__init__', '__del__']): + """Copy all methods and class attributes 
from the class M into + the current scope. Should be called when defining a class body. + Function and staticmethod objects are duplicated, which means + that annotation will not consider them as identical to another + copy in another unrelated class. + + By default, "special" methods and class attributes, with a name + like "__xxx__", are not copied unless they are "__init__" or + "__del__". The list can be changed with the optional second + argument. + """ + flatten = {} + for base in inspect.getmro(M): + if base is object: + continue + for key, value in base.__dict__.items(): + if key.startswith('__') and key.endswith('__'): + if key not in special_methods: + continue + if key in flatten: + continue + if isinstance(value, types.FunctionType): + value = func_with_new_name(value, value.__name__) + elif isinstance(value, staticmethod): + func = value.__get__(42) + func = func_with_new_name(func, func.__name__) + value = staticmethod(func) + elif isinstance(value, classmethod): + raise AssertionError("classmethods not supported " + "in 'import_from_mixin'") + flatten[key] = value + # + target = sys._getframe(1).f_locals + for key, value in flatten.items(): + if key in target: + raise Exception("import_from_mixin: would overwrite the value " + "already defined locally for %r" % (key,)) + target[key] = value diff --git a/rpython/rlib/rsre/rsre_char.py b/rpython/rlib/rsre/rsre_char.py --- a/rpython/rlib/rsre/rsre_char.py +++ b/rpython/rlib/rsre/rsre_char.py @@ -115,10 +115,7 @@ for function, negate in category_dispatch_unroll: if category_code == i: result = function(char_code) - if negate: - return not result # XXX this might lead to a guard - else: - return result + return result ^ negate i = i + 1 else: return False @@ -160,9 +157,7 @@ ppos += 1 else: return False - if negated: - return not result - return result + return result ^ negated def set_literal(pat, index, char_code): # diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -548,3 +548,76 @@ r = interpret(f, [29]) assert r == 1 + +def test_import_from_mixin(): + class M: # old-style + def f(self): pass + class A: # old-style + import_from_mixin(M) + assert A.f.im_func is not M.f.im_func + + class M(object): + def f(self): pass + class A: # old-style + import_from_mixin(M) + assert A.f.im_func is not M.f.im_func + + class M: # old-style + def f(self): pass + class A(object): + import_from_mixin(M) + assert A.f.im_func is not M.f.im_func + + class M(object): + def f(self): pass + class A(object): + import_from_mixin(M) + assert A.f.im_func is not M.f.im_func + + class MBase(object): + a = 42; b = 43; c = 1000 + def f(self): return "hi" + def g(self): return self.c - 1 + class M(MBase): + a = 84 + def f(self): return "there" + class A(object): + import_from_mixin(M) + c = 88 + assert A.f.im_func is not M.f.im_func + assert A.f.im_func is not MBase.f.im_func + assert A.g.im_func is not MBase.g.im_func + assert A().f() == "there" + assert A.a == 84 + assert A.b == 43 + assert A.c == 88 + assert A().g() == 87 + + try: + class B(object): + a = 63 + import_from_mixin(M) + except Exception, e: + assert ("would overwrite the value already defined locally for 'a'" + in str(e)) + else: + raise AssertionError("failed to detect overwritten attribute") + + class M(object): + def __str__(self): + return "m!" 
+ class A(object): + import_from_mixin(M) + class B(object): + import_from_mixin(M, special_methods=['__str__']) + assert str(A()).startswith('<') + assert str(B()) == "m!" + + class M(object): + pass + class A(object): + def __init__(self): + self.foo = 42 + class B(A): + import_from_mixin(M) + assert B().foo == 42 From noreply at buildbot.pypy.org Tue Aug 20 16:03:51 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 20 Aug 2013 16:03:51 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130820140351.8C6821C02B1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66262:fe8465da6397 Date: 2013-08-20 15:47 +0200 http://bitbucket.org/pypy/pypy/changeset/fe8465da6397/ Log: hg merge default diff too long, truncating to 2000 out of 2642 lines diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -57,6 +57,12 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + On SLES11: + + $ sudo zypper install gcc make python-devel pkg-config \ + zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ + libexpat-devel libffi-devel python-curses + The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. * ``pkg-config`` (to help us locate libffi files) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,4 @@ .. branch: dotviewer-linewidth .. branch: reflex-support .. branch: numpypy-inplace-op +.. branch: rewritten-loop-logging diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -19,7 +19,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.objectmodel import compute_hash, import_from_mixin from rpython.rlib.rstring import StringBuilder @@ -272,8 +272,6 @@ # ____________________________________________________________ class SubBufferMixin(object): - _mixin_ = True - def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -297,10 +295,11 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) -class SubBuffer(SubBufferMixin, Buffer): - pass +class SubBuffer(Buffer): + import_from_mixin(SubBufferMixin) -class RWSubBuffer(SubBufferMixin, RWBuffer): +class RWSubBuffer(RWBuffer): + import_from_mixin(SubBufferMixin) def setitem(self, index, char): self.buffer.setitem(self.offset + index, char) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -34,7 +34,7 @@ thread.interrupt_main() for i in range(10): print('x') - time.sleep(0.1) + time.sleep(0.25) except BaseException, e: interrupted.append(e) finally: @@ -59,7 +59,7 @@ for j in range(10): if len(done): break print('.') - time.sleep(0.1) + time.sleep(0.25) print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 @@ -117,7 +117,7 @@ def subthread(): try: - time.sleep(0.25) + time.sleep(0.5) with __pypy__.thread.signals_enabled: 
thread.interrupt_main() except BaseException, e: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -778,6 +778,11 @@ from numpypy import unicode_ assert isinstance(unicode_(3), unicode) + def test_character_dtype(self): + from numpypy import array, character + x = array([["A", "B"], ["C", "D"]], character) + assert x == [["A", "B"], ["C", "D"]] + class AppTestRecordDtypes(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def test_create(self): diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -121,7 +121,7 @@ greens=['w_type'], reds='auto') class DescrOperation(object): - _mixin_ = True + # This is meant to be a *mixin*. def is_data_descr(space, w_obj): return space.lookup(w_obj, '__set__') is not None @@ -867,12 +867,12 @@ elif _arity == 2 and len(_specialnames) == 2: #print "binop", _specialnames _impl_maker = _make_binop_impl - elif _arity == 1 and len(_specialnames) == 1: + elif _arity == 1 and len(_specialnames) == 1 and _name != 'int': #print "unaryop", _specialnames _impl_maker = _make_unaryop_impl if _impl_maker: setattr(DescrOperation,_name,_impl_maker(_symbol,_specialnames)) - elif _name not in ['is_', 'id','type','issubtype', + elif _name not in ['is_', 'id','type','issubtype', 'int', # not really to be defined in DescrOperation 'ord', 'unichr', 'unicode']: raise Exception, "missing def for operation %s" % _name diff --git a/pypy/objspace/std/builtinshortcut.py b/pypy/objspace/std/builtinshortcut.py --- a/pypy/objspace/std/builtinshortcut.py +++ b/pypy/objspace/std/builtinshortcut.py @@ -131,6 +131,7 @@ w_obj = w_res # general case fallback - return DescrOperation.is_true(space, w_obj) + return _DescrOperation_is_true(space, w_obj) + _DescrOperation_is_true = DescrOperation.is_true.im_func space.is_true = is_true diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -10,7 +10,7 @@ from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rarithmetic import base_int, widen, is_valid_int -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, import_from_mixin from rpython.rlib import jit # Object imports @@ -37,9 +37,10 @@ from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.unicodeobject import wrapunicode -class StdObjSpace(ObjSpace, DescrOperation): +class StdObjSpace(ObjSpace): """The standard object space, implementing a general-purpose object library in Restricted Python.""" + import_from_mixin(DescrOperation) def initialize(self): "NOT_RPYTHON: only for initializing the space." @@ -492,16 +493,19 @@ self.wrap("Expected tuple of length 3")) return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) + _DescrOperation_is_true = is_true + _DescrOperation_getattr = getattr + def is_true(self, w_obj): # a shortcut for performance # NOTE! this method is typically overridden by builtinshortcut.py. 
if type(w_obj) is W_BoolObject: return w_obj.boolval - return DescrOperation.is_true(self, w_obj) + return self._DescrOperation_is_true(w_obj) def getattr(self, w_obj, w_name): if not self.config.objspace.std.getattributeshortcut: - return DescrOperation.getattr(self, w_obj, w_name) + return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance w_type = self.type(w_obj) diff --git a/pypy/tool/gcdump.py b/pypy/tool/gcdump.py --- a/pypy/tool/gcdump.py +++ b/pypy/tool/gcdump.py @@ -20,9 +20,13 @@ for obj in self.walk(a): self.add_object_summary(obj[2], obj[3]) - def load_typeids(self, filename): + def load_typeids(self, filename_or_iter): self.typeids = Stat.typeids.copy() - for num, line in enumerate(open(filename)): + if isinstance(filename_or_iter, str): + iter = open(filename_or_iter) + else: + iter = filename_or_iter + for num, line in enumerate(iter): if num == 0: continue words = line.split() @@ -92,5 +96,8 @@ typeid_name = os.path.join(os.path.dirname(sys.argv[1]), 'typeids.txt') if os.path.isfile(typeid_name): stat.load_typeids(typeid_name) + else: + import zlib, gc + stat.load_typeids(zlib.decompress(gc.get_typeids_z()).split("\n")) # stat.print_summary() diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -390,7 +390,12 @@ data = r.data.encode('hex') # backward compatibility dumps[name] = (world.backend_name, r.addr, data) loops = [] - for entry in extract_category(log, 'jit-log-opt'): + cat = extract_category(log, 'jit-log-opt') + if not cat: + extract_category(log, 'jit-log-rewritten') + if not cat: + extract_category(log, 'jit-log-noopt') + for entry in cat: parser = ParserCls(entry, None, {}, 'lltype', None, nonstrict=True) loop = parser.parse() diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2539,6 +2539,27 @@ s = a.build_types(f, []) assert s.const == 2 + def test_import_from_mixin(self): + class M(object): + def f(self): + return self.a + class I(object): + objectmodel.import_from_mixin(M) + def __init__(self, i): + self.a = i + class S(object): + objectmodel.import_from_mixin(M) + def __init__(self, s): + self.a = s + def f(n): + return (I(n).f(), S("a" * n).f()) + + assert f(3) == (3, "aaa") + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s.items[0], annmodel.SomeInteger) + assert isinstance(s.items[1], annmodel.SomeString) + def test___class___attribute(self): class Base(object): pass class A(Base): pass diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -571,7 +571,8 @@ self.mc.BL(self.stack_check_slowpath, c=c.HI) # call if ip > lr # cpu interface - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, + log): clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt clt._debug_nbargs = len(inputargs) @@ -620,6 +621,9 @@ 'loop.asm') ops_offset = self.mc.ops_offset + if logger is not None: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) self.teardown() debug_start("jit-backend-addr") @@ -644,8 +648,8 @@ frame_depth = max(frame_depth, target_frame_depth) 
return frame_depth - def assemble_bridge(self, faildescr, inputargs, operations, - original_loop_token, log): + def assemble_bridge(self, logger, faildescr, inputargs, operations, + original_loop_token, log): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) @@ -694,6 +698,9 @@ frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) + if logger: + logger.log_bridge(inputargs, operations, "rewritten", + ops_offset=ops_offset) self.teardown() debug_bridge(descr_number, rawstart, codeendpos) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -452,7 +452,7 @@ # Write code equivalent to write_barrier() in the GC: it checks # a flag in the object at arglocs[0], and if set, it calls a # helper piece of assembler. The latter saves registers as needed - # and call the function jit_remember_young_pointer() from the GC. + # and call the function remember_young_pointer() from the GC. if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -56,17 +56,18 @@ def finish_once(self): self.assembler.finish_once() - def compile_loop(self, inputargs, operations, looptoken, + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, name=''): - return self.assembler.assemble_loop(name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(logger, name, inputargs, operations, + looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(faildescr, inputargs, operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(logger, faildescr, inputargs, + operations, + original_loop_token, log=log) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem diff --git a/rpython/jit/backend/arm/test/test_generated.py b/rpython/jit/backend/arm/test/test_generated.py --- a/rpython/jit/backend/arm/test/test_generated.py +++ b/rpython/jit/backend/arm/test/test_generated.py @@ -40,7 +40,7 @@ looptoken = JitCellToken() operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) operations[3].setfailargs([v9, v6, v10, v2, v8, v5, v1, v4]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-12 , -26 , -19 , 7 , -5 , -24 , -37 , 62 , 9 , 12] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -92,7 +92,7 @@ operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) operations[-1].setfailargs([v7, v1, v2]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [16 , 5 , 5 , 16 , 46 , 6 , 63 , 39 , 78 , 0] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 105 @@ -136,7 +136,7 @@ operations[-1].setfailargs([v5, v2, v1, v10, v3, v8, v4, v6]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, 
operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-5 , 24 , 46 , -15 , 13 , -8 , 0 , -6 , 6 , 6] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -179,7 +179,7 @@ operations[5].setfailargs([]) operations[-1].setfailargs([v8, v2, v6, v5, v7, v1, v10]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [19 , -3 , -58 , -7 , 12 , 22 , -54 , -29 , -19 , -64] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == -29 @@ -223,7 +223,7 @@ looptoken = JitCellToken() operations[5].setfailargs([]) operations[-1].setfailargs([v1, v4, v10, v8, v7, v3]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [1073741824 , 95 , -16 , 5 , 92 , 12 , 32 , 17 , 37 , -63] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 1073741824 @@ -280,7 +280,7 @@ operations[9].setfailargs([v10, v13]) operations[-1].setfailargs([v8, v10, v6, v3, v2, v9]) args = [32 , 41 , -9 , 12 , -18 , 46 , 15 , 17 , 10 , 12] - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 assert cpu.get_int_value(deadframe, 0) == 12 @@ -328,7 +328,7 @@ operations[8].setfailargs([v5, v9]) operations[-1].setfailargs([v4, v10, v6, v5, v9, v7]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-8 , 0 , 62 , 35 , 16 , 9 , 30 , 581610154 , -1 , 738197503] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -378,7 +378,7 @@ operations[-2].setfailargs([v9, v4, v10, v11, v14]) operations[-1].setfailargs([v10, v8, v1, v6, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-39 , -18 , 1588243114 , -9 , -4 , 1252698794 , 0 , 715827882 , -15 , 536870912] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -433,7 +433,7 @@ operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) operations[-1].setfailargs([v1, v2, v9]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [0 , -2 , 24 , 1 , -4 , 13 , -95 , 33 , 2 , -44] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 @@ -475,7 +475,7 @@ operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) operations[-1].setfailargs([v8, v2, v10, v6, v7, v9, v5, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [3 , -5 , 1431655765 , 47 , 12 , 1789569706 , 15 , 939524096 , 16 , -43] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -524,7 +524,7 @@ operations[-1].setfailargs([v2, v3, v5, v7, v10, v8, v9]) operations[4].setfailargs([v14]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [14 , -20 , 18 , -2058005163 , 6 , 1 , -16 , 11 , 0 , 19] 
deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 diff --git a/rpython/jit/backend/arm/test/test_regalloc2.py b/rpython/jit/backend/arm/test/test_regalloc2.py --- a/rpython/jit/backend/arm/test/test_regalloc2.py +++ b/rpython/jit/backend/arm/test/test_regalloc2.py @@ -24,7 +24,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 9) assert cpu.get_int_value(deadframe, 0) == (9 >> 3) assert cpu.get_int_value(deadframe, 1) == (~18) @@ -48,7 +48,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -10) assert cpu.get_int_value(deadframe, 0) == 0 assert cpu.get_int_value(deadframe, 1) == -1000 @@ -145,7 +145,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-13 , 10 , 10 , 8 , -8 , -16 , -18 , 46 , -12 , 26] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -252,7 +252,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [17 , -20 , -6 , 6 , 1 , 13 , 13 , 9 , 49 , 8] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -75,7 +75,7 @@ ResOperation(rop.FINISH, [inp[1]], None, descr=BasicFinalDescr(1)), ] operations[-2].setfailargs(out) - cpu.compile_loop(inp, operations, looptoken) + cpu.compile_loop(None, inp, operations, looptoken) args = [i for i in range(1, 15)] deadframe = self.cpu.execute_token(looptoken, *args) output = [self.cpu.get_int_value(deadframe, i - 1) for i in range(1, 15)] @@ -117,9 +117,9 @@ i1 = int_sub(i0, 1) finish(i1) ''') - self.cpu.compile_loop(loop2.inputargs, loop2.operations, lt2) - self.cpu.compile_loop(loop3.inputargs, loop3.operations, lt3) - self.cpu.compile_loop(loop1.inputargs, loop1.operations, lt1) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, lt2) + self.cpu.compile_loop(None, loop3.inputargs, loop3.operations, lt3) + self.cpu.compile_loop(None, loop1.inputargs, loop1.operations, lt1) df = self.cpu.execute_token(lt1, 10) assert self.cpu.get_int_value(df, 0) == 7 @@ -214,7 +214,7 @@ ops = "".join(ops) loop = parse(ops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * numargs RES = lltype.Signed args = [i+1 for i in range(numargs)] @@ -246,7 +246,7 @@ try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() - self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.compile_loop(None, ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] @@ -280,7 +280,7 @@ faildescr = BasicFailDescr(2) loop = parse(ops, self.cpu, namespace=locals()) 
looptoken = JitCellToken() - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ops2 = """ [i0, f1] i1 = same_as(i0) @@ -293,7 +293,7 @@ """ loop2 = parse(ops2, self.cpu, namespace=locals()) looptoken2 = JitCellToken() - info = self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + info = self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) deadframe = self.cpu.execute_token(looptoken, -9, longlong.getfloatstorage(-13.5)) res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -183,7 +183,8 @@ self.stats = stats or MiniStats() self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, + name=''): clt = model.CompiledLoopToken(self, looptoken.number) looptoken.compiled_loop_token = clt lltrace = LLTrace(inputargs, operations) @@ -191,7 +192,7 @@ clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() @@ -960,10 +961,10 @@ def execute_force_token(self, _): return self - def execute_cond_call_gc_wb(self, descr, a, b): + def execute_cond_call_gc_wb(self, descr, a): py.test.skip("cond_call_gc_wb not supported") - def execute_cond_call_gc_wb_array(self, descr, a, b, c): + def execute_cond_call_gc_wb_array(self, descr, a, b): py.test.skip("cond_call_gc_wb_array not supported") def execute_keepalive(self, descr, x): diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -136,7 +136,7 @@ """ Allocate a new frame, overwritten by tests """ frame = jitframe.JITFRAME.allocate(frame_info) - llop.gc_assume_young_pointers(lltype.Void, frame) + llop.gc_writebarrier(lltype.Void, frame) return frame class JitFrameDescrs: @@ -360,8 +360,7 @@ def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() - # to work. Additionally, 'hybrid' is missing some stuff like - # jit_remember_young_pointer() for now. + # to work. 'hybrid' could work but isn't tested with the JIT. 
if self.gcdescr.config.translation.gc not in ('minimark',): raise NotImplementedError("--gc=%s not implemented with the JIT" % (self.gcdescr.config.translation.gc,)) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -97,7 +97,7 @@ new_frame.jf_savedata = frame.jf_savedata new_frame.jf_guard_exc = frame.jf_guard_exc # all other fields are empty - llop.gc_assume_young_pointers(lltype.Void, new_frame) + llop.gc_writebarrier(lltype.Void, new_frame) return lltype.cast_opaque_ptr(llmemory.GCREF, new_frame) except Exception, e: print "Unhandled exception", e, "in realloc_frame" diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -676,7 +676,7 @@ 'checkdescr': checkdescr, 'fielddescr': cpu.fielddescrof(S, 'x')}) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) p0 = lltype.malloc(S, zero=True) p1 = lltype.malloc(S) p2 = lltype.malloc(S) @@ -715,7 +715,7 @@ 'calldescr': checkdescr, }) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) S = self.S s = lltype.malloc(S) cpu.execute_token(token, 1, s) @@ -743,7 +743,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(20) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) arg = longlong.getfloatstorage(2.3) frame = cpu.execute_token(token, arg) ofs = cpu.get_baseofs_of_frame_field() @@ -770,7 +770,7 @@ cpu.gc_ll_descr.collections = [[0, sizeof.size]] cpu.gc_ll_descr.init_nursery(2 * sizeof.size) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = cpu.execute_token(token) # now we should be able to track everything from the frame frame = lltype.cast_opaque_ptr(JITFRAMEPTR, frame) @@ -821,7 +821,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) args = [lltype.nullptr(llmemory.GCREF.TO) for i in range(7)] frame = cpu.execute_token(token, 1, *args) frame = rffi.cast(JITFRAMEPTR, frame) @@ -867,7 +867,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) @@ -911,7 +911,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) assert getmap(frame).count('1') == 4 diff --git a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py --- a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py @@ 
-97,7 +97,7 @@ loop = self.parse(ops, namespace=namespace) self.loop = loop looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) arguments = [] for arg in args: if isinstance(arg, int): @@ -147,7 +147,8 @@ assert ([box.type for box in bridge.inputargs] == [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() - self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, + bridge.operations, loop._jitcelltoken) return bridge @@ -335,7 +336,7 @@ ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] - + def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] @@ -409,7 +410,7 @@ class TestRegallocCompOps(BaseTestRegalloc): - + def test_cmp_op_0(self): ops = ''' [i0, i3] @@ -575,7 +576,7 @@ class TestRegAllocCallAndStackDepth(BaseTestRegalloc): def setup_class(cls): py.test.skip("skip for now, not sure what do we do") - + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if not self.cpu.IS_64_BIT: @@ -612,7 +613,7 @@ ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) + i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) guard_false(i5) [i11, i1, i2, i3, i4, i5, i6, i7, i8, i9] ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) @@ -649,7 +650,7 @@ ops = ''' [i2, i0, i1] - i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) + i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) guard_false(i0, descr=fdescr2) [i3, i0] ''' bridge = self.attach_bridge(ops, loop, -2) @@ -676,7 +677,7 @@ ops = ''' [i2] - i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) + i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) guard_false(i3, descr=fdescr2) [i3] ''' bridge = self.attach_bridge(ops, loop, -2) diff --git a/rpython/jit/backend/llsupport/test/test_runner.py b/rpython/jit/backend/llsupport/test/test_runner.py --- a/rpython/jit/backend/llsupport/test/test_runner.py +++ b/rpython/jit/backend/llsupport/test/test_runner.py @@ -14,7 +14,7 @@ def set_debug(flag): pass - def compile_loop(self, inputargs, operations, looptoken): + def compile_loop(self, logger, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -51,7 +51,8 @@ """ return False - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, + log=True, name=''): """Assemble the given loop. Should create and attach a fresh CompiledLoopToken to looptoken.compiled_loop_token and stick extra attributes @@ -67,7 +68,7 @@ """ raise NotImplementedError - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): """Assemble the bridge. The FailDescr is the descr of the original guard that failed. 
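The model.py hunk above changes the abstract backend interface: compile_loop() and compile_bridge() now take a logger as their first argument, which the backends may use to log the rewritten trace. A minimal sketch of the new calling convention follows, assuming a cpu object with the updated signatures; the helper name compile_trace is invented here purely for illustration (real callers such as do_compile_loop() pass metainterp_sd.logger_ops, while most tests in the diffs below simply pass None):

    def compile_trace(cpu, logger, inputargs, operations, looptoken,
                      faildescr=None, original_loop_token=None):
        # Loops and bridges both take the logger as the first argument now;
        # passing None disables the extra "rewritten" logging that the x86
        # backend performs when a logger is given.
        if faildescr is None:
            return cpu.compile_loop(logger, inputargs, operations, looptoken)
        return cpu.compile_bridge(logger, faildescr, inputargs, operations,
                                  original_loop_token)
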
diff --git a/rpython/jit/backend/test/calling_convention_test.py b/rpython/jit/backend/test/calling_convention_test.py --- a/rpython/jit/backend/test/calling_convention_test.py +++ b/rpython/jit/backend/test/calling_convention_test.py @@ -105,7 +105,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) @@ -249,7 +249,7 @@ called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_descr = called_loop.operations[-1].getdescr() - self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) + self.cpu.compile_loop(None, called_loop.inputargs, called_loop.operations, called_looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = cpu.execute_token(called_looptoken, *argvals) @@ -278,7 +278,7 @@ self.cpu.done_with_this_frame_descr_float = done_descr try: othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # prepare call to called_loop argvals, _ = self._prepare_args(args, floats, ints) @@ -424,7 +424,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -49,7 +49,7 @@ valueboxes, descr) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) args = [] for box in inputargs: if isinstance(box, BoxInt): @@ -127,7 +127,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) res = self.cpu.get_int_value(deadframe, 0) @@ -145,7 +145,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, longlong.getfloatstorage(2.8)) fail = self.cpu.get_latest_descr(deadframe) @@ -170,7 +170,7 @@ inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -195,7 +195,7 @@ inputargs = [i3] operations[4].setfailargs([None, None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 44) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -221,7 +221,7 @@ operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = 
weakref.ref(operations[2]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) if hasattr(looptoken, '_x86_ops_offset'): del looptoken._x86_ops_offset # else it's kept alive del i0, i1, i2 @@ -249,7 +249,7 @@ ] inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -260,7 +260,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -291,7 +291,7 @@ ] inputargs = [i3] operations[4].setfailargs([None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -302,7 +302,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -320,7 +320,7 @@ ] inputargs = [i0] operations[0].setfailargs([i0]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1list = [BoxInt() for i in range(150)] bridge = [] @@ -334,7 +334,7 @@ descr=BasicFinalDescr(4))) bridge[-2].setfailargs(i1list) - self.cpu.compile_bridge(faildescr1, [i0], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i0], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) fail = self.cpu.get_latest_descr(deadframe) @@ -358,7 +358,7 @@ operations = [ ResOperation(rop.FINISH, [i0], None, descr=faildescr) ] - self.cpu.compile_loop([i0], operations, looptoken) + self.cpu.compile_loop(None, [i0], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 99) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -369,7 +369,7 @@ operations = [ ResOperation(rop.FINISH, [ConstInt(42)], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -380,7 +380,7 @@ operations = [ ResOperation(rop.FINISH, [], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -391,7 +391,7 @@ operations = [ ResOperation(rop.FINISH, [f0], None, descr=faildescr) ] - self.cpu.compile_loop([f0], operations, looptoken) + self.cpu.compile_loop(None, [f0], operations, looptoken) value = longlong.getfloatstorage(-61.25) deadframe = self.cpu.execute_token(looptoken, value) fail = self.cpu.get_latest_descr(deadframe) @@ -403,7 +403,7 @@ operations = [ ResOperation(rop.FINISH, [constfloat(42.5)], None, descr=faildescr) ] - self.cpu.compile_loop([], operations, looptoken) + self.cpu.compile_loop(None, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr @@ -429,7 +429,7 @@ ResOperation(rop.JUMP, [t, z], None, descr=targettoken), ] 
operations[-2].setfailargs([t, z]) - cpu.compile_loop([x, y], operations, looptoken) + cpu.compile_loop(None, [x, y], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 0, 10) assert self.cpu.get_int_value(deadframe, 0) == 0 assert self.cpu.get_int_value(deadframe, 1) == 55 @@ -488,7 +488,7 @@ ops[1].setfailargs([v_res]) # looptoken = JitCellToken() - self.cpu.compile_loop([v1, v2], ops, looptoken) + self.cpu.compile_loop(None, [v1, v2], ops, looptoken) for x, y, z in testcases: deadframe = self.cpu.execute_token(looptoken, x, y) fail = self.cpu.get_latest_descr(deadframe) @@ -1238,7 +1238,7 @@ print inputargs for op in operations: print op - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # deadframe = self.cpu.execute_token(looptoken, *values) fail = self.cpu.get_latest_descr(deadframe) @@ -1305,7 +1305,7 @@ operations[3].setfailargs(inputargs[:]) operations[3].setdescr(faildescr) # - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # values = [] S = lltype.GcStruct('S') @@ -1366,7 +1366,7 @@ operations[-3].setfailargs(fboxes) operations[-2].setfailargs(fboxes) looptoken = JitCellToken() - self.cpu.compile_loop(fboxes, operations, looptoken) + self.cpu.compile_loop(None, fboxes, operations, looptoken) fboxes2 = [BoxFloat() for i in range(12)] f3 = BoxFloat() @@ -1375,7 +1375,7 @@ ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] - self.cpu.compile_bridge(faildescr1, fboxes2, bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, fboxes2, bridge, looptoken) args = [] for i in range(len(fboxes)): @@ -1407,7 +1407,7 @@ finish()""" loop = parse(loopops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) args = [1] args.append(longlong.getfloatstorage(132.25)) args.append(longlong.getfloatstorage(0.75)) @@ -1428,7 +1428,7 @@ ResOperation(rop.FINISH, [], None, descr=faildescr2), ] bridgeops[-2].setfailargs(fboxes[:]) - self.cpu.compile_bridge(loop.operations[-2].getdescr(), fboxes, + self.cpu.compile_bridge(None, loop.operations[-2].getdescr(), fboxes, bridgeops, looptoken) args = [1, longlong.getfloatstorage(132.25), @@ -1463,7 +1463,7 @@ ] operations[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for value in [-42, 0, 1, 10]: deadframe = self.cpu.execute_token(looptoken, value) @@ -1508,7 +1508,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for test1 in [-65, -42, -11, 0, 1, 10]: if test1 == -42 or combinaison[0] == 'b': @@ -1560,7 +1560,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # for test1 in [65, 42, 11, 0, 1]: if test1 == 42 or combinaison[0] == 'b': @@ -1616,7 +1616,7 @@ ] operations[-2].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # nan = 1e200 * 1e200 nan /= nan @@ -1675,7 +1675,7 @@ descr=faildescr)) looptoken = JitCellToken() # - self.cpu.compile_loop(inputargs, 
operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # args = [] for box in inputargs: @@ -1748,7 +1748,7 @@ looptoken = JitCellToken() # Use "set" to unique-ify inputargs unique_testcase_list = list(set(testcase)) - self.cpu.compile_loop(unique_testcase_list, operations, + self.cpu.compile_loop(None, unique_testcase_list, operations, looptoken) args = [box.getfloatstorage() for box in unique_testcase_list] @@ -2065,7 +2065,7 @@ exc_ptr = xptr loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_ref_value(deadframe, 0) == xptr excvalue = self.cpu.grab_exc_value(deadframe) @@ -2088,7 +2088,7 @@ exc_ptr = yptr loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_int_value(deadframe, 0) == 1 excvalue = self.cpu.grab_exc_value(deadframe) @@ -2105,7 +2105,7 @@ ''' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) assert self.cpu.get_int_value(deadframe, 0) == 1 excvalue = self.cpu.grab_exc_value(deadframe) @@ -2284,7 +2284,7 @@ 'func_ptr': func_ptr, 'calldescr': calldescr}) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) f1 = longlong.getfloatstorage(1.2) f2 = longlong.getfloatstorage(3.4) frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, f1, f2) @@ -2329,7 +2329,7 @@ ] ops[2].setfailargs([i1, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2375,7 +2375,7 @@ ] ops[2].setfailargs([i1, i2, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2423,7 +2423,7 @@ ] ops[2].setfailargs([i1, f2, i0]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2465,7 +2465,7 @@ ] ops[1].setfailargs([i1, i2]) looptoken = JitCellToken() - self.cpu.compile_loop([i1], ops, looptoken) + self.cpu.compile_loop(None, [i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, ord('G')) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 @@ -2523,7 +2523,7 @@ ] ops[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1, i2, i3], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1, i2, i3], ops, looptoken) args = [rffi.cast(lltype.Signed, raw), 2, 4, @@ -2580,7 +2580,7 @@ ResOperation(rop.FINISH, [i3], 
None, descr=BasicFinalDescr(0)) ] looptoken = JitCellToken() - self.cpu.compile_loop([i1, i2], ops, looptoken) + self.cpu.compile_loop(None, [i1, i2], ops, looptoken) buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw') args = [buflen, rffi.cast(lltype.Signed, buffer)] @@ -2650,7 +2650,7 @@ ] ops[1].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([], ops, looptoken) + self.cpu.compile_loop(None, [], ops, looptoken) deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) @@ -2790,7 +2790,7 @@ ops.insert(-1, ResOperation(rop.SAME_AS, [b1], b1.clonebox())) looptoken = JitCellToken() - self.cpu.compile_loop(argboxes, ops, looptoken) + self.cpu.compile_loop(None, argboxes, ops, looptoken) # seen = [] deadframe = self.cpu.execute_token(looptoken, *argvalues_normal) @@ -2815,7 +2815,7 @@ ] ops[0].setfailargs([i1]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2842,7 +2842,7 @@ ResOperation(rop.FINISH, [i2], None, descr=BasicFinalDescr(3)) ] ops[0].setfailargs([]) - self.cpu.compile_bridge(faildescr, [i2], ops, looptoken) + self.cpu.compile_bridge(None, faildescr, [i2], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2875,7 +2875,7 @@ ] ops[0].setfailargs([]) looptoken = JitCellToken() - self.cpu.compile_loop([i0], ops, looptoken) + self.cpu.compile_loop(None, [i0], ops, looptoken) # mark as failing self.cpu.invalidate_loop(looptoken) # attach a bridge @@ -2883,7 +2883,7 @@ ops2 = [ ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), ] - self.cpu.compile_bridge(faildescr, [], ops2, looptoken) + self.cpu.compile_bridge(None, faildescr, [], ops2, looptoken) # run: must not be caught in an infinite loop deadframe = self.cpu.execute_token(looptoken, 16) fail = self.cpu.get_latest_descr(deadframe) @@ -3091,7 +3091,7 @@ looptoken.outermost_jitdriver_sd = FakeJitDriverSD() finish_descr = loop.operations[-1].getdescr() self.cpu.done_with_this_frame_descr_int = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( @@ -3109,7 +3109,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_int_value(deadframe, 0) == 13 @@ -3119,7 +3119,7 @@ del called[:] self.cpu.done_with_this_frame_descr_int = finish_descr othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) assert self.cpu.get_int_value(deadframe, 0) == 97 @@ -3157,7 +3157,7 @@ loop = parse(ops) looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * 10 RES = 
lltype.Signed FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( @@ -3171,7 +3171,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) deadframe = self.cpu.execute_token(othertoken, sys.maxint - 1) assert self.cpu.get_int_value(deadframe, 0) == 3 @@ -3209,7 +3209,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.done_with_this_frame_descr_float = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(2.3)] deadframe = self.cpu.execute_token(looptoken, *args) @@ -3223,7 +3223,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(3.2)] deadframe = self.cpu.execute_token(othertoken, *args) @@ -3235,7 +3235,7 @@ del called[:] self.cpu.done_with_this_frame_descr_float = finish_descr othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(4.2)] deadframe = self.cpu.execute_token(othertoken, *args) @@ -3298,7 +3298,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() self.cpu.done_with_this_frame_descr_float = BasicFinalDescr() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) finish_descr = loop.operations[-1].getdescr() args = [longlong.getfloatstorage(1.25), longlong.getfloatstorage(2.35)] @@ -3315,7 +3315,7 @@ ''' loop = parse(ops, namespace=locals()) othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # normal call_assembler: goes to looptoken args = [longlong.getfloatstorage(1.25), @@ -3334,7 +3334,7 @@ loop2 = parse(ops) looptoken2 = JitCellToken() looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() - self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) finish_descr2 = loop2.operations[-1].getdescr() # install it @@ -3694,7 +3694,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) # overflowing value: deadframe = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) fail = self.cpu.get_latest_descr(deadframe) @@ -3747,7 +3747,7 @@ operations[3].setfailargs([i1]) operations[6].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -3759,7 +3759,7 @@ ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] - self.cpu.compile_bridge(faildescr, inputargs2, operations2, looptoken) + 
self.cpu.compile_bridge(None, faildescr, inputargs2, operations2, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -3776,7 +3776,7 @@ descr = BasicFinalDescr() loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) for inp, outp in [(2,2), (-3, 0)]: deadframe = self.cpu.execute_token(looptoken, inp) assert outp == self.cpu.get_int_value(deadframe, 0) @@ -3805,8 +3805,8 @@ bridge = parse(bridge_ops, self.cpu, namespace=locals()) looptoken = JitCellToken() self.cpu.assembler.set_debug(False) - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - bridge_info = self.cpu.compile_bridge(faildescr, bridge.inputargs, + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) + bridge_info = self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) self.cpu.assembler.set_debug(True) # always on untranslated @@ -3850,7 +3850,7 @@ ResOperation(rop.FINISH, [i0], None, descr=BasicFinalDescr(1234)), ] operations[1].setfailargs([i0]) - self.cpu.compile_loop(inputargs, operations, looptoken1) + self.cpu.compile_loop(None, inputargs, operations, looptoken1) def func(a, b, c, d, e, f, g, h, i): assert a + 2 == b @@ -3904,14 +3904,14 @@ ResOperation(rop.JUMP, [i19], None, descr=targettoken1), ] operations2[-2].setfailargs([]) - self.cpu.compile_bridge(faildescr1, inputargs, operations2, looptoken1) + self.cpu.compile_bridge(None, faildescr1, inputargs, operations2, looptoken1) looptoken2 = JitCellToken() inputargs = [BoxInt()] operations3 = [ ResOperation(rop.JUMP, [ConstInt(0)], None, descr=targettoken1), ] - self.cpu.compile_loop(inputargs, operations3, looptoken2) + self.cpu.compile_loop(None, inputargs, operations3, looptoken2) deadframe = self.cpu.execute_token(looptoken2, -9) fail = self.cpu.get_latest_descr(deadframe) @@ -3928,11 +3928,11 @@ operations[0].setfailargs([]) looptoken = JitCellToken() inputargs = [t_box] - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) operations = [ ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(99)) ] - self.cpu.compile_bridge(faildescr, [], operations, looptoken) + self.cpu.compile_bridge(None, faildescr, [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, null_box.getref_base()) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 99 @@ -3960,7 +3960,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_int_value(deadframe, 0) @@ -3990,7 +3990,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_float_value(deadframe, 0) @@ -4020,7 +4020,7 @@ # loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, 
loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) result = self.cpu.get_int_value(deadframe, 0) @@ -4052,7 +4052,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, value) result = rawstorage.raw_storage_getitem(T, p, 16) @@ -4084,7 +4084,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, longlong.getfloatstorage(value)) @@ -4118,7 +4118,7 @@ p[i] = '\xDD' loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16, longlong.singlefloat2int(value)) @@ -4153,7 +4153,7 @@ ] ops[2].setfailargs([i2]) looptoken = JitCellToken() - self.cpu.compile_loop([i0, i1], ops, looptoken) + self.cpu.compile_loop(None, [i0, i1], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 @@ -4187,7 +4187,7 @@ finish(i1, descr=finaldescr) """, namespace={'finaldescr': finaldescr, 'calldescr2': calldescr2, 'guarddescr': guarddescr, 'func2_ptr': func2_ptr}) - self.cpu.compile_bridge(faildescr, bridge.inputargs, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) cpu = self.cpu @@ -4220,7 +4220,7 @@ guard_true(i0, descr=faildescr) [i1, i2, px] finish(i2, descr=finaldescr2) """, namespace=locals()) - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) frame = self.cpu.execute_token(looptoken, 0, 0, 3) assert self.cpu.get_latest_descr(frame) is guarddescr from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU @@ -4269,7 +4269,7 @@ 'faildescr2': BasicFailDescr(1), 'xtp': xtp }) - self.cpu.compile_bridge(faildescr, bridge.inputargs, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations, looptoken) raise LLException(xtp, xptr) @@ -4290,7 +4290,7 @@ 'faildescr': faildescr, 'finaldescr2': BasicFinalDescr(1)}) - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) frame = self.cpu.execute_token(looptoken, 1, 2, 3) descr = self.cpu.get_latest_descr(frame) assert descr.identifier == 42 diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -239,9 +239,9 @@ print >>s, ' operations[%d].setfailargs([%s])' % (i, fa) if fail_descr is None: print >>s, ' looptoken = JitCellToken()' - print >>s, ' cpu.compile_loop(inputargs, operations, looptoken)' + print >>s, ' cpu.compile_loop(None, inputargs, operations, looptoken)' else: - print >>s, ' cpu.compile_bridge(%s, inputargs, operations, looptoken)' % self.descr_counters[fail_descr] + print >>s, ' cpu.compile_bridge(None, %s, inputargs, 
operations, looptoken)' % self.descr_counters[fail_descr] if hasattr(self.loop, 'inputargs'): vals = [] for i, v in enumerate(self.loop.inputargs): @@ -643,7 +643,7 @@ self.builder = builder self.loop = loop dump(loop) - cpu.compile_loop(loop.inputargs, loop.operations, loop._jitcelltoken) + cpu.compile_loop(None, loop.inputargs, loop.operations, loop._jitcelltoken) if self.output: builder.print_loop(self.output) @@ -715,7 +715,7 @@ if box not in self.loop.inputargs: box = box.constbox() args.append(box) - self.cpu.compile_loop(self.loop.inputargs, + self.cpu.compile_loop(None, self.loop.inputargs, [ResOperation(rop.JUMP, args, None, descr=self.loop._targettoken)], self._initialjumploop_celltoken) @@ -851,7 +851,7 @@ if r.random() < .05: return False dump(subloop) - self.builder.cpu.compile_bridge(fail_descr, fail_args, + self.builder.cpu.compile_bridge(None, fail_descr, fail_args, subloop.operations, self.loop._jitcelltoken) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -434,7 +434,8 @@ else: self.wb_slowpath[withcards + 2 * withfloats] = rawstart - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, + log): '''adds the following attributes to looptoken: _ll_function_addr (address of the generated func, as an int) _ll_loop_code (debug: addr of the start of the ResOps) @@ -467,8 +468,8 @@ # self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - operations = regalloc.prepare_loop(inputargs, operations, looptoken, - clt.allgcrefs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -498,6 +499,9 @@ looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset looptoken._ll_function_addr = rawstart + if logger: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) self.fixup_target_tokens(rawstart) self.teardown() @@ -509,7 +513,7 @@ return AsmInfo(ops_offset, rawstart + looppos, size_excluding_failure_stuff - looppos) - def assemble_bridge(self, faildescr, inputargs, operations, + def assemble_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log): if not we_are_translated(): # Arguments should be unique @@ -544,6 +548,9 @@ ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + if logger: + logger.log_bridge(inputargs, operations, "rewritten", + ops_offset=ops_offset) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() @@ -2022,7 +2029,7 @@ # Write code equivalent to write_barrier() in the GC: it checks # a flag in the object at arglocs[0], and if set, it calls a # helper piece of assembler. The latter saves registers as needed - # and call the function jit_remember_young_pointer() from the GC. + # and call the function remember_young_pointer() from the GC. 
if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -370,8 +370,8 @@ class LocationCodeBuilder(object): _mixin_ = True - _reuse_scratch_register = False - _scratch_register_known = False + _reuse_scratch_register = False # for now, this is always False + _scratch_register_known = False # for now, this is always False _scratch_register_value = 0 def _binaryop(name): @@ -576,6 +576,7 @@ self.MOV_ri(X86_64_SCRATCH_REG.value, value) def begin_reuse_scratch_register(self): + # --NEVER CALLED (only from a specific test)-- # Flag the beginning of a block where it is okay to reuse the value # of the scratch register. In theory we shouldn't have to do this if # we were careful to mark all possible targets of a jump or call, and diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -88,15 +88,17 @@ lines = machine_code_dump(data, addr, self.backend_name, label_list) print ''.join(lines) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): - return self.assembler.assemble_loop(name, inputargs, operations, + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, + name=''): + return self.assembler.assemble_loop(logger, name, inputargs, operations, looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(faildescr, inputargs, operations, + return self.assembler.assemble_bridge(logger, faildescr, inputargs, + operations, original_loop_token, log=log) def clear_latest_values(self, count): diff --git a/rpython/jit/backend/x86/test/test_regalloc2.py b/rpython/jit/backend/x86/test/test_regalloc2.py --- a/rpython/jit/backend/x86/test/test_regalloc2.py +++ b/rpython/jit/backend/x86/test/test_regalloc2.py @@ -32,7 +32,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 9) assert cpu.get_int_value(deadframe, 0) == (9 >> 3) assert cpu.get_int_value(deadframe, 1) == (~18) @@ -58,7 +58,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -10) assert cpu.get_int_value(deadframe, 0) == 0 assert cpu.get_int_value(deadframe, 1) == -1000 @@ -159,7 +159,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -13, 10, 10, 8, -8, -16, -18, 46, -12, 26) assert cpu.get_int_value(deadframe, 0) == 0 @@ -271,7 +271,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 17, -20, -6, 6, 1, 13, 13, 9, 49, 8) assert 
cpu.get_int_value(deadframe, 0) == 0 @@ -386,7 +386,7 @@ operations[4].setfailargs([v4, v8, v10, v2, v9, v7, v6, v1]) operations[8].setfailargs([v3, v9, v2, v6, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) loop_args = [1, -39, 46, 21, 16, 6, -4611686018427387905, 12, 14, 2] frame = cpu.execute_token(looptoken, *loop_args) assert cpu.get_int_value(frame, 0) == 46 @@ -493,7 +493,7 @@ operations[16].setfailargs([v5, v9]) operations[34].setfailargs([]) operations[37].setfailargs([v12, v19, v10, v7, v4, v8, v18, v15, v9]) - cpu.compile_bridge(faildescr1, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr1, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == -9223372036854775766 assert cpu.get_int_value(frame, 1) == 0 @@ -583,7 +583,7 @@ operations[0].setfailargs([]) operations[8].setfailargs([tmp23, v5, v3, v11, v6]) operations[30].setfailargs([v6]) - cpu.compile_bridge(faildescr6, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr6, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == -9223372036854775808 v1 = BoxInt() @@ -607,6 +607,6 @@ ResOperation(rop.FINISH, [], None, descr=finishdescr13), ] operations[4].setfailargs([v2]) - cpu.compile_bridge(faildescr10, inputargs, operations, looptoken) + cpu.compile_bridge(None, faildescr10, inputargs, operations, looptoken) frame = cpu.execute_token(looptoken, *loop_args) #assert cpu.get_int_value(frame, 0) == 10 diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -287,7 +287,7 @@ ] ops[-2].setfailargs([i1]) looptoken = JitCellToken() - self.cpu.compile_loop([b], ops, looptoken) + self.cpu.compile_loop(None, [b], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, b.value) result = self.cpu.get_int_value(deadframe, 0) if guard == rop.GUARD_FALSE: @@ -333,7 +333,7 @@ ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, ops, looptoken) + self.cpu.compile_loop(None, inputargs, ops, looptoken) inputvalues = [box.value for box in inputargs] deadframe = self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_int_value(deadframe, 0) @@ -377,7 +377,7 @@ ] inputargs = [i0] operations[-2].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" assert loopaddress <= looptoken._ll_loop_code @@ -393,7 +393,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) name, address, size = agent.functions[1] assert name == "Bridge # 0: bye (loop counter 1)" # Would be exactly ==, but there are some guard failure recovery @@ -422,7 +422,7 @@ ] inputargs = [i0] debug._log = dlog = debug.DebugLog() - info = self.cpu.compile_loop(inputargs, operations, looptoken) + info = self.cpu.compile_loop(None, inputargs, operations, looptoken) ops_offset = info.ops_offset debug._log = None # @@ -508,7 +508,7 @@ ops[5].setfailargs([]) ops[7].setfailargs([]) 
looptoken = JitCellToken() - self.cpu.compile_loop([i1, i2], ops, looptoken) + self.cpu.compile_loop(None, [i1, i2], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 123450, 123408) fail = self.cpu.get_latest_descr(deadframe) @@ -549,7 +549,7 @@ try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() - self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.compile_loop(None, ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -302,14 +302,16 @@ log=True, name=''): metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', name=name) - return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, + return metainterp_sd.cpu.compile_loop(metainterp_sd.logger_ops, + inputargs, operations, looptoken, log=log, name=name) def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True): metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling") assert isinstance(faildescr, AbstractFailDescr) - return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, + return metainterp_sd.cpu.compile_bridge(metainterp_sd.logger_ops, + faildescr, inputargs, operations, original_loop_token, log=log) def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): @@ -932,7 +934,7 @@ ] operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) - cpu.compile_loop(inputargs, operations, jitcell_token, log=False) + cpu.compile_loop(None, inputargs, operations, jitcell_token, log=False) if memory_manager is not None: # for tests memory_manager.keep_loop_alive(jitcell_token) return jitcell_token diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -17,6 +17,10 @@ debug_start("jit-log-noopt-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") + elif type == "rewritten": + debug_start("jit-log-rewritten-loop") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-rewritten-loop") elif number == -2: debug_start("jit-log-compiling-loop") logops = self._log_operations(inputargs, operations, ops_offset) @@ -35,6 +39,10 @@ debug_start("jit-log-noopt-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") + elif extra == "rewritten": + debug_start("jit-log-rewritten-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-rewritten-bridge") elif extra == "compiling": debug_start("jit-log-compiling-bridge") logops = self._log_operations(inputargs, operations, ops_offset) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -52,7 +52,7 @@ # otherwise, the operation remains self.emit_operation(op) if op.returns_bool_result(): - self.optimizer.bool_boxes[self.getvalue(op.result)] = None + self.optimizer.bool_boxes[self.getvalue(op.result)] = None if nextop: self.emit_operation(nextop) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py 
b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -178,6 +178,17 @@ else: self.emit_operation(op) + def optimize_INT_XOR(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + + if v1.is_constant() and v1.box.getint() == 0: + self.make_equal_to(op.result, v2) + elif v2.is_constant() and v2.box.getint() == 0: + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) + def optimize_FLOAT_MUL(self, op): arg1 = op.getarg(0) arg2 = op.getarg(1) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -3263,6 +3263,20 @@ """ self.optimize_loop(ops, expected) + def test_fold_partially_constant_xor(self): + ops = """ + [i0, i1] + i2 = int_xor(i0, 23) + i3 = int_xor(i1, 0) + jump(i2, i3) + """ + expected = """ + [i0, i1] + i2 = int_xor(i0, 23) + jump(i2, i1) + """ + self.optimize_loop(ops, expected) + # ---------- def test_residual_call_does_not_invalidate_caches(self): diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -17,7 +17,7 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, token, log=True, name=''): self.seen.append((inputargs, operations, token)) class FakeLogger(object): diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -101,7 +101,7 @@ def set_root_walker(self, root_walker): self.root_walker = root_walker - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): pass def size_gc_header(self, typeid=0): diff --git a/rpython/memory/gc/generation.py b/rpython/memory/gc/generation.py --- a/rpython/memory/gc/generation.py +++ b/rpython/memory/gc/generation.py @@ -336,7 +336,7 @@ addr = pointer.address[0] newaddr = self.copy(addr) pointer.address[0] = newaddr - self.write_into_last_generation_obj(obj, newaddr) + self.write_into_last_generation_obj(obj) # ____________________________________________________________ # Implementation of nursery-only collections @@ -467,9 +467,9 @@ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()") JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS - def write_barrier(self, newvalue, addr_struct): + def write_barrier(self, addr_struct): if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: - self.remember_young_pointer(addr_struct, newvalue) + self.remember_young_pointer(addr_struct) def _setup_wb(self): DEBUG = self.DEBUG @@ -480,43 +480,30 @@ From noreply at buildbot.pypy.org Tue Aug 20 16:03:53 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 20 Aug 2013 16:03:53 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Remove all ``@specialize.argtype(0)`` and use import_from_mixin(). 
Message-ID: <20130820140353.0ECEB1C02B1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66263:e75b57683b2b Date: 2013-08-20 16:00 +0200 http://bitbucket.org/pypy/pypy/changeset/e75b57683b2b/ Log: Remove all ``@specialize.argtype(0)`` and use import_from_mixin(). diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -9,14 +9,16 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index -from rpython.rlib.objectmodel import newlist_hint, resizelist_hint +from rpython.rlib.objectmodel import newlist_hint, resizelist_hint, import_from_mixin from rpython.rlib.rstring import StringBuilder def _make_data(s): return [s[i] for i in range(len(s))] -class W_BytearrayObject(W_Root, StringMethods): +class W_BytearrayObject(W_Root): + import_from_mixin(StringMethods) + def __init__(w_self, data): w_self.data = data diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -12,7 +12,7 @@ from pypy.objspace.std.unicodeobject import (unicode_from_string, decode_object, unicode_from_encoded_object, _get_encoding_and_errors) from rpython.rlib.jit import we_are_jitted -from rpython.rlib.objectmodel import compute_hash, compute_unique_id +from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin from rpython.rlib.rstring import StringBuilder, replace @@ -424,7 +424,8 @@ pass -class W_BytesObject(StringMethods, W_AbstractBytesObject): +class W_BytesObject(W_AbstractBytesObject): + import_from_mixin(StringMethods) _immutable_fields_ = ['_value'] def __init__(self, str): diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -9,8 +9,6 @@ class StringMethods(object): - _mixin_ = True - def _sliced(self, space, s, start, stop, orig_obj): assert start >= 0 assert stop >= 0 @@ -26,7 +24,6 @@ space, lenself, w_start, w_end, upper_bound=upper_bound) return (value, start, end) - @specialize.argtype(0) def descr_eq(self, space, w_other): try: return space.newbool(self._val(space) == self._op_val(space, w_other)) @@ -42,7 +39,6 @@ return space.w_False raise - @specialize.argtype(0) def descr_ne(self, space, w_other): try: return space.newbool(self._val(space) != self._op_val(space, w_other)) @@ -58,7 +54,6 @@ return space.w_True raise - @specialize.argtype(0) def descr_lt(self, space, w_other): try: return space.newbool(self._val(space) < self._op_val(space, w_other)) @@ -66,7 +61,6 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented - @specialize.argtype(0) def descr_le(self, space, w_other): try: return space.newbool(self._val(space) <= self._op_val(space, w_other)) @@ -74,7 +68,6 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented - @specialize.argtype(0) def descr_gt(self, space, w_other): try: return space.newbool(self._val(space) > self._op_val(space, w_other)) @@ -82,7 +75,6 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented - @specialize.argtype(0) def descr_ge(self, space, w_other): try: return space.newbool(self._val(space) >= self._op_val(space, w_other)) @@ -90,15 +82,12 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented 
- @specialize.argtype(0) def descr_len(self, space): return space.wrap(self._len()) - #@specialize.argtype(0) #def descr_iter(self, space): # pass - @specialize.argtype(0) def descr_contains(self, space, w_sub): from pypy.objspace.std.bytearrayobject import W_BytearrayObject if (isinstance(self, W_BytearrayObject) and @@ -113,11 +102,9 @@ return space.w_False return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) - @specialize.argtype(0) def descr_add(self, space, w_other): return self._new(self._val(space) + self._op_val(space, w_other)) - @specialize.argtype(0) def descr_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) @@ -131,7 +118,6 @@ return self._new(self._val(space)[0] * times) return self._new(self._val(space) * times) - @specialize.argtype(0) def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): selfvalue = self._val(space) @@ -160,7 +146,6 @@ #return wrapchar(space, selfvalue[index]) return self._new(selfvalue[index]) - @specialize.argtype(0) def descr_getslice(self, space, w_start, w_stop): selfvalue = self._val(space) start, stop = normalize_simple_slice(space, len(selfvalue), w_start, @@ -170,7 +155,6 @@ else: return self._sliced(space, selfvalue, start, stop, self) - @specialize.argtype(0) def descr_capitalize(self, space): value = self._val(space) if len(value) == 0: @@ -183,7 +167,6 @@ return self._new(builder.build()) @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) - @specialize.argtype(0) def descr_center(self, space, width, w_fillchar): value = self._val(space) fillchar = self._op_val(space, w_fillchar) @@ -201,12 +184,10 @@ return self._new(u_centered) - @specialize.argtype(0) def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) return space.newint(value.count(self._op_val(space, w_sub), start, end)) - @specialize.argtype(0) def descr_decode(self, space, w_encoding=None, w_errors=None): from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ unicode_from_string, decode_object @@ -215,7 +196,6 @@ return unicode_from_string(space, self) return decode_object(space, self, encoding, errors) - @specialize.argtype(0) def descr_encode(self, space, w_encoding=None, w_errors=None): from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ encode_object @@ -223,7 +203,6 @@ return encode_object(space, self, encoding, errors) @unwrap_spec(tabsize=int) - @specialize.argtype(0) def descr_expandtabs(self, space, tabsize=8): value = self._val(space) if not value: @@ -266,19 +245,16 @@ return distance - @specialize.argtype(0) def descr_find(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.find(self._op_val(space, w_sub), start, end) return space.wrap(res) - @specialize.argtype(0) def descr_rfind(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.rfind(self._op_val(space, w_sub), start, end) return space.wrap(res) - @specialize.argtype(0) def descr_index(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.find(self._op_val(space, w_sub), start, end) @@ -288,7 +264,6 @@ return space.wrap(res) - @specialize.argtype(0) def descr_rindex(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = 
value.rfind(self._op_val(space, w_sub), start, end) @@ -318,19 +293,15 @@ return space.w_False return space.w_True - @specialize.argtype(0) def descr_isalnum(self, space): return self._is_generic(space, '_isalnum') - @specialize.argtype(0) def descr_isalpha(self, space): return self._is_generic(space, '_isalpha') - @specialize.argtype(0) def descr_isdigit(self, space): return self._is_generic(space, '_isdigit') - @specialize.argtype(0) def descr_islower(self, space): v = self._val(space) if len(v) == 1: @@ -344,11 +315,9 @@ cased = True return space.newbool(cased) - @specialize.argtype(0) def descr_isspace(self, space): return self._is_generic(space, '_isspace') - @specialize.argtype(0) def descr_istitle(self, space): input = self._val(space) cased = False @@ -370,7 +339,6 @@ return space.newbool(cased) - @specialize.argtype(0) def descr_isupper(self, space): v = self._val(space) if len(v) == 1: @@ -384,7 +352,6 @@ cased = True return space.newbool(cased) - @specialize.argtype(0) def descr_join(self, space, w_list): from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.unicodeobject import W_UnicodeObject @@ -445,7 +412,6 @@ assert False, 'unreachable' @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) - @specialize.argtype(0) def descr_ljust(self, space, width, w_fillchar): value = self._val(space) fillchar = self._op_val(space, w_fillchar) @@ -461,7 +427,6 @@ return self._new(value) @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) - @specialize.argtype(0) def descr_rjust(self, space, width, w_fillchar): value = self._val(space) fillchar = self._op_val(space, w_fillchar) @@ -476,7 +441,6 @@ return self._new(value) - @specialize.argtype(0) def descr_lower(self, space): value = self._val(space) builder = self._builder(len(value)) @@ -484,7 +448,6 @@ builder.append(self._lower(value[i])) return self._new(builder.build()) - @specialize.argtype(0) def descr_partition(self, space, w_sub): value = self._val(space) sub = self._op_val(space, w_sub) @@ -505,7 +468,6 @@ [self._sliced(space, value, 0, pos, self), w_sub, self._sliced(space, value, pos+len(sub), len(value), self)]) - @specialize.argtype(0) def descr_rpartition(self, space, w_sub): value = self._val(space) sub = self._op_val(space, w_sub) @@ -527,7 +489,6 @@ self._sliced(space, value, pos+len(sub), len(value), self)]) @unwrap_spec(count=int) - @specialize.argtype(0) def descr_replace(self, space, w_old, w_new, count=-1): input = self._val(space) sub = self._op_val(space, w_old) @@ -540,7 +501,6 @@ return self._new(res) @unwrap_spec(maxsplit=int) - @specialize.argtype(0) def descr_split(self, space, w_sep=None, maxsplit=-1): res = [] value = self._val(space) @@ -581,7 +541,6 @@ return self._newlist_unwrapped(space, res) @unwrap_spec(maxsplit=int) - @specialize.argtype(0) def descr_rsplit(self, space, w_sep=None, maxsplit=-1): res = [] value = self._val(space) @@ -625,7 +584,6 @@ return self._newlist_unwrapped(space, res) @unwrap_spec(keepends=bool) - @specialize.argtype(0) def descr_splitlines(self, space, keepends=False): value = self._val(space) length = len(value) @@ -647,7 +605,6 @@ strs.append(value[pos:length]) return self._newlist_unwrapped(space, strs) - @specialize.argtype(0) def descr_startswith(self, space, w_prefix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end, True) @@ -658,11 +615,9 @@ return space.w_False return space.newbool(self._startswith(space, value, w_prefix, start, end)) - @specialize.argtype(0) def _startswith(self, space, 
value, w_prefix, start, end): return startswith(value, self._op_val(space, w_prefix), start, end) - @specialize.argtype(0) def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end, True) @@ -674,7 +629,6 @@ return space.w_False return space.newbool(self._endswith(space, value, w_suffix, start, end)) - @specialize.argtype(0) def _endswith(self, space, value, w_prefix, start, end): return endswith(value, self._op_val(space, w_prefix), start, end) @@ -717,25 +671,21 @@ assert rpos >= lpos # annotator hint, don't remove return self._sliced(space, value, lpos, rpos, self) - @specialize.argtype(0) def descr_strip(self, space, w_chars=None): if space.is_none(w_chars): return self._strip_none(space, left=1, right=1) return self._strip(space, w_chars, left=1, right=1) - @specialize.argtype(0) def descr_lstrip(self, space, w_chars=None): if space.is_none(w_chars): return self._strip_none(space, left=1, right=0) return self._strip(space, w_chars, left=1, right=0) - @specialize.argtype(0) def descr_rstrip(self, space, w_chars=None): if space.is_none(w_chars): return self._strip_none(space, left=0, right=1) return self._strip(space, w_chars, left=0, right=1) - @specialize.argtype(0) def descr_swapcase(self, space): selfvalue = self._val(space) builder = self._builder(len(selfvalue)) @@ -749,7 +699,6 @@ builder.append(ch) return self._new(builder.build()) - @specialize.argtype(0) def descr_title(self, space): selfval = self._val(space) if len(selfval) == 0: @@ -770,7 +719,6 @@ # for bytes and bytearray, overridden by unicode @unwrap_spec(w_deletechars=WrappedDefault('')) - @specialize.argtype(0) def descr_translate(self, space, w_table, w_deletechars): if space.is_w(w_table, space.w_None): table = self.DEFAULT_NOOP_TABLE @@ -797,7 +745,6 @@ buf.append(table[ord(char)]) return self._new(buf.build()) - @specialize.argtype(0) def descr_upper(self, space): value = self._val(space) builder = self._builder(len(value)) @@ -806,7 +753,6 @@ return self._new(builder.build()) @unwrap_spec(width=int) - @specialize.argtype(0) def descr_zfill(self, space, width): selfval = self._val(space) if len(selfval) == 0: @@ -827,6 +773,5 @@ builder.append_slice(selfval, start, len(selfval)) return self._new(builder.build()) - @specialize.argtype(0) def descr_getnewargs(self, space): return space.newtuple([self._new(self._val(space))]) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -10,7 +10,7 @@ from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from rpython.rlib.objectmodel import compute_hash, compute_unique_id +from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin from rpython.rlib.rstring import UnicodeBuilder from rpython.rlib.runicode import (str_decode_utf_8, str_decode_ascii, unicode_encode_utf_8, unicode_encode_ascii, make_unicode_escape_function) @@ -20,7 +20,8 @@ 'unicode_from_string', 'unicode_to_decimal_w'] -class W_UnicodeObject(W_Root, StringMethods): +class W_UnicodeObject(W_Root): + import_from_mixin(StringMethods) _immutable_fields_ = ['_value'] def __init__(w_self, unistr): From noreply at buildbot.pypy.org Tue Aug 20 16:58:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 16:58:54 +0200 (CEST) Subject: [pypy-commit] pypy default: Document 
Message-ID: <20130820145854.CD7EC1C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66264:790e2ef30000 Date: 2013-08-20 16:56 +0200 http://bitbucket.org/pypy/pypy/changeset/790e2ef30000/ Log: Document diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -339,9 +339,10 @@ + methods and other class attributes do not change after startup + single inheritance is fully supported -+ simple mixins somewhat work too, but the mixed in class needs a - ``_mixin_ = True`` class attribute. isinstance checks against the - mixin type will fail when translated. ++ use `rpython.rlib.objectmodel.import_from_mixin(M)` in a class + body to copy the whole content of a class `M`. This can be used + to implement mixins: functions and staticmethods are duplicated + (the other class attributes are just copied unmodified). + classes are first-class objects too From noreply at buildbot.pypy.org Tue Aug 20 16:58:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 16:58:56 +0200 (CEST) Subject: [pypy-commit] pypy default: backout ded906e02c44 for now Message-ID: <20130820145856.3BD591C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66265:229673529cf2 Date: 2013-08-20 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/229673529cf2/ Log: backout ded906e02c44 for now diff --git a/rpython/rlib/rsre/rsre_char.py b/rpython/rlib/rsre/rsre_char.py --- a/rpython/rlib/rsre/rsre_char.py +++ b/rpython/rlib/rsre/rsre_char.py @@ -115,7 +115,10 @@ for function, negate in category_dispatch_unroll: if category_code == i: result = function(char_code) - return result ^ negate + if negate: + return not result # XXX this might lead to a guard + else: + return result i = i + 1 else: return False @@ -157,7 +160,9 @@ ppos += 1 else: return False - return result ^ negated + if negated: + return not result + return result def set_literal(pat, index, char_code): # From noreply at buildbot.pypy.org Tue Aug 20 16:58:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 16:58:57 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130820145857.752C01C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66266:44440be09da4 Date: 2013-08-20 16:58 +0200 http://bitbucket.org/pypy/pypy/changeset/44440be09da4/ Log: merge heads diff --git a/pypy/tool/gcdump.py b/pypy/tool/gcdump.py --- a/pypy/tool/gcdump.py +++ b/pypy/tool/gcdump.py @@ -29,6 +29,8 @@ for num, line in enumerate(iter): if num == 0: continue + if not line: + continue words = line.split() if words[0].startswith('member'): del words[0] From noreply at buildbot.pypy.org Tue Aug 20 17:08:24 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 20 Aug 2013 17:08:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix a bug that makes translation fails sometimes Message-ID: <20130820150824.217181C02EE@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66267:0ce8426280c8 Date: 2013-08-20 17:07 +0200 http://bitbucket.org/pypy/pypy/changeset/0ce8426280c8/ Log: Fix a bug that makes translation fails sometimes diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -309,8 +309,8 @@ @simple_unary_op def rint(self, v): - if isfinite(v): - return rfloat.round_double(v, 0, half_even=True) + if isfinite(float(v)): + return rfloat.round_double(float(v), 0, 
half_even=True) else: return v From noreply at buildbot.pypy.org Tue Aug 20 17:47:00 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 20 Aug 2013 17:47:00 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix translation. Message-ID: <20130820154700.ABB141C02B1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66268:ee17e069dd6f Date: 2013-08-20 16:45 +0200 http://bitbucket.org/pypy/pypy/changeset/ee17e069dd6f/ Log: Fix translation. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -567,6 +567,7 @@ # auto-conversion fun + _StringMethods_descr_add = descr_add def descr_add(self, space, w_other): if space.isinstance_w(w_other, space.w_unicode): self_as_unicode = unicode_from_encoded_object(space, self, None, None) @@ -582,28 +583,32 @@ builder.append(self._value) builder.append(w_other._value) return W_StringBufferObject(builder) - return StringMethods.descr_add(self, space, w_other) + return self._StringMethods_descr_add(space, w_other) + _StringMethods__startswith = _startswith def _startswith(self, space, value, w_prefix, start, end): if space.isinstance_w(w_prefix, space.w_unicode): self_as_unicode = unicode_from_encoded_object(space, self, None, None) return self_as_unicode._startswith(space, self_as_unicode._value, w_prefix, start, end) - return StringMethods._startswith(self, space, value, w_prefix, start, end) + return self._StringMethods__startswith(space, value, w_prefix, start, end) + _StringMethods__endswith = _endswith def _endswith(self, space, value, w_suffix, start, end): if space.isinstance_w(w_suffix, space.w_unicode): self_as_unicode = unicode_from_encoded_object(space, self, None, None) return self_as_unicode._endswith(space, self_as_unicode._value, w_suffix, start, end) - return StringMethods._endswith(self, space, value, w_suffix, start, end) + return self._StringMethods__endswith(space, value, w_suffix, start, end) + _StringMethods_descr_contains = descr_contains def descr_contains(self, space, w_sub): if space.isinstance_w(w_sub, space.w_unicode): from pypy.objspace.std.unicodeobject import W_UnicodeObject assert isinstance(w_sub, W_UnicodeObject) self_as_unicode = unicode_from_encoded_object(space, self, None, None) return space.newbool(self_as_unicode._value.find(w_sub._value) >= 0) - return StringMethods.descr_contains(self, space, w_sub) + return self._StringMethods_descr_contains(space, w_sub) + _StringMethods_descr_replace = descr_replace @unwrap_spec(count=int) def descr_replace(self, space, w_old, w_new, count=-1): old_is_unicode = space.isinstance_w(w_old, space.w_unicode) @@ -623,7 +628,7 @@ raise OperationError(space.w_OverflowError, space.wrap("replace string is too long")) return self_as_uni._new(res) - return StringMethods.descr_replace(self, space, w_old, w_new, count) + return self._StringMethods_descr_replace(space, w_old, w_new, count) def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or From noreply at buildbot.pypy.org Tue Aug 20 17:47:03 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 20 Aug 2013 17:47:03 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130820154703.A2E781C02B1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66269:bd29ce6880a0 Date: 2013-08-20 17:44 +0200 http://bitbucket.org/pypy/pypy/changeset/bd29ce6880a0/ Log: Fix. 
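The ``_StringMethods_descr_add = descr_add`` lines in the translation fix above follow from the ``import_from_mixin`` change earlier in these changesets: once ``StringMethods`` is copied into the class body instead of being inherited, there is no base class left to delegate to, so the copied method is kept under a private alias before it is overridden. A minimal sketch in plain Python, using a simplified stand-in for ``rpython.rlib.objectmodel.import_from_mixin`` (not the real RPython helper) and hypothetical class names:

    import sys

    def import_from_mixin(M):
        # Simplified stand-in: copy every plain attribute of M into the
        # class body currently being executed.  The real helper in
        # rpython.rlib.objectmodel does the same job with extra checking
        # and duplicates functions so each user gets its own copy.
        target = sys._getframe(1).f_locals
        for name, value in M.__dict__.items():
            if not (name.startswith('__') and name.endswith('__')):
                target[name] = value

    class StringMethodsSketch(object):
        def descr_add(self, other):
            return self._new(self._value + other)

    class BytesSketch(object):
        import_from_mixin(StringMethodsSketch)

        # Keep the copied method reachable under a private alias before
        # overriding it; with a copied mixin there is no superclass call.
        _StringMethods_descr_add = descr_add

        def __init__(self, value):
            self._value = value

        def _new(self, value):
            return BytesSketch(value)

        def descr_add(self, other):
            if not isinstance(other, str):
                return NotImplemented
            return self._StringMethods_descr_add(other)

    assert BytesSketch("ab").descr_add("c")._value == "abc"
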
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -118,6 +118,8 @@ return self._new(self._val(space)[0] * times) return self._new(self._val(space) * times) + descr_rmul = descr_mul + def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): selfvalue = self._val(space) From noreply at buildbot.pypy.org Tue Aug 20 18:08:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 18:08:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the bigcharset's performance, hopefully. Add a random test to verify Message-ID: <20130820160817.0F2F11C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66270:7f24ac0d5a0d Date: 2013-08-20 18:07 +0200 http://bitbucket.org/pypy/pypy/changeset/7f24ac0d5a0d/ Log: Fix the bigcharset's performance, hopefully. Add a random test to verify that it still seems to work fine. diff --git a/rpython/rlib/rsre/rsre_char.py b/rpython/rlib/rsre/rsre_char.py --- a/rpython/rlib/rsre/rsre_char.py +++ b/rpython/rlib/rsre/rsre_char.py @@ -192,39 +192,39 @@ def set_bigcharset(pat, index, char_code): # <256 blockindices> - # XXX this function needs a makeover, it's very bad count = pat[index+1] index += 2 - if char_code < 65536: - block_index = char_code >> 8 - # NB: there are CODESIZE block indices per bytecode - a = to_byte_array(pat[index+(block_index / CODESIZE)]) - block = a[block_index % CODESIZE] - index += 256 / CODESIZE # skip block indices - if CODESIZE == 2: - shift = 4 - else: - shift = 5 - block_value = pat[index+(block * (32 / CODESIZE) - + ((char_code & 255) >> shift))] - match = (block_value & (1 << (char_code & ((8 * CODESIZE) - 1)))) != 0 + + if CODESIZE == 2: + # One bytecode is 2 bytes, so contains 2 of the blockindices. + # So the 256 blockindices are packed in 128 bytecodes, but + # we need to unpack it as a byte. + assert char_code < 65536 + shift = 4 else: - index += 256 / CODESIZE # skip block indices - match = False + # One bytecode is 4 bytes, so contains 4 of the blockindices. + # So the 256 blockindices are packed in 64 bytecodes, but + # we need to unpack it as a byte. 
+ if char_code >= 65536: + index += 256 / CODESIZE + count * (32 / CODESIZE) + return False, index + shift = 5 + + block = pat[index + (char_code >> (shift + 5))] + + block_shift = char_code >> 5 + if BIG_ENDIAN: + block_shift = ~block_shift + block_shift &= (CODESIZE - 1) * 8 + block = (block >> block_shift) & 0xFF + + index += 256 / CODESIZE + block_value = pat[index+(block * (32 / CODESIZE) + + ((char_code & 255) >> shift))] + match = (block_value & (1 << (char_code & ((8 * CODESIZE) - 1)))) index += count * (32 / CODESIZE) # skip blocks return match, index -def to_byte_array(int_value): - """Creates a list of bytes out of an integer representing data that is - CODESIZE bytes wide.""" - byte_array = [0] * CODESIZE - for i in range(CODESIZE): - byte_array[i] = int_value & 0xff - int_value = int_value >> 8 - if BIG_ENDIAN: - byte_array.reverse() - return byte_array - set_dispatch_table = [ None, # FAILURE None, None, None, None, None, None, None, None, diff --git a/rpython/rlib/rsre/test/test_match.py b/rpython/rlib/rsre/test/test_match.py --- a/rpython/rlib/rsre/test/test_match.py +++ b/rpython/rlib/rsre/test/test_match.py @@ -1,4 +1,4 @@ -import re +import re, random from rpython.rlib.rsre import rsre_core from rpython.rlib.rsre.rpy import get_code @@ -241,3 +241,19 @@ def test_match_bug3(self): r = get_code(r'([ax]*?x*)?$') assert rsre_core.match(r, "aaxaa") + + def test_bigcharset(self): + for i in range(100): + chars = [unichr(random.randrange(0x100, 0xD000)) + for n in range(random.randrange(1, 25))] + pattern = u'[%s]' % (u''.join(chars),) + r = get_code(pattern) + for c in chars: + assert rsre_core.match(r, c) + for i in range(200): + c = unichr(random.randrange(0x0, 0xD000)) + res = rsre_core.match(r, c) + if c in chars: + assert res is not None + else: + assert res is None From noreply at buildbot.pypy.org Tue Aug 20 21:30:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 21:30:24 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add the first nice checkfence demo with a minimal stm. Message-ID: <20130820193024.76ADD1C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r486:6d314a5409dd Date: 2013-08-20 21:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/6d314a5409dd/ Log: Add the first nice checkfence demo with a minimal stm. Add the 'howto-checkfence' from arigo/arigo, complete it with how to run the demo. diff --git a/checkfence/README b/checkfence/README new file mode 100644 --- /dev/null +++ b/checkfence/README @@ -0,0 +1,56 @@ +Installing checkfence on Linux 64 +--------------------------------- + +apt-get install bison flex ocaml ocaml-findlib + +cvs -z3 -d:pserver:anonymous at checkfence.cvs.sourceforge.net:/cvsroot/checkfence co -P checkfence + +cvs -z3 -d:pserver:anonymous at checkfence.cvs.sourceforge.net:/cvsroot/checkfence co -P c2lsl + + +http://www.princeton.edu/~chaff/zchaff.html + for Linux 64 you need zchaff.64bit.2007.3.12.zip. + I did not try the 32-bit version. + + Build with "make -j1". + + This is C++ code with errors: it's missing these lines + #include + #include + at the top of some files. Add as you get the errors. 
+ + +CIL version 1.3.7 (the more recent 1.7.3 doesn't work here) + http://sourceforge.net/projects/cil/files/cil/cil-1.3.7/ + + cd /usr/lib/ocaml + sudo ln -s libcamlstr.a libstr.a + + ./configure + make -j1 + + +Compiling checkfence: + cd checkfence/build + + edit the Makefile: ZCHAFFDIR=/path/to/zchaff64 + make opt + + +Compiling C2LSL: + cd c2lsl + + edit the Makefile: CILDIR=/path/to/cil-1.3.7 + and also: CILINCLUDES=....x86_LINUX (instead of x86_WIN32) + + make -j1 + + + +Running the examples: + cd c4 + ln -s /full/path/to/c2lsl + ln -s /full/path/to/checkfence + ./run test1.c test1.lsl + + Look at 'T0.bsc-overview.htm' in your web browser. diff --git a/checkfence/c4/run b/checkfence/c4/run new file mode 100755 --- /dev/null +++ b/checkfence/c4/run @@ -0,0 +1,11 @@ +#!/bin/sh + +export C2LSL_HOME=./c2lsl +export CHECKFENCE_HOME=./checkfence + + +$C2LSL_HOME/bin/c2lsl.exe "$1" _run.lsl || exit 1 +shift +$CHECKFENCE_HOME/run/clean || exit 1 +echo ------------------------------------------------------------------------- +$CHECKFENCE_HOME/run/checkfence -i _run.lsl "$@" || exit 1 diff --git a/checkfence/c4/test1.c b/checkfence/c4/test1.c new file mode 100644 --- /dev/null +++ b/checkfence/c4/test1.c @@ -0,0 +1,93 @@ +#include "lsl_protos.h" + + +#define PREBUILT_FLAGS 0 +#define LOCKED 5 + +typedef int revision_t; + + +typedef struct { + int h_flags; + revision_t h_revision; + revision_t h_original; + int value; +} object_t; + +object_t o1, o2; +int global_timestamp; + +struct tx_descriptor { + int starttime; + int lock; + object_t *copy_of_o1; +}; + +void init_descriptor(struct tx_descriptor *d) +{ + d->starttime = global_timestamp; lsl_fence("load-load"); + d->copy_of_o1 = NULL; + //d->lock = lsl_get_thread_id() + 1000000; +} + + +object_t *stm_write_barrier(struct tx_descriptor *d, object_t *P) +{ + lsl_observe_label("write_barrier"); + + if (d->copy_of_o1 == NULL) { + lsl_assume(P->h_revision <= d->starttime); /* otherwise, abort */ + + object_t *W = lsl_malloc(sizeof(object_t)); + W->value = P->value; + d->copy_of_o1 = W; + } + return d->copy_of_o1; +} + + +void i() +{ + o1.h_flags = PREBUILT_FLAGS; + o1.h_revision = 0; + o1.h_original = 0; + o1.value = 50; + global_timestamp = 2; +} + +void commit(struct tx_descriptor *d) +{ + lsl_observe_label("commit"); + + if (d->copy_of_o1 != NULL) { + int old = o1.h_revision; + lsl_assume(old <= d->starttime); /* otherwise, abort */ + lsl_assume(lsl_cas_32(&o1.h_revision, old, LOCKED)); /* retry */ + } + + int endtime = global_timestamp + 1; + lsl_fence("load-load"); + lsl_assume(lsl_cas_32(&global_timestamp, endtime - 1, endtime)); + /* otherwise, retry */ + + if (d->copy_of_o1 != NULL) { + int o1_value = d->copy_of_o1->value; + o1.value = o1_value; + lsl_fence("store-store"); + o1.h_revision = endtime; + lsl_observe_output("o1_value", o1_value); + d->copy_of_o1 = NULL; + } +} + +void W1() +{ + struct tx_descriptor d; + + init_descriptor(&d); + + object_t *p1 = stm_write_barrier(&d, &o1); + ++p1->value; + + commit(&d); +} diff --git a/checkfence/c4/test1.lsl b/checkfence/c4/test1.lsl new file mode 100644 --- /dev/null +++ b/checkfence/c4/test1.lsl @@ -0,0 +1,2 @@ + +test T0 = i ( W1 | W1 ) From noreply at buildbot.pypy.org Tue Aug 20 21:35:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 21:35:13 +0200 (CEST) Subject: [pypy-commit] stmgc default: Increase the value of LOCKED. For some reason too large values increase the run-time significantly, but at least 99 is large enough for practical purposes here. 
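The test1.c model above, exercised by the LSL test ``test T0 = i ( W1 | W1 )`` (run ``i`` once, then two ``W1`` transactions concurrently), can be paraphrased in ordinary threaded Python to see what the commit protocol is doing. This is only a rough single-object illustration: the ``Cell.cas`` helper is a lock-based stand-in for an atomic compare-and-swap, a busy retry loop replaces ``lsl_assume``, and nothing here models the memory fences that CheckFence is actually used to verify.

    import threading

    class Cell(object):
        """One shared word with a compare-and-swap (lock-based stand-in)."""
        def __init__(self, value):
            self.value = value
            self._lock = threading.Lock()
        def cas(self, old, new):
            with self._lock:
                if self.value != old:
                    return False
                self.value = new
                return True

    LOCKED = 99                     # same sentinel as in test1.c
    global_timestamp = Cell(0)
    o1_revision = Cell(0)           # plays the role of o1.h_revision
    o1_value = [50]

    def W1():
        while True:                              # retry the whole transaction
            starttime = global_timestamp.value
            old = o1_revision.value
            if old == LOCKED or old > starttime:
                continue                         # "abort": restart with a fresh snapshot
            private_copy = o1_value[0] + 1       # write barrier: work on a private copy
            if not o1_revision.cas(old, LOCKED): # grab the per-object write lock
                continue
            endtime = global_timestamp.value + 1
            while not global_timestamp.cas(endtime - 1, endtime):
                endtime = global_timestamp.value + 1
            o1_value[0] = private_copy           # write back the private copy
            o1_revision.cas(LOCKED, endtime)     # publish the new revision
            return

    threads = [threading.Thread(target=W1) for _ in range(2)]
    for t in threads: t.start()
    for t in threads: t.join()
    # both increments end up serialized, which is the property the model
    # checker verifies over all interleavings of the real (fenced) code
    assert o1_value[0] == 52
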
Message-ID: <20130820193513.E12E41C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r487:7acf528af898 Date: 2013-08-20 21:35 +0200 http://bitbucket.org/pypy/stmgc/changeset/7acf528af898/ Log: Increase the value of LOCKED. For some reason too large values increase the run-time significantly, but at least 99 is large enough for practical purposes here. diff --git a/checkfence/c4/test1.c b/checkfence/c4/test1.c --- a/checkfence/c4/test1.c +++ b/checkfence/c4/test1.c @@ -2,7 +2,7 @@ #define PREBUILT_FLAGS 0 -#define LOCKED 5 +#define LOCKED 99 typedef int revision_t; From noreply at buildbot.pypy.org Tue Aug 20 21:58:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Aug 2013 21:58:32 +0200 (CEST) Subject: [pypy-commit] stmgc default: Updates Message-ID: <20130820195832.1C0C71C02B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r488:85b8e1c49c62 Date: 2013-08-20 21:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/85b8e1c49c62/ Log: Updates diff --git a/checkfence/c4/test1.c b/checkfence/c4/test1.c --- a/checkfence/c4/test1.c +++ b/checkfence/c4/test1.c @@ -52,7 +52,7 @@ o1.h_revision = 0; o1.h_original = 0; o1.value = 50; - global_timestamp = 2; + global_timestamp = 0; } void commit(struct tx_descriptor *d) @@ -64,9 +64,8 @@ lsl_assume(old <= d->starttime); /* otherwise, abort */ lsl_assume(lsl_cas_32(&o1.h_revision, old, LOCKED)); /* retry */ } - + lsl_fence("full"); int endtime = global_timestamp + 1; - lsl_fence("load-load"); lsl_assume(lsl_cas_32(&global_timestamp, endtime - 1, endtime)); /* otherwise, retry */ From noreply at buildbot.pypy.org Wed Aug 21 03:05:39 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 21 Aug 2013 03:05:39 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a failing test for the rint ufunc Message-ID: <20130821010539.808571C02B1@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66271:a643b3face13 Date: 2013-08-21 03:03 +0200 http://bitbucket.org/pypy/pypy/changeset/a643b3face13/ Log: Add a failing test for the rint ufunc diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -257,6 +257,7 @@ def test_rint(self): from numpypy import array, complex, rint, isnan + import sys nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') @@ -271,6 +272,8 @@ assert rint(complex(inf, 1.5)) == complex(inf, 2.) assert rint(complex(0.5, inf)) == complex(0., inf) + assert rint(sys.maxint) == sys.maxint + def test_sign(self): from numpypy import array, sign, dtype From noreply at buildbot.pypy.org Wed Aug 21 03:09:19 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 Aug 2013 03:09:19 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Override rich comparison implementations in W_BytesObject. They return w_NotImplemented when comparing with objects that aren't instances of W_BytesObject. Message-ID: <20130821010919.B60431C02B1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66272:94ba4b49ed07 Date: 2013-08-21 03:08 +0200 http://bitbucket.org/pypy/pypy/changeset/94ba4b49ed07/ Log: Override rich comparison implementations in W_BytesObject. They return w_NotImplemented when comparing with objects that aren't instances of W_BytesObject. 
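Returning ``w_NotImplemented`` from ``descr_eq`` and friends, as the change below does, relies on the usual binary-operator protocol: when the left operand's method gives up, the interpreter retries with the reflected method of the right operand. A small app-level illustration of that protocol, with hypothetical ``Bytes`` and ``StringBuffer`` classes standing in for the interp-level implementations rather than reproducing them:

    class Bytes(object):
        def __init__(self, value):
            self.value = value
        def __eq__(self, other):
            if not isinstance(other, Bytes):
                return NotImplemented        # let the other operand have a try
            return self.value == other.value

    class StringBuffer(object):
        def __init__(self, value):
            self.value = value
        def __eq__(self, other):
            if isinstance(other, (Bytes, StringBuffer)):
                return self.value == other.value
            return NotImplemented

    # Bytes.__eq__ returns NotImplemented, so the comparison falls back to
    # StringBuffer.__eq__ with the operands swapped and still succeeds,
    # instead of dropping to the default comparison.
    assert Bytes("abc") == StringBuffer("abc")
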
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -565,6 +565,36 @@ def descr_buffer(self, space): return space.wrap(StringBuffer(self._value)) + def descr_eq(self, space, w_other): + if not isinstance(w_other, W_BytesObject): + return space.w_NotImplemented + return space.newbool(self._value == w_other._value) + + def descr_ne(self, space, w_other): + if not isinstance(w_other, W_BytesObject): + return space.w_NotImplemented + return space.newbool(self._value != w_other._value) + + def descr_lt(self, space, w_other): + if not isinstance(w_other, W_BytesObject): + return space.w_NotImplemented + return space.newbool(self._value < w_other._value) + + def descr_le(self, space, w_other): + if not isinstance(w_other, W_BytesObject): + return space.w_NotImplemented + return space.newbool(self._value <= w_other._value) + + def descr_gt(self, space, w_other): + if not isinstance(w_other, W_BytesObject): + return space.w_NotImplemented + return space.newbool(self._value > w_other._value) + + def descr_ge(self, space, w_other): + if not isinstance(w_other, W_BytesObject): + return space.w_NotImplemented + return space.newbool(self._value >= w_other._value) + # auto-conversion fun _StringMethods_descr_add = descr_add From noreply at buildbot.pypy.org Wed Aug 21 03:09:21 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 Aug 2013 03:09:21 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130821010921.5FECB1C02B1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66273:13bf2a4c394b Date: 2013-08-21 03:08 +0200 http://bitbucket.org/pypy/pypy/changeset/13bf2a4c394b/ Log: hg merge default diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -339,9 +339,10 @@ + methods and other class attributes do not change after startup + single inheritance is fully supported -+ simple mixins somewhat work too, but the mixed in class needs a - ``_mixin_ = True`` class attribute. isinstance checks against the - mixin type will fail when translated. ++ use `rpython.rlib.objectmodel.import_from_mixin(M)` in a class + body to copy the whole content of a class `M`. This can be used + to implement mixins: functions and staticmethods are duplicated + (the other class attributes are just copied unmodified). + classes are first-class objects too diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -257,6 +257,7 @@ def test_rint(self): from numpypy import array, complex, rint, isnan + import sys nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') @@ -271,6 +272,8 @@ assert rint(complex(inf, 1.5)) == complex(inf, 2.) 
assert rint(complex(0.5, inf)) == complex(0., inf) + assert rint(sys.maxint) == sys.maxint + def test_sign(self): from numpypy import array, sign, dtype diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -309,8 +309,8 @@ @simple_unary_op def rint(self, v): - if isfinite(v): - return rfloat.round_double(v, 0, half_even=True) + if isfinite(float(v)): + return rfloat.round_double(float(v), 0, half_even=True) else: return v diff --git a/pypy/tool/gcdump.py b/pypy/tool/gcdump.py --- a/pypy/tool/gcdump.py +++ b/pypy/tool/gcdump.py @@ -29,6 +29,8 @@ for num, line in enumerate(iter): if num == 0: continue + if not line: + continue words = line.split() if words[0].startswith('member'): del words[0] diff --git a/rpython/rlib/rsre/rsre_char.py b/rpython/rlib/rsre/rsre_char.py --- a/rpython/rlib/rsre/rsre_char.py +++ b/rpython/rlib/rsre/rsre_char.py @@ -115,7 +115,10 @@ for function, negate in category_dispatch_unroll: if category_code == i: result = function(char_code) - return result ^ negate + if negate: + return not result # XXX this might lead to a guard + else: + return result i = i + 1 else: return False @@ -157,7 +160,9 @@ ppos += 1 else: return False - return result ^ negated + if negated: + return not result + return result def set_literal(pat, index, char_code): # @@ -187,39 +192,39 @@ def set_bigcharset(pat, index, char_code): # <256 blockindices> - # XXX this function needs a makeover, it's very bad count = pat[index+1] index += 2 - if char_code < 65536: - block_index = char_code >> 8 - # NB: there are CODESIZE block indices per bytecode - a = to_byte_array(pat[index+(block_index / CODESIZE)]) - block = a[block_index % CODESIZE] - index += 256 / CODESIZE # skip block indices - if CODESIZE == 2: - shift = 4 - else: - shift = 5 - block_value = pat[index+(block * (32 / CODESIZE) - + ((char_code & 255) >> shift))] - match = (block_value & (1 << (char_code & ((8 * CODESIZE) - 1)))) != 0 + + if CODESIZE == 2: + # One bytecode is 2 bytes, so contains 2 of the blockindices. + # So the 256 blockindices are packed in 128 bytecodes, but + # we need to unpack it as a byte. + assert char_code < 65536 + shift = 4 else: - index += 256 / CODESIZE # skip block indices - match = False + # One bytecode is 4 bytes, so contains 4 of the blockindices. + # So the 256 blockindices are packed in 64 bytecodes, but + # we need to unpack it as a byte. 
+ if char_code >= 65536: + index += 256 / CODESIZE + count * (32 / CODESIZE) + return False, index + shift = 5 + + block = pat[index + (char_code >> (shift + 5))] + + block_shift = char_code >> 5 + if BIG_ENDIAN: + block_shift = ~block_shift + block_shift &= (CODESIZE - 1) * 8 + block = (block >> block_shift) & 0xFF + + index += 256 / CODESIZE + block_value = pat[index+(block * (32 / CODESIZE) + + ((char_code & 255) >> shift))] + match = (block_value & (1 << (char_code & ((8 * CODESIZE) - 1)))) index += count * (32 / CODESIZE) # skip blocks return match, index -def to_byte_array(int_value): - """Creates a list of bytes out of an integer representing data that is - CODESIZE bytes wide.""" - byte_array = [0] * CODESIZE - for i in range(CODESIZE): - byte_array[i] = int_value & 0xff - int_value = int_value >> 8 - if BIG_ENDIAN: - byte_array.reverse() - return byte_array - set_dispatch_table = [ None, # FAILURE None, None, None, None, None, None, None, None, diff --git a/rpython/rlib/rsre/test/test_match.py b/rpython/rlib/rsre/test/test_match.py --- a/rpython/rlib/rsre/test/test_match.py +++ b/rpython/rlib/rsre/test/test_match.py @@ -1,4 +1,4 @@ -import re +import re, random from rpython.rlib.rsre import rsre_core from rpython.rlib.rsre.rpy import get_code @@ -241,3 +241,19 @@ def test_match_bug3(self): r = get_code(r'([ax]*?x*)?$') assert rsre_core.match(r, "aaxaa") + + def test_bigcharset(self): + for i in range(100): + chars = [unichr(random.randrange(0x100, 0xD000)) + for n in range(random.randrange(1, 25))] + pattern = u'[%s]' % (u''.join(chars),) + r = get_code(pattern) + for c in chars: + assert rsre_core.match(r, c) + for i in range(200): + c = unichr(random.randrange(0x0, 0xD000)) + res = rsre_core.match(r, c) + if c in chars: + assert res is not None + else: + assert res is None From noreply at buildbot.pypy.org Wed Aug 21 09:41:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Aug 2013 09:41:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Add the Win64 plan in the back of my mind. Message-ID: <20130821074124.01B171C02EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66274:422233ea1b89 Date: 2013-08-21 09:40 +0200 http://bitbucket.org/pypy/pypy/changeset/422233ea1b89/ Log: Add the Win64 plan in the back of my mind. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -6,6 +6,10 @@ The following text gives some hints about how to translate the PyPy interpreter. +PyPy supports only being translated as a 32bit program, even on +64bit Windows. See at the end of this page for what is missing +for a full 64bit translation. + To build pypy-c you need a C compiler. Microsoft Visual Studio is preferred, but can also use the mingw32 port of gcc. @@ -63,7 +67,7 @@ INCLUDE, LIB and PATH (for DLLs) environment variables appropriately. Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local.zip @@ -112,13 +116,14 @@ nmake -f makefile.msc The sqlite3 database library -~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.sqlite.org/2013/sqlite-amalgamation-3071601.zip and extract it into a directory under the base directory. 
Also get http://www.sqlite.org/2013/sqlite-dll-win32-x86-3071601.zip and extract the dll into the bin directory, and the sqlite3.def into the sources directory. Now build the import library so cffi can use the header and dll:: + lib /DEF:sqlite3.def" /OUT:sqlite3.lib" copy sqlite3.lib path\to\libs @@ -206,8 +211,68 @@ March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. -.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds .. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds .. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. _`RPython translation toolchain`: translation.html + + +What is missing for a full 64-bit translation +--------------------------------------------- + +The main blocker is that we assume that the integer type of RPython is +large enough to (occasionally) contain a pointer value cast to an +integer. The simplest fix is to make sure that it is so, but it will +give the following incompatibility between CPython and PyPy on Win64: + +CPython: ``sys.maxint == 2**32-1, sys.maxsize == 2**64-1`` + +PyPy: ``sys.maxint == sys.maxsize == 2**64-1`` + +...and, correspondingly, PyPy supports ints up to the larger value of +sys.maxint before they are converted to ``long``. The first decision +that someone needs to make is if this incompatibility is reasonable. + +Assuming that it is, the fixes are probably not too much work if the +goal is only to get a translated PyPy executable and to run tests with +it --- and not care about running all the tests of PyPy before +translation. To do that, the only tests that you should run (and start +with) are some tests in rpython/translator/c/test/, like +``test_standalone.py`` and ``test_newgc.py``. Keep in mind that this +runs small translations, and some details may go wrong, running on top +of CPython Win64; notably, constant integer values should be allowed up +to ``2**63-1``, but any value larger than ``2**32-1`` will be considered +out of bound. To fix this, you need to explicitly wrap such large +integers e.g. in the class ``r_longlong`` of rpython.rlib.rarithmetic. +This makes the translation toolchain handle them as longlong, which +have the correct range, even though in the end it is the same type, +i.e. a 64-bit integer. + +What is really needed is to review all the C files in +rpython/translator/c/src for the word ``long``, because this means a +32-bit integer even on Win64. Replace it with ``Signed``, and check the +definition of ``Signed``: it should be equal to ``long`` on every other +platforms (so you can replace one with the other without breaking +anything on other platforms), and on Win64 it should be something like +``long long``. + +These two types have corresponding RPython types: ``rffi.LONG`` and +``lltype.Signed`` respectively. Add tests that check that integers +casted to one type or the other really have 32 and 64 bits respectively, +on Win64. + +Once these basic tests work, you need to review ``pypy/module/*/`` for +usages of ``rffi.LONG`` versus ``lltype.Signed``. 
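The explicit wrapping of large constants that the Win64 notes above call for can be written down concretely. A hedged sketch of RPython-style code, assuming only that ``rpython.rlib.rarithmetic.r_longlong`` behaves as described there (a 64-bit signed integer type usable whatever the width of the native ``long``); the names ``BIG_CONSTANT`` and ``scaled`` are made up for the example:

    from rpython.rlib.rarithmetic import r_longlong

    # Larger than 2**32-1: wrap the constant explicitly so the annotator
    # treats it as a longlong instead of rejecting it as out of bound.
    BIG_CONSTANT = r_longlong(0x100000001)    # 2**32 + 1

    def scaled(n):
        # mixing the wrapped constant with a machine-sized int keeps the
        # result in the full 64-bit range
        return BIG_CONSTANT * r_longlong(n)

    if __name__ == '__main__':
        print(scaled(3))    # 12884901891, well outside the 32-bit range
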
Some other places +might need a similar review too, like ``rpython/rlib/``. Important: at +this point the goal would not be to run the tests in these directories! +Doing so would create more confusion to work around. Instead, the goal +would be to fix some ``LONG-versus-Signed`` issues, and if necessary +make sure that the tests still run fine e.g. on Win32. + +This should get you a translation of PyPy with ``-O2``, i.e. without the +JIT. Check carefully the warnings of the C compiler at the end. I +think that MSVC is "nice" in the sense that by default a lot of +mismatches of integer sizes are reported as warnings. + +This should be your first long-term goal. Happy hacking :-) From noreply at buildbot.pypy.org Wed Aug 21 09:45:46 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Aug 2013 09:45:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Mention early work (hi Christian) Message-ID: <20130821074546.79F471C02EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66275:bc66a4bf350e Date: 2013-08-21 09:45 +0200 http://bitbucket.org/pypy/pypy/changeset/bc66a4bf350e/ Log: Mention early work (hi Christian) diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -268,7 +268,11 @@ this point the goal would not be to run the tests in these directories! Doing so would create more confusion to work around. Instead, the goal would be to fix some ``LONG-versus-Signed`` issues, and if necessary -make sure that the tests still run fine e.g. on Win32. +make sure that the tests still run fine e.g. on Win32. There was some +early work done notably in ``rpython/rlib/rarithmetic`` with the goal of +running all the tests on Win64, but I think by now that it's a bad idea: +again, we should only make sure that the tests work on Win32, and that +PyPy translates on Win64 and then run the (standard lib-python) tests. This should get you a translation of PyPy with ``-O2``, i.e. without the JIT. Check carefully the warnings of the C compiler at the end. 
I From noreply at buildbot.pypy.org Wed Aug 21 10:01:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Aug 2013 10:01:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Two new SSE 4 instructions Message-ID: <20130821080144.E0ED21C02EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66276:8de0c4e1a787 Date: 2013-08-21 09:59 +0200 http://bitbucket.org/pypy/pypy/changeset/8de0c4e1a787/ Log: Two new SSE 4 instructions diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -489,7 +489,7 @@ 'pabs', 'pack', 'padd', 'palign', 'pand', 'pavg', 'pcmp', 'pextr', 'phadd', 'phsub', 'pinsr', 'pmadd', 'pmax', 'pmin', 'pmovmsk', 'pmul', 'por', 'psadb', 'pshuf', 'psign', 'psll', 'psra', 'psrl', - 'psub', 'punpck', 'pxor', + 'psub', 'punpck', 'pxor', 'pmovzx', 'pmovsx', # all vectors don't produce pointers 'v', # sign-extending moves should not produce GC pointers From noreply at buildbot.pypy.org Wed Aug 21 11:27:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Aug 2013 11:27:34 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Add a XXX about that case Message-ID: <20130821092734.E95241C0189@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66277:a4d5e4fccf79 Date: 2013-08-21 11:26 +0200 http://bitbucket.org/pypy/pypy/changeset/a4d5e4fccf79/ Log: Add a XXX about that case diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -186,6 +186,8 @@ for v, cat in category.items(): if cat == 'W': category[v] = 'V' + # XXX the V2W barrier is only necessary when we're + # writing pointers, not if we're writing ints effectinfo = stmtransformer.write_analyzer.analyze( op, graphinfo=graphinfo) From noreply at buildbot.pypy.org Wed Aug 21 13:19:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Aug 2013 13:19:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Develop and use other potential ideas Message-ID: <20130821111908.544B81C3050@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66278:c6dd660901e1 Date: 2013-08-21 13:18 +0200 http://bitbucket.org/pypy/pypy/changeset/c6dd660901e1/ Log: Develop and use other potential ideas diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -262,10 +262,9 @@ casted to one type or the other really have 32 and 64 bits respectively, on Win64. -Once these basic tests work, you need to review ``pypy/module/*/`` for -usages of ``rffi.LONG`` versus ``lltype.Signed``. Some other places -might need a similar review too, like ``rpython/rlib/``. Important: at -this point the goal would not be to run the tests in these directories! +Once these basic tests work, you need to review ``rpython/rlib/`` for +usages of ``rffi.LONG`` versus ``lltype.Signed``. Important: at this +point the goal would not be to run the tests in these directories! Doing so would create more confusion to work around. Instead, the goal would be to fix some ``LONG-versus-Signed`` issues, and if necessary make sure that the tests still run fine e.g. on Win32. There was some @@ -274,9 +273,27 @@ again, we should only make sure that the tests work on Win32, and that PyPy translates on Win64 and then run the (standard lib-python) tests. 
-This should get you a translation of PyPy with ``-O2``, i.e. without the -JIT. Check carefully the warnings of the C compiler at the end. I -think that MSVC is "nice" in the sense that by default a lot of -mismatches of integer sizes are reported as warnings. +The goal here is to get a translation of PyPy with ``-O2`` with a +minimal set of modules, starting with ``--no-allworkingmodules``. Check +carefully the warnings of the C compiler at the end. I think that MSVC +is "nice" in the sense that by default a lot of mismatches of integer +sizes are reported as warnings. -This should be your first long-term goal. Happy hacking :-) +Why first try to translate when the modules ``pypy/module/*/`` may need +fixes too? The idea is that you really need to get a minimal translated +PyPy, with the minimal amount of modules (this used to be with the +``--translationmodules`` option, if it still works). Then we have a +Python interpreter, namely this minimal PyPy, which can run a full +translation and which has the "correct" setting of ``sys.maxint`` and +64-bit integers. So once we get this minimal PyPy we can use it to +translate a complete PyPy with less troubles. (We still need to review +e.g. ``rffi.LONG`` / ``lltype.Signed`` issues, obviously.) + +Alternatively, you might try to hack CPython to have ints store a 64-bit +number and ``sys.maxint`` be 2**63-1. This might be easier, and work as +long as you don't try too hard to crash it because of the precision loss +that undoubtedly occurs everywhere. Running the translation with such a +hacked CPython would give the same effect as running it on top of the +minimal PyPy described above. + +Happy hacking :-) From noreply at buildbot.pypy.org Wed Aug 21 13:22:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Aug 2013 13:22:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a note. Message-ID: <20130821112210.C21541C3050@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66279:1d3e8ab7f4dd Date: 2013-08-21 13:21 +0200 http://bitbucket.org/pypy/pypy/changeset/1d3e8ab7f4dd/ Log: Add a note. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -294,6 +294,10 @@ long as you don't try too hard to crash it because of the precision loss that undoubtedly occurs everywhere. Running the translation with such a hacked CPython would give the same effect as running it on top of the -minimal PyPy described above. +minimal PyPy described above. (Note that it's ok to do that: once we get +a full PyPy, we can simply tell people that future translations must be +run on top of that. We end up with a strange kind of dependency, but +I believe it's ok here, as Windows executables are supposed to never be +broken by newer versions of Windows.) Happy hacking :-) From noreply at buildbot.pypy.org Wed Aug 21 15:56:20 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 Aug 2013 15:56:20 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130821135620.30E9A1C361F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66280:8360b8f1a263 Date: 2013-08-21 12:47 +0200 http://bitbucket.org/pypy/pypy/changeset/8360b8f1a263/ Log: Fix. 
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -611,7 +611,7 @@ from pypy.objspace.std.strbufobject import W_StringBufferObject builder = StringBuilder() builder.append(self._value) - builder.append(w_other._value) + builder.append(self._op_val(space, w_other)) return W_StringBufferObject(builder) return self._StringMethods_descr_add(space, w_other) From noreply at buildbot.pypy.org Wed Aug 21 15:56:23 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 Aug 2013 15:56:23 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix comparison of W_BytesObject with W_StringBufferObject. Message-ID: <20130821135623.BE4D71C361F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66281:a25bcb6612ef Date: 2013-08-21 15:55 +0200 http://bitbucket.org/pypy/pypy/changeset/a25bcb6612ef/ Log: Fix comparison of W_BytesObject with W_StringBufferObject. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -566,31 +566,49 @@ return space.wrap(StringBuffer(self._value)) def descr_eq(self, space, w_other): + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value == w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value == w_other._value) def descr_ne(self, space, w_other): + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value != w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value != w_other._value) def descr_lt(self, space, w_other): + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value < w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value < w_other._value) def descr_le(self, space, w_other): + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value <= w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value <= w_other._value) def descr_gt(self, space, w_other): + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value > w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value > w_other._value) def descr_ge(self, space, w_other): + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value >= w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value >= w_other._value) From noreply at buildbot.pypy.org Wed Aug 21 17:40:14 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 21 Aug 2013 17:40:14 +0200 (CEST) Subject: [pypy-commit] pypy optmodel-refactor: Progress on building new sort of bytecode - intermediate checkin (I just Message-ID: 
<20130821154015.007251C02DB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optmodel-refactor Changeset: r66282:597c1b69f100 Date: 2013-08-21 17:39 +0200 http://bitbucket.org/pypy/pypy/changeset/597c1b69f100/ Log: Progress on building new sort of bytecode - intermediate checkin (I just need a point of reference) diff --git a/rpython/jit/codewriter/assembler.py b/rpython/jit/codewriter/assembler.py --- a/rpython/jit/codewriter/assembler.py +++ b/rpython/jit/codewriter/assembler.py @@ -36,7 +36,7 @@ self.fix_labels() self.check_result() if jitcode is None: - jitcode = JitCode(ssarepr.name) + jitcode = JitCode(ssarepr.name, self._count_jitcodes) jitcode._ssarepr = ssarepr self.make_jitcode(jitcode) if self._count_jitcodes < 20: # stop if we have a lot of them diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -22,6 +22,7 @@ self.cpu = cpu self.jitdrivers_sd = jitdrivers_sd self.jitcodes = {} # map {graph: jitcode} + self.alljitcodes = [] # list of all jitcodes self.unfinished_graphs = [] # list of graphs with pending jitcodes self.callinfocollection = CallInfoCollection() if hasattr(cpu, 'rtyper'): # for tests @@ -167,8 +168,9 @@ '%s has _gctransformer_hint_close_stack_' % (graph,)) # fnaddr, calldescr = self.get_jitcode_calldescr(graph) - jitcode = JitCode(graph.name, fnaddr, calldescr, + jitcode = JitCode(graph.name, len(self.jitcodes), fnaddr, calldescr, called_from=called_from) + self.alljitcodes.append(jitcode) self.jitcodes[graph] = jitcode self.unfinished_graphs.append(graph) return jitcode diff --git a/rpython/jit/codewriter/jitcode.py b/rpython/jit/codewriter/jitcode.py --- a/rpython/jit/codewriter/jitcode.py +++ b/rpython/jit/codewriter/jitcode.py @@ -8,8 +8,10 @@ _empty_r = [] _empty_f = [] - def __init__(self, name, fnaddr=None, calldescr=None, called_from=None): + def __init__(self, name, number=-1, fnaddr=None, calldescr=None, + called_from=None): self.name = name + self.number = number self.fnaddr = fnaddr self.calldescr = calldescr self.is_portal = False diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1608,10 +1608,10 @@ def resume_in_blackhole(metainterp_sd, jitdriver_sd, resumedescr, deadframe, all_virtuals=None): - from rpython.jit.metainterp.resume import blackhole_from_resumedata + from rpython.jit.metainterp.resume2 import blackhole_from_resumedata #debug_start('jit-blackhole') blackholeinterp = blackhole_from_resumedata( - metainterp_sd.blackholeinterpbuilder, + metainterp_sd, jitdriver_sd, resumedescr, deadframe, diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -133,7 +133,8 @@ [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] try: - optimize_trace(metainterp_sd, part, enable_opts) + optimize_trace(metainterp_sd, metainterp.resume_bc.boxes, part, + enable_opts) except InvalidLoop: return None target_token = part.operations[0].getdescr() @@ -160,7 +161,7 @@ jumpargs = part.operations[-1].getarglist() try: - optimize_trace(metainterp_sd, part, enable_opts) + optimize_trace(metainterp_sd, allboxes, part, enable_opts) except InvalidLoop: return None @@ -212,7 +213,7 @@ orignial_label = label.clone() assert label.getopnum() == rop.LABEL try: - optimize_trace(metainterp_sd, part, 
jitdriver_sd.warmstate.enable_opts) + optimize_trace(metainterp_sd, allboxes, part, jitdriver_sd.warmstate.enable_opts) except InvalidLoop: # Fall back on jumping to preamble target_token = label.getdescr() @@ -222,7 +223,8 @@ [ResOperation(rop.JUMP, inputargs[:], None, descr=loop_jitcell_token)] try: - optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts, + optimize_trace(metainterp_sd, allboxes, part, + jitdriver_sd.warmstate.enable_opts, inline_short_preamble=False) except InvalidLoop: return None @@ -630,6 +632,7 @@ def copy_all_attributes_into(self, res): # XXX a bit ugly to have to list them all here + # XXX kill that res.rd_snapshot = self.rd_snapshot res.rd_frame_info_list = self.rd_frame_info_list res.rd_numb = self.rd_numb @@ -637,6 +640,8 @@ res.rd_virtuals = self.rd_virtuals res.rd_pendingfields = self.rd_pendingfields res.rd_count = self.rd_count + res.rd_bytecode = self.rd_bytecode + res.rd_bytecode_position = self.rd_bytecode_position def _clone_if_mutable(self): res = ResumeGuardDescr() @@ -851,7 +856,8 @@ else: inline_short_preamble = True try: - optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) + optimize_trace(metainterp_sd, allboxes, + new_trace, state.enable_opts, inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -48,7 +48,8 @@ return optimizations, unroll -def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): +def optimize_trace(metainterp_sd, allboxes, loop, enable_opts, + inline_short_preamble=True): """Optimize loop.operations to remove internal overheadish operations. 
""" @@ -60,11 +61,11 @@ if unroll: optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) else: - optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer = Optimizer(metainterp_sd, loop, allboxes, optimizations) optimizer.propagate_all_forward() finally: debug_stop("jit-optimize") - + if __name__ == '__main__': print ALL_OPTS_NAMES diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -1,5 +1,6 @@ import os +from rpython.rlib.objectmodel import specialize from rpython.jit.metainterp.history import Const from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -1,12 +1,13 @@ from rpython.jit.metainterp import jitprof, resume, compile from rpython.jit.metainterp.executor import execute_nonspec -from rpython.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF +from rpython.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF, AbstractFailDescr from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \ ImmutableIntUnbounded, \ IntLowerBound, MININT, MAXINT from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp from rpython.jit.metainterp.typesystem import llhelper +from rpython.jit.metainterp.resume2 import OptimizerResumeInterpreter from rpython.tool.pairtype import extendabletype from rpython.rlib.debug import debug_print from rpython.rlib.objectmodel import specialize @@ -340,7 +341,21 @@ class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=None): + def __init__(self, metainterp_sd, loop, allboxes, optimizations=None): + self.allboxes = {} + for k, v in allboxes.iteritems(): + self.allboxes[v] = k + # we fish bytecode from the loop + for op in loop.operations: + if op.is_guard(): + descr = op.getdescr() + assert isinstance(descr, AbstractFailDescr) + bc = descr.rd_bytecode + jitcodes = metainterp_sd.alljitcodes + self.resume_bc = OptimizerResumeInterpreter(bc, jitcodes) + break + else: + self.resume_bc = None # trivial case self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop @@ -570,6 +585,9 @@ del self.replaces_guard[op] return else: + descr = op.getdescr() + assert isinstance(descr, AbstractFailDescr) + self.resume_bc.interpret_until(descr.rd_bytecode_position) op = self.store_final_boxes_in_guard(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True @@ -594,9 +612,10 @@ assert pendingfields is not None descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) - modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) + #modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) try: - newboxes = modifier.finish(self, pendingfields) + #newboxes = modifier.finish(self, pendingfields) + newboxes = self.resume_bc.get_current_boxes(self.allboxes) if len(newboxes) > self.metainterp_sd.options.failargs_limit: raise resume.TagOverflow except resume.TagOverflow: diff --git a/rpython/jit/metainterp/pyjitpl.py 
b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -6,6 +6,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter.jitcode import JitCode, SwitchDictDescr from rpython.jit.metainterp import history, compile, resume, executor, jitexc +from rpython.jit.metainterp import resume2 from rpython.jit.metainterp.heapcache import HeapCache from rpython.jit.metainterp.history import (Const, ConstInt, ConstPtr, ConstFloat, Box, TargetToken) @@ -1467,9 +1468,10 @@ logger_noopt = None logger_ops = None - def __init__(self, cpu, options, + def __init__(self, cpu, options, alljitcodes, ProfilerClass=EmptyProfiler, warmrunnerdesc=None): self.cpu = cpu + self.alljitcodes = alljitcodes self.stats = self.cpu.stats self.options = options self.logger_noopt = Logger(self) @@ -1675,6 +1677,13 @@ return self.jitdriver_sd is not None and jitcode is self.jitdriver_sd.mainjitcode def newframe(self, jitcode, greenkey=None): + if not self.framestack: + pc = 0 + boxlist = [] + else: + pc = self.framestack[-1].pc + boxlist = self.framestack[-1].get_list_of_active_boxes(True) + self.resume_bc.enter_function(jitcode, pc, boxlist) if jitcode.is_portal: self.portal_call_depth += 1 self.call_ids.append(self.current_call_id) @@ -1691,6 +1700,7 @@ return f def popframe(self): + self.resume_bc.leave_function() frame = self.framestack.pop() jitcode = frame.jitcode if jitcode.is_portal: @@ -1808,8 +1818,14 @@ saved_pc = frame.pc if resumepc >= 0: frame.pc = resumepc - resume.capture_resumedata(self.framestack, virtualizable_boxes, - self.virtualref_boxes, resumedescr) + else: + resumepc = frame.pc + boxlist = self.framestack[-1].get_list_of_active_boxes(False) + else: + boxlist = [] + #resume.capture_resumedata(self.framestack, virtualizable_boxes, + # self.virtualref_boxes, resumedescr) + self.resume_bc.capture_resumedata(resumedescr, resumepc, boxlist) if self.framestack: self.framestack[-1].pc = saved_pc @@ -2223,6 +2239,7 @@ resume_at_jump_descr, try_disabling_unroll=False): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] + self.resume_bc.finish(self, start) if not self.partial_trace: ptoken = self.get_procedure_token(greenkey) if ptoken is not None and ptoken.target_tokens is not None: @@ -2356,6 +2373,7 @@ # ----- make a new frame ----- self.portal_call_depth = -1 # always one portal around self.framestack = [] + self.resume_bc = resume2.ResumeBytecodeBuilder(self.staticdata) f = self.newframe(self.jitdriver_sd.mainjitcode) f.setup_call(original_boxes) assert self.portal_call_depth == 0 @@ -2371,6 +2389,9 @@ try: self.portal_call_depth = -1 # always one portal around self.history = history.History() + self.resume_bc = resume2.ResumeBytecodeBuilder(self.staticdata) + self.resume_bc.start_from_descr(resumedescr) + xxx inputargs_and_holes = self.rebuild_state_after_failure(resumedescr, deadframe) self.history.inputargs = [box for box in inputargs_and_holes if box] diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/resume2.py @@ -0,0 +1,237 @@ + +""" The new resume that records bytecode. The idea is that bytecode is +incremental and minimal for the case the guard is never incoked. However, +if the guard fails it can be compressed into a starting point. 
+ +opcodes: + +UPDATE_PC [list-of-alive-boxes] +ENTER_FRAME [list-of-alive-boxes] +LEAVE_FRAME + +""" + +from rpython.rlib import rstack + +ENTER_FRAME = chr(0) +LEAVE_FRAME = chr(1) +CAPTURE_POINT = chr(2) + +BC_NAMES = ['ENTER_FRAME', 'LEAVE_FRAME', 'CAPTURE_POINT'] + +class ResumeBytecodeBuilder(object): + def __init__(self, metainterp_sd): + self.bc = [] + self.boxes = {} + self.metainterp_sd = metainterp_sd + + def enumerate_box(self, box): + if box in self.boxes: + return self.boxes[box] + else: + no = len(self.boxes) + self.boxes[box] = no + return no + + def write(self, c): + self.bc.append(c) + + def write_short(self, s): + assert 0 <= s <= (2**16 - 1) + self.write(chr(s >> 8)) + self.write(chr(s & 0xff)) + + def write_list(self, l): + self.write_short(len(l)) + for item in l: + self.write_short(item) + + def enter_function(self, jitcode, pc, boxlist): + self.write(ENTER_FRAME) + self.write_short(jitcode.number) + self.write_short(pc) + self.write_list([self.enumerate_box(box) for box in boxlist]) + + def leave_function(self): + self.write(LEAVE_FRAME) + + def start_from_descr(self, descr): + xxx + + def capture_resumedata(self, descr, resumepc, boxlist): + self.write(CAPTURE_POINT) + self.write_short(resumepc) + self.write_list([self.enumerate_box(box) for box in boxlist]) + descr.rd_bytecode_position = len(self.bc) + + def finish(self, metainterp, start): + from rpython.jit.metainterp.history import AbstractFailDescr + + assert start == 0 + finished_bc = ''.join(self.bc) + for op in metainterp.history.operations: + if op.is_guard(): + descr = op.getdescr() + assert isinstance(descr, AbstractFailDescr) + descr.rd_bytecode = finished_bc + print_bc(finished_bc, self.metainterp_sd.alljitcodes) + +class AbstractBytecodeInterpreter(object): + def __init__(self, bc, alljitcodes): + self.bc = bc + self.alljitcodes = alljitcodes + self.init() + + def init(self): + pass + + def read_short(self): + res = (ord(self.bc[self.pos]) << 8) + ord(self.bc[self.pos + 1]) + self.pos += 2 + return res + + def read_list(self): + length = self.read_short() + l = [] + for i in range(length): + l.append(self.read_short()) + return l + + def interpret(self): + self.pos = 0 + self.interpret_until(len(self.bc)) + + def interpret_until(self, stop): + self.stop = stop + while self.pos < stop: + opcode = self.bc[self.pos] + self.pos += 1 + if opcode == ENTER_FRAME: + jitcode = self.alljitcodes[self.read_short()] + pc = self.read_short() + boxlist = self.read_list() + self.ENTER_FRAME(jitcode, pc, boxlist) + elif opcode == LEAVE_FRAME: + self.LEAVE_FRAME() + elif opcode == CAPTURE_POINT: + pc = self.read_short() + boxlist = self.read_list() + self.CAPTURE_POINT(pc, boxlist) + +class BytecodePrinter(AbstractBytecodeInterpreter): + def ENTER_FRAME(self, jitcode, pc, boxnos): + print "ENTER_FRAME %s %d %s" % (jitcode.name, pc, boxnos) + + def CAPTURE_POINT(self, pc, boxlist): + print "CAPTURE_POINT %d %s" % (pc, boxlist) + + def LEAVE_FRAME(self): + print "LEAVE_FRAME" + +class DirectResumeBuilder(AbstractBytecodeInterpreter): + def init(self): + self.pos = 0 + self.framestack = [] + + def ENTER_FRAME(self, jitcode, pc, boxlist): + self.framestack.append((jitcode, pc, boxlist)) + + def CAPTURE_POINT(self, pc, boxlist): + if self.pos == self.stop: + self.framestack.append((None, pc, boxlist)) + + def LEAVE_FRAME(self): + self.framestack.pop() + +class OptimizerResumeInterpreter(AbstractBytecodeInterpreter): + def init(self): + self.pos = 0 + self.framestack = [] + self.cur_len = 0 + self.cur_boxlist = None + + def 
get_current_boxes(self, allboxes): + newboxes = [None] * (self.cur_len + len(self.cur_boxlist)) + i = 0 + j = 0 + while i < len(self.framestack): + boxlist = self.framestack[i] + newboxes[j:j + len(boxlist)] = boxlist + i += 1 + j += len(boxlist) + newboxes[j:] = self.cur_boxlist + return [allboxes[i] for i in newboxes] + + def ENTER_FRAME(self, jitcode, pc, boxlist): + self.framestack.append(boxlist) + self.cur_len += len(boxlist) + + def CAPTURE_POINT(self, pc, boxlist): + self.cur_boxlist = boxlist + + def LEAVE_FRAME(self): + el = self.framestack.pop() + self.cur_len -= len(el) + +def print_bc(bc, jitcodes): + BytecodePrinter(bc, jitcodes).interpret() + +class InfoFiller(object): + def __init__(self, cpu, deadframe, bhinterp, boxlist): + self.cpu = cpu + self.boxlist = boxlist + self.deadframe = deadframe + self.bhinterp = bhinterp + + def callback_i(self, index, register_index): + backend_index = self.boxlist[index] + intval = self.cpu.get_int_value(self.deadframe, backend_index) + self.bhinterp.setarg_i(register_index, intval) + + def callback_r(self, index, register_index): + xxx + + def callback_f(self, index, register_index): + xxx + +def blackhole_from_resumedata(metainterp_sd, jitdriver_sd, descr, + deadframe, all_virtuals=None): + # The initialization is stack-critical code: it must not be interrupted by + # StackOverflow, otherwise the jit_virtual_refs are left in a dangling state. + assert all_virtuals is None + blackholeinterpbuilder = metainterp_sd.blackholeinterpbuilder + alljitcodes = metainterp_sd.alljitcodes + rstack._stack_criticalcode_start() + try: + pos = descr.rd_bytecode_position + rb = DirectResumeBuilder(descr.rd_bytecode, alljitcodes) + rb.interpret_until(pos) + framestack = rb.framestack + finally: + rstack._stack_criticalcode_stop() + + # + # First get a chain of blackhole interpreters whose length is given + # by the size of framestack. The first one we get must be + # the bottom one, i.e. the last one in the chain, in order to make + # the comment in BlackholeInterpreter.setposition() valid. + nextbh = None + for i in range(len(framestack)): + curbh = blackholeinterpbuilder.acquire_interp() + curbh.nextblackholeinterp = nextbh + nextbh = curbh + firstbh = nextbh + # + # Now fill the blackhole interpreters with resume data. 
+ curbh = firstbh + for i in range(len(framestack) - 1, 0, -1): + jitcode = framestack[i - 1][0] + pc = framestack[i - 1][1] + boxlist = framestack[i][2] + curbh.setposition(jitcode, pc) + info = curbh.get_current_position_info() + filler = InfoFiller(metainterp_sd.cpu, deadframe, curbh, boxlist) + info.enumerate_vars(filler.callback_i, filler.callback_r, + filler.callback_f, None) + curbh = curbh.nextblackholeinterp + return firstbh diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -908,5 +908,11 @@ res = self.meta_interp(f, [20, 10]) assert res == f(20, 10) -class TestLLtype(LoopTest, LLJitMixin): - pass +#class TestLLtype(LoopTest, LLJitMixin): + #pass + +class TestLLtype2(LoopTest, LLJitMixin): + enable_opts = '' + + def check_resops(self, *args, **kwds): + pass diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -198,7 +198,8 @@ elif self.opt.listops: self.prejit_optimizations_minimal_inline(policy, graphs) - self.build_meta_interp(ProfilerClass) + self.build_meta_interp(ProfilerClass, + self.codewriter.callcontrol.alljitcodes) self.make_args_specifications() # from rpython.jit.metainterp.virtualref import VirtualRefInfo @@ -427,9 +428,10 @@ cpu.supports_singlefloats = False self.cpu = cpu - def build_meta_interp(self, ProfilerClass): + def build_meta_interp(self, ProfilerClass, alljitcodes): self.metainterp_sd = MetaInterpStaticData(self.cpu, self.opt, + alljitcodes, ProfilerClass=ProfilerClass, warmrunnerdesc=self) From noreply at buildbot.pypy.org Wed Aug 21 19:05:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Aug 2013 19:05:06 +0200 (CEST) Subject: [pypy-commit] pypy gc-del-2: in-progress Message-ID: <20130821170506.7ED531C01F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-del-2 Changeset: r66283:c37e00e43d40 Date: 2013-08-21 19:04 +0200 http://bitbucket.org/pypy/pypy/changeset/c37e00e43d40/ Log: in-progress diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -321,8 +321,9 @@ # finalizers, they are made young again. self.young_objects_not_in_nursery = self.null_address_dict() # - # A list of all objects with finalizers (these are never young). - self.objects_with_finalizers = self.AddressDeque() + # 4 lists of all objects with finalizers, young or old, light or not. + self.young_objects_with_finalizers = self.AddressDeque() + self.old_objects_with_finalizers = self.AddressDeque() self.young_objects_with_light_finalizers = self.AddressStack() self.old_objects_with_light_finalizers = self.AddressStack() # @@ -526,18 +527,10 @@ totalsize = size_gc_header + size rawtotalsize = raw_malloc_usage(totalsize) # - # If the object needs a finalizer, ask for a rawmalloc. - # The following check should be constant-folded. - if needs_finalizer and not is_finalizer_light: - ll_assert(not contains_weakptr, - "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) - self.objects_with_finalizers.append(obj) - # # If totalsize is greater than nonlarge_max (which should never be # the case in practice), ask for a rawmalloc. The following check # should be constant-folded. 
- elif rawtotalsize > self.nonlarge_max: + if rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") obj = self.external_malloc(typeid, 0) @@ -559,14 +552,19 @@ # Build the object. llarena.arena_reserve(result, totalsize) obj = result + size_gc_header - if is_finalizer_light: - self.young_objects_with_light_finalizers.append(obj) self.init_gc_object(result, typeid, flags=0) # # If it is a weakref, record it (check constant-folded). if contains_weakptr: self.young_objects_with_weakrefs.append(obj) # + # More checks for recording, constant-folded + if needs_finalizer: + if is_finalizer_light: + self.young_objects_with_light_finalizers.append(obj) + else: + self.young_objects_with_finalizers.append(obj) + # return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) @@ -683,9 +681,9 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): - """Allocate a large object using the ArenaCollection or - raw_malloc(), possibly as an object with card marking enabled, + def external_malloc(self, typeid, length): + """Allocate a large object using raw_malloc(), + possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 'length' should be specified as 0 if the object is not varsized. The returned object is fully initialized and zero-filled.""" @@ -722,28 +720,9 @@ self.minor_collection() self.major_collection(raw_malloc_usage(totalsize)) # - # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: - # - # Yes. Round up 'totalsize' (it cannot overflow and it - # must remain <= self.small_request_threshold.) - totalsize = llarena.round_up_for_allocation(totalsize) - ll_assert(raw_malloc_usage(totalsize) <= - self.small_request_threshold, - "rounding up made totalsize > small_request_threshold") - # - # Allocate from the ArenaCollection and clear the memory returned. - result = self.ac.malloc(totalsize) - llmemory.raw_memclear(result, totalsize) - # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. - extra_flags = GCFLAG_TRACK_YOUNG_PTRS - # - else: - # No, so proceed to allocate it externally with raw_malloc(). + # (preserves the indentation of the following block) + if 1: + # Allocate the object externally with raw_malloc(). # Check if we need to introduce the card marker bits area. if (self.card_page_indices <= 0 # <- this check is constant-folded or not self.has_gcptr_in_varsize(typeid) or @@ -1336,7 +1315,7 @@ if self.young_objects_with_weakrefs.non_empty(): self.invalidate_young_weakrefs() if self.young_objects_with_light_finalizers.non_empty(): - self.deal_with_young_objects_with_finalizers() + self.deal_with_young_objects_with_light_finalizers() # # Clear this mapping. if self.nursery_objects_shadows.length() > 0: @@ -1654,10 +1633,10 @@ # # Finalizer support: adds the flag GCFLAG_VISITED to all objects # with a finalizer and all objects reachable from there (and also - # moves some objects from 'objects_with_finalizers' to + # moves some objects from 'old_objects_with_finalizers' to # 'run_finalizers'). 
- if self.objects_with_finalizers.non_empty(): - self.deal_with_objects_with_finalizers() + if self.old_objects_with_finalizers.non_empty(): + self.deal_with_old_objects_with_finalizers() # self.objects_to_trace.delete() # @@ -1665,7 +1644,7 @@ if self.old_objects_with_weakrefs.non_empty(): self.invalidate_old_weakrefs() if self.old_objects_with_light_finalizers.non_empty(): - self.deal_with_old_objects_with_finalizers() + self.deal_with_old_objects_with_light_finalizers() # # Walk all rawmalloced objects and free the ones that don't @@ -1921,7 +1900,7 @@ # ---------- # Finalizers - def deal_with_young_objects_with_finalizers(self): + def deal_with_young_objects_with_light_finalizers(self): """ This is a much simpler version of dealing with finalizers and an optimization - we can reasonably assume that those finalizers don't do anything fancy and *just* call them. Among other things @@ -1937,7 +1916,7 @@ obj = self.get_forwarding_address(obj) self.old_objects_with_light_finalizers.append(obj) - def deal_with_old_objects_with_finalizers(self): + def deal_with_old_objects_with_light_finalizers(self): """ This is a much simpler version of dealing with finalizers and an optimization - we can reasonably assume that those finalizers don't do anything fancy and *just* call them. Among other things @@ -1957,7 +1936,7 @@ self.old_objects_with_light_finalizers.delete() self.old_objects_with_light_finalizers = new_objects - def deal_with_objects_with_finalizers(self): + def deal_with_old_objects_with_finalizers(self): # Walk over list of objects with finalizers. # If it is not surviving, add it to the list of to-be-called # finalizers and make it survive, to make the finalizer runnable. diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py --- a/rpython/memory/test/test_minimark_gc.py +++ b/rpython/memory/test/test_minimark_gc.py @@ -16,6 +16,7 @@ def test_finalizer_young_obj(self): class A: def __del__(self): + State() # so that it is not a light finalizer state.seen += 1 class State: pass @@ -72,7 +73,7 @@ def __init__(self, n, next): self.n = n self.next = next - def __del__(self): + def __del__(self): # not a light finalizer state.freed.append(self.n) class State: pass From noreply at buildbot.pypy.org Thu Aug 22 03:35:53 2013 From: noreply at buildbot.pypy.org (andrewchambers) Date: Thu, 22 Aug 2013 03:35:53 +0200 (CEST) Subject: [pypy-commit] pypy incremental-gc: added some failing tests Message-ID: <20130822013553.8BC161C136D@cobra.cs.uni-duesseldorf.de> Author: Andrew Chambers Branch: incremental-gc Changeset: r66284:74b07286c87d Date: 2013-08-22 13:35 +1200 http://bitbucket.org/pypy/pypy/changeset/74b07286c87d/ Log: added some failing tests diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -130,13 +130,24 @@ # by the incremental collection GCFLAG_GRAY = first_gcflag << 8 -# The following flag is just an alias for the gray flag. It -# is only used by major collections, it is set on objects -# which are allocated during the sweeping and finalization states -# it has different meaning outside of the sweeping state. -# This flag should not be reset by any minor collection operation -GCFLAG_NOSWEEP = first_gcflag << 8 +# This flag allows sweeping to be incrementalised. +# it is set when an object would be swept, but isnt +# because this flag was not set. 
The point of this +# flag is to make sure an object has survived through +# at least one major collection so we are sure +# it is unreachable. It is needed because a write +# barrier has no way of knowing which objects are truly +# unvisited, or they were simply already reset by +# a sweep. +GCFLAG_CANSWEEP = first_gcflag << 9 +# Flag indicates object is old. It is needed by the +# write barrier code so that we can track when a young +# reference is written into a black object. +# we must make a shadow and prevent such an object from being freed by +# the next minor collection so that we dont get dead objects in +# objects_to_trace during marking. +GCFLAG_OLD = first_gcflag << 10 # States for the incremental GC @@ -155,7 +166,7 @@ -TID_MASK = (first_gcflag << 9) - 1 +TID_MASK = (first_gcflag << 11) - 1 FORWARDSTUB = lltype.GcStruct('forwarding_stub', @@ -1636,6 +1647,11 @@ self._visit_young_rawmalloced_object(obj) return # + + # Do this after check we are old to avoid cache misses like + # In the comment above. + self.header(obj).tid |= GCFLAG_OLD + size_gc_header = self.gcheaderbuilder.size_gc_header if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0: # @@ -1708,7 +1724,7 @@ hdr = self.header(obj) if hdr.tid & GCFLAG_VISITED: return - hdr.tid |= GCFLAG_VISITED + hdr.tid |= (GCFLAG_VISITED|GCFLAG_OLD) # # we just made 'obj' old, so we need to add it to the correct lists added_somewhere = False @@ -1931,6 +1947,7 @@ def free_rawmalloced_object_if_unvisited(self, obj): if self.header(obj).tid & GCFLAG_VISITED: + self.header(obj).tid |= GCFLAG_OLD self.header(obj).tid &= ~(GCFLAG_VISITED|GCFLAG_GRAY) # survives self.old_rawmalloced_objects.append(obj) else: diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -640,24 +640,149 @@ newobj1 = self.malloc(S) newobj2 = self.malloc(S) newobj1.x = 1337 - #newobj2.x = 1338 + newobj2.x = 1338 self.write(oldobj,'next',newobj1) self.gc.debug_gc_step_until(incminimark.STATE_SCANNING) #should not be cleared even though it was allocated while sweeping assert newobj1.x == 1337 - #assert newobj2.x == 1338 + # XXX find appropriate exception type + assert py.test.raises(RuntimeError,"newobj2.x") - def test_new_marking_write_sweeping(self): - - assert False + #def test_new_marking_write_sweeping(self): + # + # assert False - def test_finalizing_new_object(self): + #def test_finalizing_new_object(self): # Must test an object with a finalizer # being added just before finalizers start being called # must test this new objects finalizer is not called # XXX maybe cant do this in test_direct and need test_transformed - assert False + # assert False + def test_young_gray_collected(self): + from rpython.memory.gc import incminimark + + # Test the write barrier triggers on a young object + # but doesnt crash when that object is collected + + for i in range(2): + curobj = self.malloc(S) + curobj.x = i + self.stackroots.append(curobj) + + + self.gc.debug_gc_step_until(incminimark.STATE_MARKING) + + #process one object + self.gc.debug_gc_step() + + oldobj = self.stackroots[-1] + + newobj = self.malloc(S) + newobj.x = 5 + # make newobj gray + self.write(oldobj,'next',newobj) + #the barrier should have made the object gray + newhdr = self.gc.header(llmemory.cast_ptr_to_adr(newobj)) + assert newhdr.tid & incminimark.GCFLAG_GRAY + + # make newobj unreachable again + self.write(oldobj,'next',oldobj) + + #complete collection + 
self.gc.debug_gc_step_until(incminimark.STATE_SCANNING) + self.gc.debug_check_consistency() + + # object cant be collected in this case, must be made old. + assert newobj.x == 5 + + self.gc.debug_gc_step_until(incminimark.STATE_SCANNING) + + # now object is collected + assert py.test.raises(RuntimeError,"newobj.x") + + # Test trying to be a bit comprehensive about + # states and types of objects + def test_allocate_states(self): + from rpython.memory.gc import incminimark + largeobj_size = self.gc.nonlarge_max + 1 + + assert self.gc.gc_state == incminimark.STATE_SCANNING + assert self.gc.get_total_memory_used() == 0 + + for i in range(5): + curobj = self.malloc(S) + curobj.x = i + self.stackroots.append(curobj) + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(curobj)) + + reachableroot = curobj + + for i in range(5): + curobj = self.malloc(VAR, largeobj_size) + self.stackroots.append(curobj) + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(curobj)) + + assert self.gc.gc_state == incminimark.STATE_SCANNING + + + nallocated = {} + + reachable = [] + unreachable = [] + + while True: + + if self.gc.gc_state not in nallocated: + nallocated[self.gc.gc_state] = 0 + + if nallocated[self.gc.gc_state] < 1: + unreachableobj = self.malloc(S) + reachableobj = self.malloc(S) + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(reachableobj)) + reachableviayoungobj = self.malloc(S) + self.write(reachableobj,'next',reachableviayoungobj) + unreachableobj.x = 150 + reachableobj.x = 150 + reachableviayoungobj.x = 150 + + self.write(reachableroot,'next',reachableobj) + reachableroot = reachableobj + + unreachable.append(unreachableobj) + reachable.append(reachableobj) + reachable.append(reachableviayoungobj) + + nallocated[self.gc.gc_state] += 1 + + if self.gc.gc_state == incminimark.STATE_SCANNING: + pass + elif self.gc.gc_state == incminimark.STATE_MARKING: + pass + elif self.gc.gc_state == incminimark.STATE_SWEEPING_RAWMALLOC: + pass + elif self.gc.gc_state == incminimark.STATE_SWEEPING_ARENA: + pass + elif self.gc.gc_state == incminimark.STATE_FINALIZING: + # ASSUMPTION finalizing is atomic + # + #complete collection + self.gc.debug_gc_step() + assert self.gc.gc_state == incminimark.STATE_SCANNING + break + else: + raise Exception("unreachable") + + self.gc.debug_gc_step() + + #complete the next collection cycle + self.gc.debug_gc_step_until(incminimark.STATE_SCANNING) + + for obj in reachable: + assert obj.x == 150 + + for obj in unreachable: + assert py.test.raises(RuntimeError,"obj.x") class TestIncrementalMiniMarkGCFull(TestMiniMarkGCFull): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass From noreply at buildbot.pypy.org Thu Aug 22 09:26:18 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 Aug 2013 09:26:18 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix an error in richards.py caused by missing barriers in llmodel.py Message-ID: <20130822072618.978D31C1509@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66285:7abd061e246f Date: 2013-08-21 16:32 +0200 http://bitbucket.org/pypy/pypy/changeset/7abd061e246f/ Log: fix an error in richards.py caused by missing barriers in llmodel.py diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -426,8 +426,8 @@ @specialize.arg(2) def _do_barrier(self, gcref_struct, returns_modified_object): + raise NotImplementedError("implement in subclasses!") assert 
self.returns_modified_object == returns_modified_object - # XXX: fastpath for Read and Write variants funcptr = self.get_barrier_funcptr(returns_modified_object) res = funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) @@ -451,7 +451,8 @@ priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) if objhdr.h_revision == priv_rev[0]: return gcref_struct - + + # readcache[obj] == obj read_cache = self.llop1.stm_get_adr_of_read_barrier_cache(rffi.SIGNEDP) objint = llmemory.cast_adr_to_int(objadr) assert WORD == 8, "check for 32bit compatibility" @@ -461,7 +462,6 @@ if rcp[index] == objint: return gcref_struct - # XXX: readcache! funcptr = self.get_barrier_funcptr(returns_modified_object) res = funcptr(objadr) return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -52,6 +52,10 @@ self._setup_exception_handling_untranslated() self.asmmemmgr = AsmMemoryManager() self._setup_frame_realloc(translate_support_code) + self._setup_descrs() + self.setup() + + def _setup_descrs(self): ad = self.gc_ll_descr.getframedescrs(self).arraydescr self.signedarraydescr = ad # the same as normal JITFRAME, however with an array of pointers @@ -63,7 +67,6 @@ else: self.floatarraydescr = ArrayDescr(ad.basesize, ad.itemsize, ad.lendescr, FLAG_FLOAT) - self.setup() def getarraydescr_for_frame(self, type): if type == history.FLOAT: @@ -475,6 +478,7 @@ def bh_arraylen_gc(self, array, arraydescr): assert isinstance(arraydescr, ArrayDescr) + array = self.gc_ll_descr.do_stm_barrier(array, 'R') ofs = arraydescr.lendescr.offset return rffi.cast(rffi.CArrayPtr(lltype.Signed), array)[ofs/WORD] @@ -617,18 +621,22 @@ # --- end of GC unsafe code --- def bh_strlen(self, string): + string = self.gc_ll_descr.do_stm_barrier(string, 'R') s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string) return len(s.chars) def bh_unicodelen(self, string): + string = self.gc_ll_descr.do_stm_barrier(string, 'R') u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) return len(u.chars) def bh_strgetitem(self, string, index): + string = self.gc_ll_descr.do_stm_barrier(string, 'R') s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string) return ord(s.chars[index]) def bh_unicodegetitem(self, string, index): + string = self.gc_ll_descr.do_stm_barrier(string, 'R') u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) return ord(u.chars[index]) @@ -759,6 +767,8 @@ def bh_new_with_vtable(self, vtable, sizedescr): res = self.gc_ll_descr.gc_malloc(sizedescr) if self.vtable_offset is not None: + assert not self.gc_ll_descr.stm + res = self.gc_ll_descr.do_stm_barrier(res, 'W') as_array = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) as_array[self.vtable_offset/WORD] = vtable return res @@ -767,6 +777,7 @@ return lltype.malloc(rffi.CCHARP.TO, size, flavor='raw') def bh_classof(self, struct): + struct = self.gc_ll_descr.do_stm_barrier(struct, 'R') struct = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct) result_adr = llmemory.cast_ptr_to_adr(struct.typeptr) return heaptracker.adr2int(result_adr) @@ -781,19 +792,25 @@ return self.gc_ll_descr.gc_malloc_unicode(length) def bh_strsetitem(self, string, index, newvalue): + string = self.gc_ll_descr.do_stm_barrier(string, 'W') s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string) s.chars[index] = chr(newvalue) def bh_unicodesetitem(self, string, index, 
newvalue): + string = self.gc_ll_descr.do_stm_barrier(string, 'W') u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) u.chars[index] = unichr(newvalue) def bh_copystrcontent(self, src, dst, srcstart, dststart, length): + src = self.gc_ll_descr.do_stm_barrier(src, 'R') + dst = self.gc_ll_descr.do_stm_barrier(dst, 'W') src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src) dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst) rstr.copy_string_contents(src, dst, srcstart, dststart, length) def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): + src = self.gc_ll_descr.do_stm_barrier(src, 'R') + dst = self.gc_ll_descr.do_stm_barrier(dst, 'W') src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src) dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst) rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -188,15 +188,21 @@ RESULT=lltype.Signed) def malloc_big_fixedsize(size, tid): + print "malloc:", size, tid + if size > sys.maxint / 2: + # for testing exception + return lltype.nullptr(llmemory.GCREF.TO) + entries = size + StmGC.GCHDRSIZE TP = rffi.CArray(lltype.Char) obj = lltype.malloc(TP, n=entries, flavor='raw', - track_allocation=False, zero=True) + track_allocation=False, zero=True) objptr = rffi.cast(StmGC.GCHDRP, obj) objptr.h_tid = rffi.cast(lltype.Unsigned, - StmGC.GCFLAG_OLD|StmGC.GCFLAG_WRITE_BARRIER - | tid) + StmGC.GCFLAG_OLD + | StmGC.GCFLAG_WRITE_BARRIER | tid) objptr.h_revision = rffi.cast(lltype.Signed, -sys.maxint) + print "return:", obj, objptr return rffi.cast(llmemory.GCREF, objptr) self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize, [lltype.Signed] * 2) @@ -659,6 +665,108 @@ deadframe = self.cpu.execute_token(othertoken, *args) assert called == [id(finish_descr2)] + + def test_call_malloc_gc(self): + cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + + size = WORD*3 + addr = cpu.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + typeid = 11 + descr = cpu.gc_ll_descr.malloc_big_fixedsize_descr + + p0 = BoxPtr() + ops1 = [ResOperation(rop.CALL_MALLOC_GC, + [ConstInt(addr), ConstInt(size), ConstInt(typeid)], + p0, descr), + ResOperation(rop.FINISH, [p0], None, + descr=BasicFinalDescr(0)), + ] + + inputargs = [] + looptoken = JitCellToken() + c_loop = cpu.compile_loop(inputargs, ops1, + looptoken) + + args = [] + print "======" + print "inputargs:", inputargs, args + print "\n".join(map(str,c_loop[1])) + + frame = self.cpu.execute_token(looptoken, *args) + + + def test_assembler_call_propagate_exc(self): + cpu = self.cpu + cpu._setup_descrs() + cpu.gc_ll_descr.init_nursery(100) + + excdescr = BasicFailDescr(666) + cpu.propagate_exception_descr = excdescr + cpu.setup_once() # xxx redo it, because we added + # propagate_exception + + def assembler_helper(deadframe, virtualizable): + #assert cpu.get_latest_descr(deadframe) is excdescr + # let's assume we handled that + return 3 + + FUNCPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF, + llmemory.GCREF], + lltype.Signed)) + class FakeJitDriverSD: + index_of_virtualizable = -1 + _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) + assembler_helper_adr = llmemory.cast_ptr_to_adr( + _assembler_helper_ptr) + + + + addr = cpu.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + typeid = 11 + 
descr = cpu.gc_ll_descr.malloc_big_fixedsize_descr + + p0 = BoxPtr() + i0 = BoxInt() + ops = [ResOperation(rop.CALL_MALLOC_GC, + [ConstInt(addr), i0, ConstInt(typeid)], + p0, descr), + ResOperation(rop.FINISH, [p0], None, + descr=BasicFinalDescr(0)), + ] + + inputargs = [i0] + looptoken = JitCellToken() + looptoken.outermost_jitdriver_sd = FakeJitDriverSD() + c_loop = cpu.compile_loop(inputargs, ops, looptoken) + + + ARGS = [lltype.Signed] * 10 + RES = lltype.Signed + FakeJitDriverSD.portal_calldescr = cpu.calldescrof( + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) + i1 = ConstInt(sys.maxint - 1) + i2 = BoxInt() + finaldescr = BasicFinalDescr(1) + not_forced = ResOperation(rop.GUARD_NOT_FORCED, [], None, + descr=BasicFailDescr(1)) + not_forced.setfailargs([]) + ops = [ResOperation(rop.CALL_ASSEMBLER, [i1], i2, descr=looptoken), + not_forced, + ResOperation(rop.FINISH, [i1], None, descr=finaldescr), + ] + othertoken = JitCellToken() + cpu.done_with_this_frame_descr_int = BasicFinalDescr() + loop = cpu.compile_loop([], ops, othertoken) + + deadframe = cpu.execute_token(othertoken) + frame = rffi.cast(JITFRAMEPTR, deadframe) + frame_adr = rffi.cast(lltype.Signed, frame.jf_descr) + assert frame_adr != id(finaldescr) + + From noreply at buildbot.pypy.org Thu Aug 22 10:44:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 10:44:38 +0200 (CEST) Subject: [pypy-commit] stmgc default: Yet another kind of barrier. Message-ID: <20130822084438.8B5A91C0842@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r489:49c3e0a47ab4 Date: 2013-08-22 10:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/49c3e0a47ab4/ Log: Yet another kind of barrier. diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -84,6 +84,10 @@ - stm_repeat_write_barrier() can be used on an object on which we already did stm_write_barrier(), but a potential collection can have occurred. + + - stm_write_barrier_noptr() is a slightly cheaper version of + stm_write_barrier(), for when we are going to write + non-gc-pointers into the object. */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); @@ -91,6 +95,7 @@ gcptr stm_repeat_read_barrier(gcptr); gcptr stm_immut_read_barrier(gcptr); gcptr stm_repeat_write_barrier(gcptr); /* <= always returns its argument */ +gcptr stm_write_barrier_noptr(gcptr); #endif /* start a new transaction, calls callback(), and when it returns @@ -219,5 +224,10 @@ stm_RepeatWriteBarrier(obj) \ : (obj)) +#define stm_write_barrier_noptr(obj) \ + (UNLIKELY((obj)->h_revision != stm_private_rev_num) ? 
\ + stm_WriteBarrier(obj) \ + : (obj)) + #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -61,6 +61,7 @@ gcptr stm_repeat_read_barrier(gcptr); gcptr stm_immut_read_barrier(gcptr); gcptr stm_repeat_write_barrier(gcptr); + gcptr stm_write_barrier_noptr(gcptr); void stm_perform_transaction(gcptr arg, int (*callback)(gcptr, int)); void stm_commit_transaction(void); void stm_begin_inevitable_transaction(void); diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -762,3 +762,21 @@ assert n1 == n q = lib.rawgetptr(n, 0) assert lib.rawgetlong(q, 0) == 1298719 + +def test_write_barrier_noptr(): + p = nalloc(HDR + WORD) + assert lib.stm_write_barrier_noptr(p) == p + assert p.h_revision == lib.get_private_rev_num() + assert p.h_tid == lib.gettid(p) | 0 # no GC flags + assert classify(p) == "private" + # + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p) == "protected" + q = lib.stm_write_barrier_noptr(p) + assert q == p + assert classify(p) == "private_from_protected" + assert q == lib.stm_write_barrier_noptr(p) + assert q == lib.stm_write_barrier_noptr(q) + assert q == lib.stm_write_barrier(p) + assert q == lib.stm_write_barrier(q) From noreply at buildbot.pypy.org Thu Aug 22 10:51:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 10:51:05 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Improve generation of write barriers: we only need to check Message-ID: <20130822085105.D5CE51C0842@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66286:3588acc12b00 Date: 2013-08-22 10:50 +0200 http://bitbucket.org/pypy/pypy/changeset/3588acc12b00/ Log: Improve generation of write barriers: we only need to check GCFLAG_WRITEBARRIER when we're writing a GC pointer into the object. diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -52,11 +52,14 @@ category_change = op.args[0].value frm, middle, to = category_change assert middle == '2' + assert frm < to if to == 'W': if frm >= 'V': funcname = 'stm_repeat_write_barrier' else: funcname = 'stm_write_barrier' + elif to == 'V': + funcname = 'stm_write_barrier_noptr' elif to == 'R': if frm >= 'Q': funcname = 'stm_repeat_read_barrier' diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -1340,9 +1340,13 @@ and then free B, which will not be used any more. */ size_t size = stmgc_size(B); assert(B->h_tid & GCFLAG_BACKUP_COPY); + /* if h_original was 0, it must stay that way and not point + to itself. 
(B->h_original may point to P) */ + revision_t h_original = P->h_original; memcpy(((char *)P) + offsetof(struct stm_object_s, h_revision), ((char *)B) + offsetof(struct stm_object_s, h_revision), size - offsetof(struct stm_object_s, h_revision)); + P->h_original = h_original; assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); stmgcpage_free(B); dprintf(("abort: free backup at %p\n", B)); diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -92,6 +92,8 @@ return (revision_t)p; } + assert(p->h_original != (revision_t)p); + dprintf(("stm_id(%p) has orig fst: %p\n", p, (gcptr)p->h_original)); return p->h_original; diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -176,11 +176,23 @@ stm_copy_to_old_id_copy(obj, id_obj); fresh_old_copy = id_obj; + fresh_old_copy->h_original = 0; obj->h_tid &= ~GCFLAG_HAS_ID; + + /* priv_from_prot's backup->h_originals already point + to id_obj */ } else { /* make a copy of it outside */ fresh_old_copy = create_old_object_copy(obj); + + if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED + && !(obj->h_original)) { + /* the object's backup copy still has + a h_original that is NULL*/ + gcptr B = (gcptr)obj->h_revision; + B->h_original = (revision_t)fresh_old_copy; + } } obj->h_tid |= GCFLAG_MOVED; diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -50d9d16d6327 +49c3e0a47ab4 diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -85,6 +85,10 @@ - stm_repeat_write_barrier() can be used on an object on which we already did stm_write_barrier(), but a potential collection can have occurred. + + - stm_write_barrier_noptr() is a slightly cheaper version of + stm_write_barrier(), for when we are going to write + non-gc-pointers into the object. */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); @@ -92,6 +96,7 @@ gcptr stm_repeat_read_barrier(gcptr); gcptr stm_immut_read_barrier(gcptr); gcptr stm_repeat_write_barrier(gcptr); /* <= always returns its argument */ +gcptr stm_write_barrier_noptr(gcptr); #endif /* start a new transaction, calls callback(), and when it returns @@ -205,19 +210,25 @@ : (obj)) #define stm_repeat_read_barrier(obj) \ - (UNLIKELY((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED)) ? \ + (UNLIKELY(((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | \ + GCFLAG_MOVED)) != 0) ? \ stm_RepeatReadBarrier(obj) \ : (obj)) #define stm_immut_read_barrier(obj) \ - (UNLIKELY((obj)->h_tid & GCFLAG_STUB) ? \ + (UNLIKELY(((obj)->h_tid & GCFLAG_STUB) != 0) ? \ stm_ImmutReadBarrier(obj) \ : (obj)) #define stm_repeat_write_barrier(obj) \ - (UNLIKELY((obj)->h_tid & GCFLAG_WRITE_BARRIER) ? \ + (UNLIKELY(((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0) ? \ stm_RepeatWriteBarrier(obj) \ : (obj)) +#define stm_write_barrier_noptr(obj) \ + (UNLIKELY((obj)->h_revision != stm_private_rev_num) ? 
\ + stm_WriteBarrier(obj) \ + : (obj)) + #endif diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -38,6 +38,20 @@ self.interpret(f1, [4]) assert x1.foo == 4 assert len(self.writemode) == 1 + assert self.barriers == ['I2V'] + + def test_simple_write_pointer(self): + T = lltype.GcStruct('T') + X = lltype.GcStruct('X', ('foo', lltype.Ptr(T))) + t1 = lltype.malloc(T, immortal=True) + x1 = lltype.malloc(X, immortal=True, zero=True) + + def f1(n): + x1.foo = t1 + + self.interpret(f1, [4]) + assert x1.foo == t1 + assert len(self.writemode) == 1 assert self.barriers == ['I2W'] def test_multiple_reads(self): @@ -71,10 +85,9 @@ assert len(self.writemode) == 1 assert self.barriers == [] - def test_repeat_write_barrier_after_malloc(self): + def test_dont_repeat_write_barrier_after_malloc_if_not_a_ptr(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) - x1 = lltype.malloc(X, immortal=True) - x1.foo = 6 + x1 = lltype.malloc(X, immortal=True, zero=True) def f1(n): x1.foo = n lltype.malloc(X) @@ -82,6 +95,21 @@ self.interpret(f1, [4]) assert len(self.writemode) == 2 + assert self.barriers == ['I2V'] + + def test_repeat_write_barrier_after_malloc(self): + T = lltype.GcStruct('T') + X = lltype.GcStruct('X', ('foo', lltype.Ptr(T))) + t1 = lltype.malloc(T, immortal=True) + t2 = lltype.malloc(T, immortal=True) + x1 = lltype.malloc(X, immortal=True, zero=True) + def f1(n): + x1.foo = t1 + lltype.malloc(X) + x1.foo = t2 + + self.interpret(f1, [4]) + assert len(self.writemode) == 2 assert self.barriers == ['I2W', 'V2W'] def test_repeat_read_barrier_after_malloc(self): @@ -110,10 +138,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 36 - assert self.barriers == ['A2R', 'A2W', 'q2r'] + assert self.barriers == ['A2R', 'A2V', 'q2r'] res = self.interpret(f1, [x, x]) assert res == 42 - assert self.barriers == ['A2R', 'A2W', 'Q2R'] + assert self.barriers == ['A2R', 'A2V', 'Q2R'] def test_write_cannot_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -128,7 +156,7 @@ y = lltype.malloc(Y, immortal=True) res = self.interpret(f1, [x, y]) assert res == 36 - assert self.barriers == ['A2R', 'A2W'] + assert self.barriers == ['A2R', 'A2V'] def test_call_external_release_gil(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -203,10 +231,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 0 - assert self.barriers == ['A2W', '='] + assert self.barriers == ['A2V', '='] res = self.interpret(f1, [x, x]) assert res == 1 - assert self.barriers == ['A2W', '='] + assert self.barriers == ['A2V', '='] def test_pointer_compare_3(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -217,10 +245,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 1 - assert self.barriers == ['A2W', '='] + assert self.barriers == ['A2V', '='] res = self.interpret(f1, [x, x]) assert res == 0 - assert self.barriers == ['A2W', '='] + assert self.barriers == ['A2V', '='] def test_pointer_compare_4(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -232,10 +260,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 1 - assert self.barriers == ['A2W', 'A2W'] + assert self.barriers == ['A2V', 'A2V'] res = self.interpret(f1, [x, x]) assert res == 0 - assert self.barriers == ['A2W', 'A2W'] + assert 
self.barriers == ['A2V', 'A2V'] def test_simple_loop(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -248,7 +276,7 @@ res = self.interpret(f1, [x, 5]) assert res == 0 # for now we get this. Later, we could probably optimize it - assert self.barriers == ['A2W', 'a2w', 'a2w', 'a2w', 'a2w'] + assert self.barriers == ['A2V', 'a2v', 'a2v', 'a2v', 'a2v'] def test_subclassing(self): class X: @@ -292,12 +320,12 @@ y = make_y(i) external_any_gcobj() prev = y.ybar # a2r - handle(y) # inside handle(): a2r, r2w + handle(y) # inside handle(): a2r, r2v return prev + y.ybar # q2r res = self.interpret(f1, [10]) assert res == 21 - assert self.barriers == ['a2r', 'a2r', 'r2w', 'q2r'] + assert self.barriers == ['a2r', 'a2r', 'r2v', 'q2r'] def test_subclassing_2(self): class X: @@ -317,12 +345,12 @@ y = Y(); y.foo = -13; y.ybar = i external_any_gcobj() prev = x.foo # a2r - handle(y) # inside handle(): a2r, r2w + handle(y) # inside handle(): a2r, r2v return prev + x.foo # q2r res = self.interpret(f1, [10]) assert res == 84 - assert self.barriers == ['a2r', 'a2r', 'r2w', 'q2r'] + assert self.barriers == ['a2r', 'a2r', 'r2v', 'q2r'] def test_subclassing_gcref(self): Y = lltype.GcStruct('Y', ('foo', lltype.Signed), @@ -340,12 +368,12 @@ x = lltype.cast_opaque_ptr(llmemory.GCREF, y) external_any_gcobj() prev = lltype.cast_opaque_ptr(YPTR, x).foo # a2r - handle(y) # inside handle(): a2r, r2w + handle(y) # inside handle(): a2r, r2v return prev + lltype.cast_opaque_ptr(YPTR, x).ybar # q2r? res = self.interpret(f1, [10]) assert res == 42 + 11 - assert self.barriers == ['a2r', 'a2r', 'r2w', 'a2r'] + assert self.barriers == ['a2r', 'a2r', 'r2v', 'a2r'] # Ideally we should get [... 'q2r'] but getting 'a2r' is not wrong # either. This is because from a GCREF the only thing we can do is # cast_opaque_ptr, which is not special-cased in writebarrier.py. 
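The two-letter names these tests assert, such as 'A2R', 'A2V', 'Q2R' and 'V2W', combine a source and a target category. As the rewritten writebarrier.py further down spells out, the letters are ordered 'A' < 'I' < 'Q' < 'R' < 'V' < 'W' and a barrier is inserted only when the target letter is strictly greater than the source one. A small illustrative sketch of that rule, with sample pairs chosen to match the assertions above rather than an exhaustive table:

    # Sketch of the category ordering used by the STM barrier pass: 'A' any
    # pointer, 'I' not a stub, 'Q'/'R' read barrier needed-again/applied,
    # 'V'/'W' write barrier needed-again/applied.  The letters are in ASCII
    # order, so a plain string comparison decides whether a barrier is needed.

    def needs_barrier(frm, to):
        return to > frm

    results = []
    for frm, to in [('A', 'R'), ('A', 'V'), ('Q', 'R'),
                    ('R', 'Q'), ('V', 'W'), ('W', 'R')]:
        results.append('%s2%s' % (frm, to) if needs_barrier(frm, to)
                       else '%s stays usable as %s' % (frm, to))

    assert results == ['A2R',                    # first read of an unknown pointer
                       'A2V',                    # write of a non-GC value
                       'Q2R',                    # repeat read barrier
                       'R stays usable as Q',    # downgrades cost nothing
                       'V2W',                    # upgrade to a full write barrier
                       'W stays usable as R']    # a written object can be read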
@@ -354,10 +382,12 @@ class X: pass x = X() + x2 = X() + x3 = X() def f1(i): - x.a = i # write barrier + x.a = x2 # write barrier y = X() # malloc - x.a += 1 # write barrier again + x.a = x3 # write barrier again return y res = self.interpret(f1, [10]) @@ -375,7 +405,7 @@ res = self.interpret(f1, [4]) assert res == 4 - assert self.barriers == ['a2w', 'a2i'] + assert self.barriers == ['a2v', 'a2i'] def test_read_immutable_prebuilt(self): class Foo: diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -93,7 +93,7 @@ else: # a barrier, calling a helper ptr2 = _stmptr(obj, to) - if to == 'W': + if to >= 'V': self.llinterpreter.tester.writemode.add(ptr2._obj) self.llinterpreter.tester.barriers.append(kind) return ptr2 @@ -115,7 +115,11 @@ def op_setfield(self, obj, fieldname, fieldvalue): if obj._TYPE.TO._gckind == 'gc': - self.check_category(obj, 'W') + T = lltype.typeOf(fieldvalue) + if isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc': + self.check_category(obj, 'W') + else: + self.check_category(obj, 'V') # convert R -> Q all other pointers to the same object we can find for p in self.all_stm_ptrs(): if p._category == 'R' and p._T == obj._T and p == obj: diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -111,7 +111,13 @@ op.args[-1].concretetype is not lltype.Void and op.args[0].concretetype.TO._gckind == 'gc'): # setfields need a regular write barrier - wants_a_barrier[op] = 'W' + T = op.args[-1].concretetype + if isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc': + wants_a_barrier[op] = 'W' + else: + # a write of a non-gc pointer doesn't need to check for + # the GCFLAG_WRITEBARRIER + wants_a_barrier[op] = 'V' elif (op.opname in ('ptr_eq', 'ptr_ne') and op.args[0].concretetype.TO._gckind == 'gc'): @@ -186,8 +192,6 @@ for v, cat in category.items(): if cat == 'W': category[v] = 'V' - # XXX the V2W barrier is only necessary when we're - # writing pointers, not if we're writing ints effectinfo = stmtransformer.write_analyzer.analyze( op, graphinfo=graphinfo) From noreply at buildbot.pypy.org Thu Aug 22 14:40:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 14:40:23 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: No-op: refactoring Message-ID: <20130822124023.A530B1C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66287:8093f92384b5 Date: 2013-08-22 14:39 +0200 http://bitbucket.org/pypy/pypy/changeset/8093f92384b5/ Log: No-op: refactoring diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -1,4 +1,5 @@ from rpython.flowspace.model import SpaceOperation, Constant, Variable +from rpython.flowspace.model import mkentrymap from rpython.translator.unsimplify import varoftype, insert_empty_block from rpython.rtyper.lltypesystem import lltype from rpython.translator.backendopt.writeanalyze import top_set @@ -37,52 +38,23 @@ return to > frm -def insert_stm_barrier(stmtransformer, graph): - """This function uses the following characters for 'categories': +class BlockTransformer(object): - * 'A': any general pointer - * 'I': not a stub (immut_read_barrier was applied) - * 'Q': same 
as R, except needs a repeat_read_barrier - * 'R': the read barrier was applied - * 'V': same as W, except needs a repeat_write_barrier - * 'W': the write barrier was applied + def __init__(self, stmtransformer, block, entrylinks): + self.stmtransformer = stmtransformer + self.block = block + self.inputargs_category = {} + self.patch = None + for link in entrylinks: + self.inputargs_category[link] = ['A'] * len(link.args) - The letters are chosen so that a barrier is needed to change a - pointer from category x to category y if and only if y > x. - """ - graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) - gcremovetypeptr = ( - stmtransformer.translator.config.translation.gcremovetypeptr) - def get_category(v): - if isinstance(v, Constant): - default = 'I' # prebuilt objects cannot be stubs - else: - default = 'A' - return category.get(v, default) - - def get_category_or_null(v): - if isinstance(v, Constant) and not v.value: - return None - return category.get(v, 'A') - - def renamings_get(v): - if v not in renamings: - return v - v2 = renamings[v][0] - if v2.concretetype == v.concretetype: - return v2 - v3 = varoftype(v.concretetype) - newoperations.append(SpaceOperation('cast_pointer', [v2], v3)) - return v3 - - for block in graph.iterblocks(): - if block.operations == (): - continue - # + def analyze_inside_block(self): + gcremovetypeptr = ( + self.stmtransformer.translator.config.translation.gcremovetypeptr) wants_a_barrier = {} expand_comparison = set() - for op in block.operations: + for op in self.block.operations: is_getter = (op.opname in ('getfield', 'getarrayitem', 'getinteriorfield') and op.result.concretetype is not lltype.Void and @@ -124,112 +96,192 @@ # GC pointer comparison might need special care expand_comparison.add(op) # - if wants_a_barrier or expand_comparison: - # note: 'renamings' maps old vars to new vars, but cast_pointers - # are done lazily. It means that the two vars may not have - # exactly the same type. - renamings = {} # {original-var: [var-in-newoperations] (len 1)} - category = {} # {var-in-newoperations: LETTER} + self.wants_a_barrier = wants_a_barrier + self.expand_comparison = expand_comparison + return bool(wants_a_barrier or expand_comparison) + + + def flow_through_block(self, graphinfo): + + def get_category(v): + if isinstance(v, Constant): + default = 'I' # prebuilt objects cannot be stubs + else: + default = 'A' + return category.get(v, default) + + def get_category_or_null(v): + if isinstance(v, Constant) and not v.value: + return None + return category.get(v, 'A') + + def renamings_get(v): + if v not in renamings: + return v + v2 = renamings[v][0] + if v2.concretetype == v.concretetype: + return v2 + v3 = varoftype(v.concretetype) + newoperations.append(SpaceOperation('cast_pointer', [v2], v3)) + return v3 + + # note: 'renamings' maps old vars to new vars, but cast_pointers + # are done lazily. It means that the two vars may not have + # exactly the same type. 
+ renamings = {} # {original-var: [var-in-newoperations] (len 1)} + category = {} # {var-in-newoperations: LETTER} + newoperations = [] + stmtransformer = self.stmtransformer + + for op in self.block.operations: + # + if op.opname == 'cast_pointer': + v = op.args[0] + renamings[op.result] = renamings.setdefault(v, [v]) + continue + # + to = self.wants_a_barrier.get(op) + if to is not None: + v = op.args[0] + v_holder = renamings.setdefault(v, [v]) + v = v_holder[0] + frm = get_category(v) + if needs_barrier(frm, to): + try: + b = stmtransformer.barrier_counts[frm, to] + except KeyError: + c_info = Constant('%s2%s' % (frm, to), lltype.Void) + b = [0, c_info] + stmtransformer.barrier_counts[frm, to] = b + b[0] += 1 + c_info = b[1] + w = varoftype(v.concretetype) + newop = SpaceOperation('stm_barrier', [c_info, v], w) + newoperations.append(newop) + v_holder[0] = w + category[w] = to + # + newop = SpaceOperation(op.opname, + [renamings_get(v) for v in op.args], + op.result) + newoperations.append(newop) + # + if op in self.expand_comparison: + cats = (get_category_or_null(newop.args[0]), + get_category_or_null(newop.args[1])) + if None not in cats and (cats[0] < 'V' or cats[1] < 'V'): + if newop.opname == 'ptr_ne': + v = varoftype(lltype.Bool) + negop = SpaceOperation('bool_not', [v], + newop.result) + newoperations.append(negop) + newop.result = v + newop.opname = 'stm_ptr_eq' + + if stmtransformer.break_analyzer.analyze(op): + # this operation can perform a transaction break: + # all pointers are lowered to 'I', because a non- + # stub cannot suddenly point to a stub, but we + # cannot guarantee anything more + for v, cat in category.items(): + if cat > 'I': + category[v] = 'I' + + if stmtransformer.collect_analyzer.analyze(op): + # this operation can collect: we bring all 'W' + # categories back to 'V', because we would need + # a repeat_write_barrier on them afterwards + for v, cat in category.items(): + if cat == 'W': + category[v] = 'V' + + effectinfo = stmtransformer.write_analyzer.analyze( + op, graphinfo=graphinfo) + if effectinfo: + if effectinfo is top_set: + # this operation can perform random writes: any + # 'R'-category object falls back to 'Q' because + # we would need a repeat_read_barrier() + for v, cat in category.items(): + if cat == 'R': + category[v] = 'Q' + else: + # the same, but only on objects of the right types + # -- we need to consider 'types' or any base type + types = set() + for entry in effectinfo: + TYPE = entry[1].TO + while TYPE is not None: + types.add(TYPE) + if not isinstance(TYPE, lltype.Struct): + break + _, TYPE = TYPE._first_struct() + for v in category.keys(): + if (v.concretetype.TO in types and + category[v] == 'R'): + category[v] = 'Q' + + if op.opname in MALLOCS: + category[op.result] = 'W' + + blockoperations = newoperations + linkoperations = [] + for link in self.block.exits: newoperations = [] - for op in block.operations: - # - if op.opname == 'cast_pointer': - v = op.args[0] - renamings[op.result] = renamings.setdefault(v, [v]) - continue - # - to = wants_a_barrier.get(op) - if to is not None: - v = op.args[0] - v_holder = renamings.setdefault(v, [v]) - v = v_holder[0] - frm = get_category(v) - if needs_barrier(frm, to): - try: - b = stmtransformer.barrier_counts[frm, to] - except KeyError: - c_info = Constant('%s2%s' % (frm, to), lltype.Void) - b = [0, c_info] - stmtransformer.barrier_counts[frm, to] = b - b[0] += 1 - c_info = b[1] - w = varoftype(v.concretetype) - newop = SpaceOperation('stm_barrier', [c_info, v], w) - 
newoperations.append(newop) - v_holder[0] = w - category[w] = to - # - newop = SpaceOperation(op.opname, - [renamings_get(v) for v in op.args], - op.result) - newoperations.append(newop) - # - if op in expand_comparison: - cats = (get_category_or_null(newop.args[0]), - get_category_or_null(newop.args[1])) - if None not in cats and (cats[0] < 'V' or cats[1] < 'V'): - if newop.opname == 'ptr_ne': - v = varoftype(lltype.Bool) - negop = SpaceOperation('bool_not', [v], - newop.result) - newoperations.append(negop) - newop.result = v - newop.opname = 'stm_ptr_eq' + newargs = [renamings_get(v) for v in link.args] + linkoperations.append((newargs, newoperations)) + # + # Record how we'd like to patch the block, but don't do any + # patching yet + self.patch = (blockoperations, linkoperations) - if stmtransformer.break_analyzer.analyze(op): - # this operation can perform a transaction break: - # all pointers are lowered to 'I', because a non- - # stub cannot suddenly point to a stub, but we - # cannot guarantee anything more - for v, cat in category.items(): - if cat > 'I': - category[v] = 'I' - if stmtransformer.collect_analyzer.analyze(op): - # this operation can collect: we bring all 'W' - # categories back to 'V', because we would need - # a repeat_write_barrier on them afterwards - for v, cat in category.items(): - if cat == 'W': - category[v] = 'V' + def patch_now(self): + if self.patch is None: + return + newoperations, linkoperations = self.patch + self.block.operations = newoperations + assert len(linkoperations) == len(self.block.exits) + for link, (newargs, newoperations) in zip(self.block.exits, + linkoperations): + link.args[:] = newargs + if newoperations: + # must put them in a fresh block along the link + annotator = self.stmtransformer.translator.annotator + newblock = insert_empty_block(annotator, link, + newoperations) - effectinfo = stmtransformer.write_analyzer.analyze( - op, graphinfo=graphinfo) - if effectinfo: - if effectinfo is top_set: - # this operation can perform random writes: any - # 'R'-category object falls back to 'Q' because - # we would need a repeat_read_barrier() - for v, cat in category.items(): - if cat == 'R': - category[v] = 'Q' - else: - # the same, but only on objects of the right types - # -- we need to consider 'types' or any base type - types = set() - for entry in effectinfo: - TYPE = entry[1].TO - while TYPE is not None: - types.add(TYPE) - if not isinstance(TYPE, lltype.Struct): - break - _, TYPE = TYPE._first_struct() - for v in category.keys(): - if (v.concretetype.TO in types and - category[v] == 'R'): - category[v] = 'Q' - if op.opname in MALLOCS: - category[op.result] = 'W' +def insert_stm_barrier(stmtransformer, graph): + """This function uses the following characters for 'categories': - block.operations = newoperations - # - for link in block.exits: - newoperations = [] - for i, v in enumerate(link.args): - link.args[i] = renamings_get(v) - if newoperations: - # must put them in a fresh block along the link - annotator = stmtransformer.translator.annotator - newblock = insert_empty_block(annotator, link, - newoperations) + * 'A': any general pointer + * 'I': not a stub (immut_read_barrier was applied) + * 'Q': same as R, except needs a repeat_read_barrier + * 'R': the read barrier was applied + * 'V': same as W, except needs a repeat_write_barrier + * 'W': the write barrier was applied + + The letters are chosen so that a barrier is needed to change a + pointer from category x to category y if and only if y > x. 
+ """ + graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) + + block_transformers = {} + entrymap = mkentrymap(graph) + pending = set() + + for block in graph.iterblocks(): + if block.operations == (): + continue + bt = BlockTransformer(stmtransformer, block, entrymap[block]) + if bt.analyze_inside_block(): + pending.add(bt) + block_transformers[block] = bt + + while pending: + bt = pending.pop() + bt.flow_through_block(graphinfo) + + for bt in block_transformers.values(): + bt.patch_now() From noreply at buildbot.pypy.org Thu Aug 22 17:00:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 17:00:51 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Do some whole-graph analysis. Message-ID: <20130822150051.6DBA21C0F1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66288:7a6758a0e339 Date: 2013-08-22 16:56 +0200 http://bitbucket.org/pypy/pypy/changeset/7a6758a0e339/ Log: Do some whole-graph analysis. diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -624,6 +624,7 @@ 'debug_reraise_traceback': LLOp(), 'debug_print_traceback': LLOp(), 'debug_nonnull_pointer': LLOp(canrun=True), + 'debug_stm_flush_barrier': LLOp(canrun=True), # __________ instrumentation _________ 'instrument_count': LLOp(), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -673,6 +673,9 @@ def op_nop(x): pass +def op_debug_stm_flush_barrier(): + pass + # ____________________________________________________________ def get_op_impl(opname): diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -1,5 +1,6 @@ from rpython.rlib.rstm import register_invoke_around_extcall from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.translator.stm.test.transform_support import BaseTestTransform @@ -294,16 +295,15 @@ x = Z() x.foo = 815 x.zbar = 'A' - external_any_gcobj() + llop.debug_stm_flush_barrier(lltype.Void) result = x.foo # 1 if isinstance(x, Y): # 2 - result += x.ybar # 3 + result += x.ybar # 3: optimized return result res = self.interpret(f1, [10]) assert res == 42 + 10 - assert self.barriers == ['a2r', 'a2i', 'a2r'] # from 3 blocks (could be - # optimized later) + assert self.barriers == ['a2r', 'a2i'] res = self.interpret(f1, [-10]) assert res == 815 assert self.barriers == ['a2r', 'a2i'] @@ -318,7 +318,7 @@ return y def f1(i): y = make_y(i) - external_any_gcobj() + llop.debug_stm_flush_barrier(lltype.Void) prev = y.ybar # a2r handle(y) # inside handle(): a2r, r2v return prev + y.ybar # q2r @@ -343,7 +343,7 @@ else: x = Z(); x.foo = 815; x.zbar = 'A' y = Y(); y.foo = -13; y.ybar = i - external_any_gcobj() + llop.debug_stm_flush_barrier(lltype.Void) prev = x.foo # a2r handle(y) # inside handle(): a2r, r2v return prev + x.foo # q2r @@ -366,7 +366,7 @@ else: y = lltype.nullptr(Y) x = lltype.cast_opaque_ptr(llmemory.GCREF, y) - external_any_gcobj() + llop.debug_stm_flush_barrier(lltype.Void) prev = lltype.cast_opaque_ptr(YPTR, x).foo # a2r handle(y) # inside handle(): a2r, r2v return prev + 
lltype.cast_opaque_ptr(YPTR, x).ybar # q2r? @@ -387,7 +387,7 @@ def f1(i): x.a = x2 # write barrier y = X() # malloc - x.a = x3 # write barrier again + x.a = x3 # repeat write barrier return y res = self.interpret(f1, [10]) @@ -399,8 +399,10 @@ def f1(n): x = Foo() + llop.debug_stm_flush_barrier(lltype.Void) if n > 1: x.foo = n + llop.debug_stm_flush_barrier(lltype.Void) return x.foo res = self.interpret(f1, [4]) diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -77,7 +77,7 @@ def check_category(self, p, expected): cat = self.get_category_or_null(p) - assert cat in 'AIQRVW' or cat is None + assert cat is None or cat in 'AIQRVW' if expected is not None: assert cat is not None and cat >= expected return cat diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -1,6 +1,6 @@ from rpython.flowspace.model import SpaceOperation, Constant, Variable -from rpython.flowspace.model import mkentrymap from rpython.translator.unsimplify import varoftype, insert_empty_block +from rpython.translator.unsimplify import insert_empty_startblock from rpython.rtyper.lltypesystem import lltype from rpython.translator.backendopt.writeanalyze import top_set @@ -38,15 +38,21 @@ return to > frm +class Renaming(object): + def __init__(self, newvar, category): + self.newvar = newvar # a Variable or a Constant + self.TYPE = newvar.concretetype + self.category = category + + class BlockTransformer(object): - def __init__(self, stmtransformer, block, entrylinks): + def __init__(self, stmtransformer, block): self.stmtransformer = stmtransformer self.block = block - self.inputargs_category = {} self.patch = None - for link in entrylinks: - self.inputargs_category[link] = ['A'] * len(link.args) + self.inputargs_category = [None] * len(block.inputargs) + self.inputargs_category_per_link = {} def analyze_inside_block(self): @@ -98,54 +104,71 @@ # self.wants_a_barrier = wants_a_barrier self.expand_comparison = expand_comparison - return bool(wants_a_barrier or expand_comparison) def flow_through_block(self, graphinfo): - def get_category(v): - if isinstance(v, Constant): - default = 'I' # prebuilt objects cannot be stubs - else: - default = 'A' - return category.get(v, default) + def renfetch(v): + try: + return renamings[v] + except KeyError: + if isinstance(v, Variable): + ren = Renaming(v, 'A') + else: + ren = Renaming(v, 'I') # prebuilt objects cannot be stubs + renamings[v] = ren + return ren def get_category_or_null(v): - if isinstance(v, Constant) and not v.value: + # 'v' is an original variable here, or a constant + if isinstance(v, Constant) and not v.value: # a NULL constant return None - return category.get(v, 'A') + if v in renamings: + return renamings[v].category + if isinstance(v, Constant): + return 'I' + else: + return 'A' def renamings_get(v): - if v not in renamings: - return v - v2 = renamings[v][0] + try: + ren = renamings[v] + except KeyError: + return v # unmodified + v2 = ren.newvar if v2.concretetype == v.concretetype: return v2 v3 = varoftype(v.concretetype) newoperations.append(SpaceOperation('cast_pointer', [v2], v3)) + if lltype.castable(ren.TYPE, v3.concretetype) > 0: + ren.TYPE = v3.concretetype return v3 # note: 'renamings' maps old vars to new vars, but cast_pointers # are done lazily. 
It means that the two vars may not have # exactly the same type. - renamings = {} # {original-var: [var-in-newoperations] (len 1)} - category = {} # {var-in-newoperations: LETTER} + renamings = {} # {original-var: Renaming(newvar, category)} newoperations = [] stmtransformer = self.stmtransformer + # make the initial trivial renamings needed to have some precise + # categories for the input args + for v, cat in zip(self.block.inputargs, self.inputargs_category): + if (cat is not None and + isinstance(v.concretetype, lltype.Ptr) and + v.concretetype.TO._gckind == 'gc'): + renamings[v] = Renaming(v, cat) + for op in self.block.operations: # - if op.opname == 'cast_pointer': - v = op.args[0] - renamings[op.result] = renamings.setdefault(v, [v]) + if op.opname in ('cast_pointer', 'same_as'): + renamings[op.result] = renfetch(op.args[0]) continue # to = self.wants_a_barrier.get(op) if to is not None: - v = op.args[0] - v_holder = renamings.setdefault(v, [v]) - v = v_holder[0] - frm = get_category(v) + ren = renfetch(op.args[0]) + frm = ren.category if needs_barrier(frm, to): try: b = stmtransformer.barrier_counts[frm, to] @@ -155,11 +178,12 @@ stmtransformer.barrier_counts[frm, to] = b b[0] += 1 c_info = b[1] + v = ren.newvar w = varoftype(v.concretetype) newop = SpaceOperation('stm_barrier', [c_info, v], w) newoperations.append(newop) - v_holder[0] = w - category[w] = to + ren.newvar = w + ren.category = to # newop = SpaceOperation(op.opname, [renamings_get(v) for v in op.args], @@ -167,8 +191,8 @@ newoperations.append(newop) # if op in self.expand_comparison: - cats = (get_category_or_null(newop.args[0]), - get_category_or_null(newop.args[1])) + cats = (get_category_or_null(op.args[0]), + get_category_or_null(op.args[1])) if None not in cats and (cats[0] < 'V' or cats[1] < 'V'): if newop.opname == 'ptr_ne': v = varoftype(lltype.Bool) @@ -183,17 +207,21 @@ # all pointers are lowered to 'I', because a non- # stub cannot suddenly point to a stub, but we # cannot guarantee anything more - for v, cat in category.items(): - if cat > 'I': - category[v] = 'I' + for ren in renamings.values(): + if ren.category > 'I': + ren.category = 'I' + + if op.opname == 'debug_stm_flush_barrier': + for ren in renamings.values(): + ren.category = 'A' if stmtransformer.collect_analyzer.analyze(op): # this operation can collect: we bring all 'W' # categories back to 'V', because we would need # a repeat_write_barrier on them afterwards - for v, cat in category.items(): - if cat == 'W': - category[v] = 'V' + for ren in renamings.values(): + if ren.category == 'W': + ren.category = 'V' effectinfo = stmtransformer.write_analyzer.analyze( op, graphinfo=graphinfo) @@ -202,9 +230,9 @@ # this operation can perform random writes: any # 'R'-category object falls back to 'Q' because # we would need a repeat_read_barrier() - for v, cat in category.items(): - if cat == 'R': - category[v] = 'Q' + for ren in renamings.values(): + if ren.category == 'R': + ren.category = 'Q' else: # the same, but only on objects of the right types # -- we need to consider 'types' or any base type @@ -216,34 +244,82 @@ if not isinstance(TYPE, lltype.Struct): break _, TYPE = TYPE._first_struct() - for v in category.keys(): - if (v.concretetype.TO in types and - category[v] == 'R'): - category[v] = 'Q' + for ren in renamings.values(): + if ren.TYPE.TO in types and ren.category == 'R': + ren.category = 'Q' if op.opname in MALLOCS: - category[op.result] = 'W' + assert op.result not in renamings + renamings[op.result] = Renaming(op.result, 'W') + if 
isinstance(self.block.exitswitch, Variable): + switchv = renamings_get(self.block.exitswitch) + else: + switchv = None blockoperations = newoperations linkoperations = [] for link in self.block.exits: + output_categories = [] + for v in link.args: + if (isinstance(v.concretetype, lltype.Ptr) and + v.concretetype.TO._gckind == 'gc'): + cat = get_category_or_null(v) + else: + cat = None + output_categories.append(cat) newoperations = [] newargs = [renamings_get(v) for v in link.args] - linkoperations.append((newargs, newoperations)) + linkoperations.append((newargs, newoperations, output_categories)) # # Record how we'd like to patch the block, but don't do any # patching yet - self.patch = (blockoperations, linkoperations) + self.patch = (blockoperations, switchv, linkoperations) + + + def update_targets(self, block_transformers): + (_, _, linkoperations) = self.patch + assert len(linkoperations) == len(self.block.exits) + targetbts = [] + for link, (_, _, output_categories) in zip(self.block.exits, + linkoperations): + targetblock = link.target + if targetblock not in block_transformers: + continue # ignore the exit block + targetbt = block_transformers[targetblock] + targetbt.inputargs_category_per_link[link] = output_categories + if targetbt.update_inputargs_category(): + targetbts.append(targetbt) + return set(targetbts) + + def update_inputargs_category(self): + values = self.inputargs_category_per_link.values() + newcats = [] + for i in range(len(self.block.inputargs)): + cat = None + for output_categories in values: + cat2 = output_categories[i] + if cat is None: + cat = cat2 + elif cat2 is not None: + cat = min(cat, cat2) + newcats.append(cat) + if newcats != self.inputargs_category: + self.inputargs_category = newcats + return True + else: + return False def patch_now(self): if self.patch is None: return - newoperations, linkoperations = self.patch + newoperations, switchv, linkoperations = self.patch self.block.operations = newoperations + if switchv is not None: + self.block.exitswitch = switchv assert len(linkoperations) == len(self.block.exits) - for link, (newargs, newoperations) in zip(self.block.exits, - linkoperations): + for link, (newargs, newoperations, _) in zip(self.block.exits, + linkoperations): link.args[:] = newargs if newoperations: # must put them in a fresh block along the link @@ -266,22 +342,24 @@ pointer from category x to category y if and only if y > x. 
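    (Editorial sketch, not part of the changeset: update_targets()/update_inputargs_category() a few hunks above implement a small forward dataflow fixpoint; a block's input categories are the per-position minimum over all incoming links, and the block is re-queued only when that merge changes. A rough stand-alone model of the merge step, assuming categories are one-letter strings or None and links are arbitrary dictionary keys:)

        def merge_link_categories(per_link_categories, num_inputargs):
            # per_link_categories: {link: [category-or-None per input arg]},
            # mirroring inputargs_category_per_link in the patch above.
            newcats = []
            for i in range(num_inputargs):
                cat = None
                for cats in per_link_categories.values():
                    cat2 = cats[i]
                    if cat is None:
                        cat = cat2
                    elif cat2 is not None:
                        cat = min(cat, cat2)   # the least advanced category wins
                newcats.append(cat)
            return newcats

        # one predecessor delivers a write-barriered pointer, the other only a
        # read-barriered one: the target block may only assume 'R' for that arg.
        assert merge_link_categories({"link1": ['W'], "link2": ['R']}, 1) == ['R']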
""" graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) + annotator = stmtransformer.translator.annotator + insert_empty_startblock(annotator, graph) block_transformers = {} - entrymap = mkentrymap(graph) pending = set() for block in graph.iterblocks(): if block.operations == (): continue - bt = BlockTransformer(stmtransformer, block, entrymap[block]) - if bt.analyze_inside_block(): - pending.add(bt) + bt = BlockTransformer(stmtransformer, block) + bt.analyze_inside_block() block_transformers[block] = bt + pending.add(bt) while pending: bt = pending.pop() bt.flow_through_block(graphinfo) + pending |= bt.update_targets(block_transformers) for bt in block_transformers.values(): bt.patch_now() From noreply at buildbot.pypy.org Thu Aug 22 17:00:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 17:00:52 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Fixes Message-ID: <20130822150052.C962A1C10DD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66289:1db160b42644 Date: 2013-08-22 17:00 +0200 http://bitbucket.org/pypy/pypy/changeset/1db160b42644/ Log: Fixes diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -37,6 +37,9 @@ def needs_barrier(frm, to): return to > frm +def is_gc_ptr(T): + return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' + class Renaming(object): def __init__(self, newvar, category): @@ -64,7 +67,7 @@ is_getter = (op.opname in ('getfield', 'getarrayitem', 'getinteriorfield') and op.result.concretetype is not lltype.Void and - op.args[0].concretetype.TO._gckind == 'gc') + is_gc_ptr(op.args[0].concretetype)) if (gcremovetypeptr and op.opname in ('getfield', 'setfield') and op.args[1].value == 'typeptr' and @@ -87,10 +90,10 @@ elif (op.opname in ('setfield', 'setarrayitem', 'setinteriorfield') and op.args[-1].concretetype is not lltype.Void and - op.args[0].concretetype.TO._gckind == 'gc'): + is_gc_ptr(op.args[0].concretetype)): # setfields need a regular write barrier T = op.args[-1].concretetype - if isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc': + if is_gc_ptr(T): wants_a_barrier[op] = 'W' else: # a write of a non-gc pointer doesn't need to check for @@ -98,7 +101,7 @@ wants_a_barrier[op] = 'V' elif (op.opname in ('ptr_eq', 'ptr_ne') and - op.args[0].concretetype.TO._gckind == 'gc'): + is_gc_ptr(op.args[0].concretetype)): # GC pointer comparison might need special care expand_comparison.add(op) # @@ -154,14 +157,13 @@ # make the initial trivial renamings needed to have some precise # categories for the input args for v, cat in zip(self.block.inputargs, self.inputargs_category): - if (cat is not None and - isinstance(v.concretetype, lltype.Ptr) and - v.concretetype.TO._gckind == 'gc'): + if cat is not None and is_gc_ptr(v.concretetype): renamings[v] = Renaming(v, cat) for op in self.block.operations: # - if op.opname in ('cast_pointer', 'same_as'): + if (op.opname in ('cast_pointer', 'same_as') and + is_gc_ptr(op.result.concretetype)): renamings[op.result] = renfetch(op.args[0]) continue # @@ -261,8 +263,7 @@ for link in self.block.exits: output_categories = [] for v in link.args: - if (isinstance(v.concretetype, lltype.Ptr) and - v.concretetype.TO._gckind == 'gc'): + if is_gc_ptr(v.concretetype): cat = get_category_or_null(v) else: cat = None From noreply at buildbot.pypy.org Thu Aug 22 17:04:04 2013 From: noreply at buildbot.pypy.org 
(rguillebert) Date: Thu, 22 Aug 2013 17:04:04 +0200 (CEST) Subject: [pypy-commit] pypy default: Make numpy.character usable as a dtype (it's an alias for str) Message-ID: <20130822150404.D1D0C1C0F1B@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66290:36f863c1be94 Date: 2013-08-22 17:03 +0200 http://bitbucket.org/pypy/pypy/changeset/36f863c1be94/ Log: Make numpy.character usable as a dtype (it's an alias for str) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -685,7 +685,7 @@ name='string', char='S', w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str], + alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -781,7 +781,7 @@ def test_character_dtype(self): from numpypy import array, character x = array([["A", "B"], ["C", "D"]], character) - assert x == [["A", "B"], ["C", "D"]] + assert (x == [["A", "B"], ["C", "D"]]).all() class AppTestRecordDtypes(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) From noreply at buildbot.pypy.org Thu Aug 22 18:40:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 18:40:19 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: Hack to avoid the algorithm getting stuck (maybe) Message-ID: <20130822164019.AEEE21C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66291:1ffdd0bbbb60 Date: 2013-08-22 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/1ffdd0bbbb60/ Log: Hack to avoid the algorithm getting stuck (maybe) diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -463,6 +463,32 @@ assert res == True assert self.barriers == [] + def test_infinite_loop_bug(self): + class A(object): + user_overridden_class = False + + def stuff(self): + return 12.3 + + def immutable_unique_id(self): + if self.user_overridden_class: + return None + from rpython.rlib.longlong2float import float2longlong + from rpython.rlib.rarithmetic import r_ulonglong + from rpython.rlib.rbigint import rbigint + real = self.stuff() + imag = self.stuff() + real_b = rbigint.fromrarith_int(float2longlong(real)) + imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) + val = real_b.lshift(64).or_(imag_b).lshift(3) + return val + + def f(): + return A().immutable_unique_id() + + for i in range(10): + self.interpret(f, [], run=False) + external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, _callable=lambda: None, diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -38,7 +38,7 @@ return 'I' # allocated with immortal=True raise AssertionError("unknown category on %r" % (p,)) - def interpret(self, fn, args, gcremovetypeptr=False): + def interpret(self, fn, args, gcremovetypeptr=False, run=True): self.build_state() 
clear_tcache() interp, self.graph = get_interpreter(fn, args, view=False) @@ -60,8 +60,9 @@ if self.do_jit_driver: import py py.test.skip("XXX how to test?") - result = interp.eval_graph(self.graph, args) - return result + if run: + result = interp.eval_graph(self.graph, args) + return result class LLSTMFrame(LLFrame): @@ -131,7 +132,7 @@ cat = self.check_category(obj, None) p = opimpl.op_cast_pointer(RESTYPE, obj) return _stmptr(p, cat) - return LLFrame.op_cast_pointer(self, RESTYPE, obj) + return lltype.cast_pointer(RESTYPE, obj) op_cast_pointer.need_result_type = True def op_cast_opaque_ptr(self, RESTYPE, obj): diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -358,7 +358,11 @@ pending.add(bt) while pending: - bt = pending.pop() + # XXX sadly, this seems to be order-dependent. Picking the minimum + # of the blocks seems to be necessary, too, to avoid the situation + # of two blocks chasing each other around a loop :-( + bt = min(pending) + pending.remove(bt) bt.flow_through_block(graphinfo) pending |= bt.update_targets(block_transformers) From noreply at buildbot.pypy.org Thu Aug 22 19:59:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 19:59:57 +0200 (CEST) Subject: [pypy-commit] stmgc default: Conditionally compile counters for the slow- and fast-path of all Message-ID: <20130822175957.F41E41C12CC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r490:7b20231c8672 Date: 2013-08-22 19:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/7b20231c8672/ Log: Conditionally compile counters for the slow- and fast-path of all barriers. diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -21,7 +21,7 @@ C_FILES = et.c lists.c steal.c nursery.c gcpage.c \ stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c -DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1 +DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1 -DSTM_BARRIER_COUNT=1 # note that we don't say -DNDEBUG, so that asserts should still be compiled in diff --git a/c4/fprintcolor.c b/c4/fprintcolor.c --- a/c4/fprintcolor.c +++ b/c4/fprintcolor.c @@ -57,3 +57,25 @@ } #endif + + +#ifdef STM_BARRIER_COUNT +long stm_barriercount[STM_BARRIER_NUMBERS]; + +void stm_print_barrier_count(void) +{ + static char names[] = STM_BARRIER_NAMES; + char *p = names; + char *q; + int i; + dprintf(("** Summary of the barrier calls **\n")); + for (i = 0; i < STM_BARRIER_NUMBERS; i += 2) { + q = strchr(p, '\n'); + *q = '\0'; + dprintf(("%12ld %s\n", stm_barriercount[i], p)); + *q = '\n'; + dprintf(("%12ld \\ fast path\n", stm_barriercount[i + 1])); + p = q + 1; + } +} +#endif diff --git a/c4/fprintcolor.h b/c4/fprintcolor.h --- a/c4/fprintcolor.h +++ b/c4/fprintcolor.h @@ -20,3 +20,8 @@ #define dprintfcolor() 0 #endif + + +#ifdef STM_BARRIER_COUNT +void stm_print_barrier_count(void); +#endif diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -196,38 +196,52 @@ #define UNLIKELY(test) __builtin_expect(test, 0) +#ifdef STM_BARRIER_COUNT +# define STM_BARRIER_NUMBERS 12 +# define STM_BARRIER_NAMES "stm_read_barrier\n" \ + "stm_write_barrier\n" \ + "stm_repeat_read_barrier\n" \ + "stm_immut_read_barrier\n" \ + "stm_repeat_write_barrier\n" \ + "stm_write_barrier_noptr\n" +# define STM_COUNT(id, x) (stm_barriercount[id]++, x) +extern long stm_barriercount[STM_BARRIER_NUMBERS]; +#else +# 
define STM_COUNT(id, x) (x) +#endif + #define stm_read_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) && \ (FXCACHE_AT(obj) != (obj))) ? \ - stm_DirectReadBarrier(obj) \ - : (obj)) + STM_COUNT(0, stm_DirectReadBarrier(obj)) \ + : STM_COUNT(1, obj)) #define stm_write_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) || \ (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? \ - stm_WriteBarrier(obj) \ - : (obj)) + STM_COUNT(2, stm_WriteBarrier(obj)) \ + : STM_COUNT(3, obj)) #define stm_repeat_read_barrier(obj) \ (UNLIKELY(((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | \ GCFLAG_MOVED)) != 0) ? \ - stm_RepeatReadBarrier(obj) \ - : (obj)) + STM_COUNT(4, stm_RepeatReadBarrier(obj)) \ + : STM_COUNT(5, obj)) #define stm_immut_read_barrier(obj) \ (UNLIKELY(((obj)->h_tid & GCFLAG_STUB) != 0) ? \ - stm_ImmutReadBarrier(obj) \ - : (obj)) + STM_COUNT(6, stm_ImmutReadBarrier(obj)) \ + : STM_COUNT(7, obj)) #define stm_repeat_write_barrier(obj) \ (UNLIKELY(((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0) ? \ - stm_RepeatWriteBarrier(obj) \ - : (obj)) + STM_COUNT(8, stm_RepeatWriteBarrier(obj)) \ + : STM_COUNT(9, obj)) #define stm_write_barrier_noptr(obj) \ (UNLIKELY((obj)->h_revision != stm_private_rev_num) ? \ - stm_WriteBarrier(obj) \ - : (obj)) + STM_COUNT(10, stm_WriteBarrier(obj)) \ + : STM_COUNT(11, obj)) #endif diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -83,6 +83,13 @@ dprintf(("enter_callback_call(tok=%d)\n", token)); if (token == 1) { stmgcpage_acquire_global_lock(); +#ifdef STM_BARRIER_COUNT + static int seen = 0; + if (!seen) { + seen = 1; + atexit(&stm_print_barrier_count); + } +#endif DescriptorInit(); stmgc_init_nursery(); init_shadowstack(); From noreply at buildbot.pypy.org Thu Aug 22 20:01:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 20:01:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/7b20231c8672 Message-ID: <20130822180101.3515A1C12CC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66292:30e2390132a9 Date: 2013-08-22 20:00 +0200 http://bitbucket.org/pypy/pypy/changeset/30e2390132a9/ Log: import stmgc/7b20231c8672 diff --git a/rpython/translator/stm/src_stm/fprintcolor.c b/rpython/translator/stm/src_stm/fprintcolor.c --- a/rpython/translator/stm/src_stm/fprintcolor.c +++ b/rpython/translator/stm/src_stm/fprintcolor.c @@ -58,3 +58,25 @@ } #endif + + +#ifdef STM_BARRIER_COUNT +long stm_barriercount[STM_BARRIER_NUMBERS]; + +void stm_print_barrier_count(void) +{ + static char names[] = STM_BARRIER_NAMES; + char *p = names; + char *q; + int i; + dprintf(("** Summary of the barrier calls **\n")); + for (i = 0; i < STM_BARRIER_NUMBERS; i += 2) { + q = strchr(p, '\n'); + *q = '\0'; + dprintf(("%12ld %s\n", stm_barriercount[i], p)); + *q = '\n'; + dprintf(("%12ld \\ fast path\n", stm_barriercount[i + 1])); + p = q + 1; + } +} +#endif diff --git a/rpython/translator/stm/src_stm/fprintcolor.h b/rpython/translator/stm/src_stm/fprintcolor.h --- a/rpython/translator/stm/src_stm/fprintcolor.h +++ b/rpython/translator/stm/src_stm/fprintcolor.h @@ -21,3 +21,8 @@ #define dprintfcolor() 0 #endif + + +#ifdef STM_BARRIER_COUNT +void stm_print_barrier_count(void); +#endif diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -49c3e0a47ab4 +7b20231c8672 diff --git 
a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -197,38 +197,52 @@ #define UNLIKELY(test) __builtin_expect(test, 0) +#ifdef STM_BARRIER_COUNT +# define STM_BARRIER_NUMBERS 12 +# define STM_BARRIER_NAMES "stm_read_barrier\n" \ + "stm_write_barrier\n" \ + "stm_repeat_read_barrier\n" \ + "stm_immut_read_barrier\n" \ + "stm_repeat_write_barrier\n" \ + "stm_write_barrier_noptr\n" +# define STM_COUNT(id, x) (stm_barriercount[id]++, x) +extern long stm_barriercount[STM_BARRIER_NUMBERS]; +#else +# define STM_COUNT(id, x) (x) +#endif + #define stm_read_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) && \ (FXCACHE_AT(obj) != (obj))) ? \ - stm_DirectReadBarrier(obj) \ - : (obj)) + STM_COUNT(0, stm_DirectReadBarrier(obj)) \ + : STM_COUNT(1, obj)) #define stm_write_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) || \ (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? \ - stm_WriteBarrier(obj) \ - : (obj)) + STM_COUNT(2, stm_WriteBarrier(obj)) \ + : STM_COUNT(3, obj)) #define stm_repeat_read_barrier(obj) \ (UNLIKELY(((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | \ GCFLAG_MOVED)) != 0) ? \ - stm_RepeatReadBarrier(obj) \ - : (obj)) + STM_COUNT(4, stm_RepeatReadBarrier(obj)) \ + : STM_COUNT(5, obj)) #define stm_immut_read_barrier(obj) \ (UNLIKELY(((obj)->h_tid & GCFLAG_STUB) != 0) ? \ - stm_ImmutReadBarrier(obj) \ - : (obj)) + STM_COUNT(6, stm_ImmutReadBarrier(obj)) \ + : STM_COUNT(7, obj)) #define stm_repeat_write_barrier(obj) \ (UNLIKELY(((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0) ? \ - stm_RepeatWriteBarrier(obj) \ - : (obj)) + STM_COUNT(8, stm_RepeatWriteBarrier(obj)) \ + : STM_COUNT(9, obj)) #define stm_write_barrier_noptr(obj) \ (UNLIKELY((obj)->h_revision != stm_private_rev_num) ? \ - stm_WriteBarrier(obj) \ - : (obj)) + STM_COUNT(10, stm_WriteBarrier(obj)) \ + : STM_COUNT(11, obj)) #endif diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -84,6 +84,13 @@ dprintf(("enter_callback_call(tok=%d)\n", token)); if (token == 1) { stmgcpage_acquire_global_lock(); +#ifdef STM_BARRIER_COUNT + static int seen = 0; + if (!seen) { + seen = 1; + atexit(&stm_print_barrier_count); + } +#endif DescriptorInit(); stmgc_init_nursery(); init_shadowstack(); From noreply at buildbot.pypy.org Thu Aug 22 20:08:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 20:08:04 +0200 (CEST) Subject: [pypy-commit] stmgc default: Duh, the point is to always print the results if compiled in. Message-ID: <20130822180804.B3FB21C12CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r491:c7b63aa9d1ad Date: 2013-08-22 20:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/c7b63aa9d1ad/ Log: Duh, the point is to always print the results if compiled in. 
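    (Editorial sketch, not part of the changesets: the STM_COUNT macro shown in the stmgc changeset a couple of messages above keeps, per barrier, one counter for the out-of-line slow path and one for the inlined fast path, at consecutive even/odd indices; stm_print_barrier_count() then walks the name list two counters at a time. The diff below simply switches that summary from dprintf to an unconditional fprintf(stderr). A small Python model of the bookkeeping, with made-up numbers:)

        BARRIER_NAMES = ["stm_read_barrier", "stm_write_barrier",
                         "stm_repeat_read_barrier", "stm_immut_read_barrier",
                         "stm_repeat_write_barrier", "stm_write_barrier_noptr"]
        barriercount = [0] * (2 * len(BARRIER_NAMES))   # even = slow path, odd = fast path

        def count(barrier_index, fast):
            barriercount[2 * barrier_index + (1 if fast else 0)] += 1

        def print_barrier_count():
            print("** Summary of the barrier calls **")
            for i, name in enumerate(BARRIER_NAMES):
                print("%12d %s" % (barriercount[2 * i], name))
                print("%12d \\ fast path" % barriercount[2 * i + 1])

        count(0, fast=False); count(0, fast=True); count(0, fast=True)
        print_barrier_count()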
diff --git a/c4/fprintcolor.c b/c4/fprintcolor.c --- a/c4/fprintcolor.c +++ b/c4/fprintcolor.c @@ -68,13 +68,13 @@ char *p = names; char *q; int i; - dprintf(("** Summary of the barrier calls **\n")); + fprintf(stderr, "** Summary of the barrier calls **\n"); for (i = 0; i < STM_BARRIER_NUMBERS; i += 2) { q = strchr(p, '\n'); *q = '\0'; - dprintf(("%12ld %s\n", stm_barriercount[i], p)); + fprintf(stderr, "%12ld %s\n", stm_barriercount[i], p); *q = '\n'; - dprintf(("%12ld \\ fast path\n", stm_barriercount[i + 1])); + fprintf(stderr, "%12ld \\ fast path\n", stm_barriercount[i + 1]); p = q + 1; } } From noreply at buildbot.pypy.org Thu Aug 22 20:08:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 20:08:58 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/c7b63aa9d1ad Message-ID: <20130822180858.83A1C1C12CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66293:001be0e0a57b Date: 2013-08-22 20:08 +0200 http://bitbucket.org/pypy/pypy/changeset/001be0e0a57b/ Log: import stmgc/c7b63aa9d1ad diff --git a/rpython/translator/stm/src_stm/fprintcolor.c b/rpython/translator/stm/src_stm/fprintcolor.c --- a/rpython/translator/stm/src_stm/fprintcolor.c +++ b/rpython/translator/stm/src_stm/fprintcolor.c @@ -69,13 +69,13 @@ char *p = names; char *q; int i; - dprintf(("** Summary of the barrier calls **\n")); + fprintf(stderr, "** Summary of the barrier calls **\n"); for (i = 0; i < STM_BARRIER_NUMBERS; i += 2) { q = strchr(p, '\n'); *q = '\0'; - dprintf(("%12ld %s\n", stm_barriercount[i], p)); + fprintf(stderr, "%12ld %s\n", stm_barriercount[i], p); *q = '\n'; - dprintf(("%12ld \\ fast path\n", stm_barriercount[i + 1])); + fprintf(stderr, "%12ld \\ fast path\n", stm_barriercount[i + 1]); p = q + 1; } } diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -7b20231c8672 +c7b63aa9d1ad From noreply at buildbot.pypy.org Thu Aug 22 20:58:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 20:58:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Add a (failing) test: most operations with GC in their name need to Message-ID: <20130822185800.93E221C12CC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r66294:e8db0c8ebd69 Date: 2013-08-22 20:57 +0200 http://bitbucket.org/pypy/pypy/changeset/e8db0c8ebd69/ Log: Add a (failing) test: most operations with GC in their name need to be explicitly handled by stmrewrite. This is a hack but it gives some future-proofing against adding new GC operations and forgetting that they need special STM support. diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1,10 +1,35 @@ from rpython.jit.backend.llsupport.descr import * from rpython.jit.backend.llsupport.gc import * from rpython.jit.metainterp.gc import get_description +from rpython.jit.metainterp import resoperation from rpython.jit.backend.llsupport.test.test_rewrite import ( RewriteTests, BaseFakeCPU) from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory + +def test_all_operations_with_gc_in_their_name(): + # hack, but will fail if we add a new ResOperation called .._GC_.. 
+ import os, re + r_gc = re.compile(r"(^|_)GC(_|$)") + with open(os.path.join(os.path.dirname( + os.path.dirname(os.path.abspath(__file__))), 'stmrewrite.py')) as f: + source = f.read() + words = re.split("\W", source) + # extra op names with GC in their name but where it's ok if stmrewrite + # doesn't mention them: + words.append('CALL_MALLOC_GC') + words.append('COND_CALL_GC_WB') + words.append('COND_CALL_GC_WB_ARRAY') + # + words = set(words) + missing = [] + for name in sorted(resoperation.opname.values()): + if r_gc.search(name): + if name not in words: + missing.append(name) + assert not missing + + class TestStm(RewriteTests): def setup_method(self, meth): class config_(object): From noreply at buildbot.pypy.org Thu Aug 22 21:15:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 21:15:50 +0200 (CEST) Subject: [pypy-commit] stmgc nonmovable-int-ref: Close branch about to be merged Message-ID: <20130822191550.E4E201C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: nonmovable-int-ref Changeset: r492:b1da024ad4b2 Date: 2013-08-22 21:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/b1da024ad4b2/ Log: Close branch about to be merged From noreply at buildbot.pypy.org Thu Aug 22 21:15:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 21:15:52 +0200 (CEST) Subject: [pypy-commit] stmgc default: hg merge nonmovable-int-ref Message-ID: <20130822191552.881251C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r493:e14cbe1e040b Date: 2013-08-22 21:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/e14cbe1e040b/ Log: hg merge nonmovable-int-ref diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -20,6 +20,7 @@ #define MAXROOTS 1000 #define SHARED_ROOTS 5 // shared by threads #define DO_MAJOR_COLLECTS 1 +#define MAX_PUBLIC_INTS 5 @@ -82,6 +83,8 @@ int interruptible; int atomic; char to_clear_on_abort[20]; + intptr_t public_ints[MAX_PUBLIC_INTS]; + int num_public_ints; }; __thread struct thread_data td; @@ -266,6 +269,38 @@ } } +void check_public_ints() +{ + int i; + for (i = 0; i < td.num_public_ints; i++) { + intptr_t ip = td.public_ints[i]; + gcptr obj = (gcptr)ip; + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(obj->h_tid & GCFLAG_SMALLSTUB); + check(obj); + check((gcptr)(obj->h_revision - 2)); + } +} + +void add_as_public_int(gcptr p) +{ + if (!p || td.num_public_ints >= MAX_PUBLIC_INTS) + return; + + push_roots(); + intptr_t ip = stm_allocate_public_integer_address(p); + pop_roots(); + td.public_ints[td.num_public_ints++] = ip; +} + +void pop_public_int() +{ + if (td.num_public_ints == 0) + return; + + stm_unregister_integer_address(td.public_ints[--td.num_public_ints]); +} + gcptr read_barrier(gcptr p) { gcptr r = p; @@ -401,6 +436,7 @@ gcptr rare_events(gcptr p, gcptr _r, gcptr _sr) { + check_public_ints(); int k = get_rand(100); if (k < 10) { push_roots(); @@ -408,13 +444,22 @@ stm_become_inevitable("fun"); p = stm_pop_root(); pop_roots(); - } + } else if (k < 40) { push_roots(); stmgc_minor_collect(); pop_roots(); p = NULL; - } else if (k < 41 && DO_MAJOR_COLLECTS) { + } + else if (k < 50) { + add_as_public_int(p); + p = NULL; + } + else if (k < 60) { + pop_public_int(); + p = NULL; + } + else if (k < 61 && DO_MAJOR_COLLECTS) { fprintf(stdout, "major collect\n"); push_roots(); stmgcpage_possibly_major_collect(1); diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -6,7 +6,6 @@ */ #include "stmimpl.h" -#ifdef _GC_DEBUG char tmp_buf[128]; char* 
stm_dbg_get_hdr_str(gcptr obj) { @@ -26,7 +25,6 @@ cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); return tmp_buf; } -#endif diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -23,6 +23,53 @@ stm_bytes_to_clear_on_abort = bytes; } + +intptr_t stm_allocate_public_integer_address(gcptr obj) +{ + struct tx_descriptor *d = thread_descriptor; + gcptr stub; + intptr_t result; + /* plan: we allocate a small stub whose reference + we never give to the caller except in the form + of an integer. + During major collections, we visit them and update + their references. */ + + /* we don't want to deal with young objs */ + if (!(obj->h_tid & GCFLAG_OLD)) { + stm_push_root(obj); + stm_minor_collect(); + obj = stm_pop_root(); + } + + spinlock_acquire(d->public_descriptor->collection_lock, 'P'); + + stub = stm_stub_malloc(d->public_descriptor, 0); + stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) + | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_SMALLSTUB + | GCFLAG_OLD; + + stub->h_revision = ((revision_t)obj) | 2; + if (!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) && obj->h_original) { + stub->h_original = obj->h_original; + } + else { + stub->h_original = (revision_t)obj; + } + + result = (intptr_t)stub; + spinlock_release(d->public_descriptor->collection_lock); + stm_register_integer_address(result); + + dprintf(("allocate_public_int_adr(%p): %p", obj, stub)); + return result; +} + + + + + + /************************************************************/ /* Each object has a h_original pointer to an old copy of the same object (e.g. an old revision), the "original". diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -22,6 +22,9 @@ /* Only computed during a major collection */ static size_t mc_total_in_use, mc_total_reserved; +/* keeps track of registered smallstubs that will survive unless unregistered */ +static struct G2L registered_stubs; + /* For tests */ long stmgcpage_count(int quantity) { @@ -62,6 +65,8 @@ nblocks_for_size[i] = (GC_PAGE_SIZE - sizeof(page_header_t)) / (WORD * i); } + + memset(®istered_stubs, 0, sizeof(registered_stubs)); } void stmgcpage_init_tls(void) @@ -208,6 +213,34 @@ } +/***** registering of small stubs as integer addresses *****/ + +void stm_register_integer_address(intptr_t adr) +{ + gcptr obj = (gcptr)adr; + assert(obj->h_tid & GCFLAG_SMALLSTUB); + assert(obj->h_tid & GCFLAG_PUBLIC); + + stmgcpage_acquire_global_lock(); + g2l_insert(®istered_stubs, obj, NULL); + stmgcpage_release_global_lock(); + dprintf(("registered %p\n", obj)); +} + +void stm_unregister_integer_address(intptr_t adr) +{ + gcptr obj = (gcptr)adr; + assert(obj->h_tid & GCFLAG_SMALLSTUB); + assert(obj->h_tid & GCFLAG_PUBLIC); + + stmgcpage_acquire_global_lock(); + g2l_delete_item(®istered_stubs, obj); + stmgcpage_release_global_lock(); + dprintf(("unregistered %p\n", obj)); +} + + + /***** Major collections: marking *****/ static struct GcPtrList objects_to_trace; @@ -459,6 +492,27 @@ } } +static void mark_registered_stubs(void) +{ + wlog_t *item; + G2L_LOOP_FORWARD(registered_stubs, item) { + gcptr R = item->addr; + assert(R->h_tid & GCFLAG_SMALLSTUB); + + R->h_tid |= (GCFLAG_MARKED | GCFLAG_VISITED); + + gcptr L = (gcptr)(R->h_revision - 2); + L = stmgcpage_visit(L); + R->h_revision = ((revision_t)L) | 2; + + /* h_original will be kept up-to-date because + it is either == L or L's h_original. 
And + h_originals don't move */ + } G2L_LOOP_END; + +} + + static void mark_roots(gcptr *root, gcptr *end) { assert(*root == END_MARKER_ON); @@ -897,6 +951,7 @@ assert(gcptrlist_size(&objects_to_trace) == 0); mark_prebuilt_roots(); + mark_registered_stubs(); mark_all_stack_roots(); do { visit_all_objects(); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -19,6 +19,59 @@ }; static __thread struct tx_steal_data *steal_data; +static void replace_ptr_to_immutable_with_stub(gcptr * pobj) +{ + gcptr stub, obj = *pobj; + assert(obj->h_tid & GCFLAG_IMMUTABLE); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + if (obj->h_tid & GCFLAG_PUBLIC) { + /* young public, replace with stolen old copy */ + assert(obj->h_tid & GCFLAG_MOVED); + assert(IS_POINTER(obj->h_revision)); + stub = (gcptr)obj->h_revision; + assert(!IS_POINTER(stub->h_revision)); /* not outdated */ + goto done; + } + + /* old or young protected! mark as PUBLIC */ + if (!(obj->h_tid & GCFLAG_OLD)) { + /* young protected */ + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); + + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; + } + obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. */ + dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); + stub = O; + goto done; + } + /* old protected: */ + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + + return; + done: + *pobj = stub; + dprintf((" stolen: fixing *%p: %p -> %p\n", pobj, obj, stub)); +} + static void replace_ptr_to_protected_with_stub(gcptr *pobj) { gcptr stub, obj = *pobj; @@ -27,49 +80,7 @@ return; if (obj->h_tid & GCFLAG_IMMUTABLE) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - if (obj->h_tid & GCFLAG_PUBLIC) { - /* young public, replace with stolen old copy */ - assert(obj->h_tid & GCFLAG_MOVED); - assert(IS_POINTER(obj->h_revision)); - stub = (gcptr)obj->h_revision; - assert(!IS_POINTER(stub->h_revision)); /* not outdated */ - goto done; - } - - /* old or young protected! mark as PUBLIC */ - if (!(obj->h_tid & GCFLAG_OLD)) { - /* young protected */ - gcptr O; - - if (obj->h_tid & GCFLAG_HAS_ID) { - /* use id-copy for us */ - O = (gcptr)obj->h_original; - obj->h_tid &= ~GCFLAG_HAS_ID; - stm_copy_to_old_id_copy(obj, O); - O->h_original = 0; - } else { - O = stmgc_duplicate_old(obj); - - /* young and without original? */ - if (!(obj->h_original)) - obj->h_original = (revision_t)O; - } - obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); - obj->h_revision = (revision_t)O; - - O->h_tid |= GCFLAG_PUBLIC; - /* here it is fine if it stays in read caches because - the object is immutable anyway and there are no - write_barriers allowed. */ - dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); - stub = O; - goto done; - } - /* old protected: */ - dprintf(("prot immutable -> public: %p\n", obj)); - obj->h_tid |= GCFLAG_PUBLIC; - + replace_ptr_to_immutable_with_stub(pobj); return; } diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -28,12 +28,21 @@ #define PREBUILT_REVISION 1 +/* push roots around allocating functions! 
*/ + /* allocate an object out of the local nursery */ gcptr stm_allocate(size_t size, unsigned long tid); /* allocate an object that is be immutable. it cannot be changed with a stm_write_barrier() or after the next commit */ gcptr stm_allocate_immutable(size_t size, unsigned long tid); +/* allocates a public reference to the object that will + not be freed until stm_unregister_integer_address is + called on the result */ +intptr_t stm_allocate_public_integer_address(gcptr); +void stm_unregister_integer_address(intptr_t); + + /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); /* returns a number for the object which is unique during its lifetime */ @@ -171,6 +180,8 @@ extern __thread void *stm_to_clear_on_abort; extern __thread size_t stm_bytes_to_clear_on_abort; +/* only user currently is stm_allocate_public_integer_address() */ +void stm_register_integer_address(intptr_t); /* macro functionality */ diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -47,6 +47,9 @@ #define PREBUILT_REVISION ... gcptr stm_allocate(size_t size, unsigned long tid); + gcptr stm_allocate_immutable(size_t size, unsigned long tid); + intptr_t stm_allocate_public_integer_address(gcptr adr); + void stm_unregister_integer_address(intptr_t adr); revision_t stm_hash(gcptr); revision_t stm_id(gcptr); _Bool stm_pointer_equal(gcptr, gcptr); diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -199,5 +199,45 @@ B = follow_revision(p1o) assert follow_original(B) == p1o + +def test_allocate_public_integer_address(): + p1 = palloc(HDR) + p2 = oalloc(HDR) + p3 = nalloc(HDR) + lib.stm_push_root(p3) + p3p = lib.stm_allocate_public_integer_address(p3) + p1p = lib.stm_allocate_public_integer_address(p1) + p2p = lib.stm_allocate_public_integer_address(p2) + + # p3 stub points to p3o: + p3o = lib.stm_pop_root() + p3po = ffi.cast("gcptr", p3p) + assert ffi.cast("gcptr", p3po.h_revision - 2) == p3o + + # we have stubs here: + assert ffi.cast("gcptr", p1p).h_tid & GCFLAG_PUBLIC + assert classify(ffi.cast("gcptr", p1p)) == 'stub' + assert classify(ffi.cast("gcptr", p2p)) == 'stub' + assert classify(ffi.cast("gcptr", p3p)) == 'stub' + + major_collect() + + # kept alive through stubs: + check_not_free(p3o) + check_not_free(p2) + + check_not_free(ffi.cast("gcptr", p1p)) + check_not_free(ffi.cast("gcptr", p2p)) + check_not_free(ffi.cast("gcptr", p3p)) + + lib.stm_unregister_integer_address(p1p) + lib.stm_unregister_integer_address(p2p) + lib.stm_unregister_integer_address(p3p) + + major_collect() + major_collect() + check_free_old(p3o) + check_free_old(p2) + From noreply at buildbot.pypy.org Thu Aug 22 21:18:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 21:18:10 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/e14cbe1e040b Message-ID: <20130822191810.241271C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66295:98d1111982c2 Date: 2013-08-22 21:16 +0200 http://bitbucket.org/pypy/pypy/changeset/98d1111982c2/ Log: import stmgc/e14cbe1e040b diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -7,7 +7,6 @@ */ #include "stmimpl.h" -#ifdef _GC_DEBUG char tmp_buf[128]; char* stm_dbg_get_hdr_str(gcptr obj) { @@ -27,7 +26,6 @@ cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); return 
tmp_buf; } -#endif diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -24,6 +24,53 @@ stm_bytes_to_clear_on_abort = bytes; } + +intptr_t stm_allocate_public_integer_address(gcptr obj) +{ + struct tx_descriptor *d = thread_descriptor; + gcptr stub; + intptr_t result; + /* plan: we allocate a small stub whose reference + we never give to the caller except in the form + of an integer. + During major collections, we visit them and update + their references. */ + + /* we don't want to deal with young objs */ + if (!(obj->h_tid & GCFLAG_OLD)) { + stm_push_root(obj); + stm_minor_collect(); + obj = stm_pop_root(); + } + + spinlock_acquire(d->public_descriptor->collection_lock, 'P'); + + stub = stm_stub_malloc(d->public_descriptor, 0); + stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) + | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_SMALLSTUB + | GCFLAG_OLD; + + stub->h_revision = ((revision_t)obj) | 2; + if (!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) && obj->h_original) { + stub->h_original = obj->h_original; + } + else { + stub->h_original = (revision_t)obj; + } + + result = (intptr_t)stub; + spinlock_release(d->public_descriptor->collection_lock); + stm_register_integer_address(result); + + dprintf(("allocate_public_int_adr(%p): %p", obj, stub)); + return result; +} + + + + + + /************************************************************/ /* Each object has a h_original pointer to an old copy of the same object (e.g. an old revision), the "original". diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -23,6 +23,9 @@ /* Only computed during a major collection */ static size_t mc_total_in_use, mc_total_reserved; +/* keeps track of registered smallstubs that will survive unless unregistered */ +static struct G2L registered_stubs; + /* For tests */ long stmgcpage_count(int quantity) { @@ -63,6 +66,8 @@ nblocks_for_size[i] = (GC_PAGE_SIZE - sizeof(page_header_t)) / (WORD * i); } + + memset(®istered_stubs, 0, sizeof(registered_stubs)); } void stmgcpage_init_tls(void) @@ -209,6 +214,34 @@ } +/***** registering of small stubs as integer addresses *****/ + +void stm_register_integer_address(intptr_t adr) +{ + gcptr obj = (gcptr)adr; + assert(obj->h_tid & GCFLAG_SMALLSTUB); + assert(obj->h_tid & GCFLAG_PUBLIC); + + stmgcpage_acquire_global_lock(); + g2l_insert(®istered_stubs, obj, NULL); + stmgcpage_release_global_lock(); + dprintf(("registered %p\n", obj)); +} + +void stm_unregister_integer_address(intptr_t adr) +{ + gcptr obj = (gcptr)adr; + assert(obj->h_tid & GCFLAG_SMALLSTUB); + assert(obj->h_tid & GCFLAG_PUBLIC); + + stmgcpage_acquire_global_lock(); + g2l_delete_item(®istered_stubs, obj); + stmgcpage_release_global_lock(); + dprintf(("unregistered %p\n", obj)); +} + + + /***** Major collections: marking *****/ static struct GcPtrList objects_to_trace; @@ -460,6 +493,27 @@ } } +static void mark_registered_stubs(void) +{ + wlog_t *item; + G2L_LOOP_FORWARD(registered_stubs, item) { + gcptr R = item->addr; + assert(R->h_tid & GCFLAG_SMALLSTUB); + + R->h_tid |= (GCFLAG_MARKED | GCFLAG_VISITED); + + gcptr L = (gcptr)(R->h_revision - 2); + L = stmgcpage_visit(L); + R->h_revision = ((revision_t)L) | 2; + + /* h_original will be kept up-to-date because + it is either == L or L's h_original. 
And + h_originals don't move */ + } G2L_LOOP_END; + +} + + static void mark_roots(gcptr *root, gcptr *end) { assert(*root == END_MARKER_ON); @@ -898,6 +952,7 @@ assert(gcptrlist_size(&objects_to_trace) == 0); mark_prebuilt_roots(); + mark_registered_stubs(); mark_all_stack_roots(); do { visit_all_objects(); diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -c7b63aa9d1ad +e14cbe1e040b diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -20,6 +20,59 @@ }; static __thread struct tx_steal_data *steal_data; +static void replace_ptr_to_immutable_with_stub(gcptr * pobj) +{ + gcptr stub, obj = *pobj; + assert(obj->h_tid & GCFLAG_IMMUTABLE); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + if (obj->h_tid & GCFLAG_PUBLIC) { + /* young public, replace with stolen old copy */ + assert(obj->h_tid & GCFLAG_MOVED); + assert(IS_POINTER(obj->h_revision)); + stub = (gcptr)obj->h_revision; + assert(!IS_POINTER(stub->h_revision)); /* not outdated */ + goto done; + } + + /* old or young protected! mark as PUBLIC */ + if (!(obj->h_tid & GCFLAG_OLD)) { + /* young protected */ + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); + + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; + } + obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. */ + dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); + stub = O; + goto done; + } + /* old protected: */ + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + + return; + done: + *pobj = stub; + dprintf((" stolen: fixing *%p: %p -> %p\n", pobj, obj, stub)); +} + static void replace_ptr_to_protected_with_stub(gcptr *pobj) { gcptr stub, obj = *pobj; @@ -28,49 +81,7 @@ return; if (obj->h_tid & GCFLAG_IMMUTABLE) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - if (obj->h_tid & GCFLAG_PUBLIC) { - /* young public, replace with stolen old copy */ - assert(obj->h_tid & GCFLAG_MOVED); - assert(IS_POINTER(obj->h_revision)); - stub = (gcptr)obj->h_revision; - assert(!IS_POINTER(stub->h_revision)); /* not outdated */ - goto done; - } - - /* old or young protected! mark as PUBLIC */ - if (!(obj->h_tid & GCFLAG_OLD)) { - /* young protected */ - gcptr O; - - if (obj->h_tid & GCFLAG_HAS_ID) { - /* use id-copy for us */ - O = (gcptr)obj->h_original; - obj->h_tid &= ~GCFLAG_HAS_ID; - stm_copy_to_old_id_copy(obj, O); - O->h_original = 0; - } else { - O = stmgc_duplicate_old(obj); - - /* young and without original? */ - if (!(obj->h_original)) - obj->h_original = (revision_t)O; - } - obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); - obj->h_revision = (revision_t)O; - - O->h_tid |= GCFLAG_PUBLIC; - /* here it is fine if it stays in read caches because - the object is immutable anyway and there are no - write_barriers allowed. 
*/ - dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); - stub = O; - goto done; - } - /* old protected: */ - dprintf(("prot immutable -> public: %p\n", obj)); - obj->h_tid |= GCFLAG_PUBLIC; - + replace_ptr_to_immutable_with_stub(pobj); return; } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -29,12 +29,21 @@ #define PREBUILT_REVISION 1 +/* push roots around allocating functions! */ + /* allocate an object out of the local nursery */ gcptr stm_allocate(size_t size, unsigned long tid); /* allocate an object that is be immutable. it cannot be changed with a stm_write_barrier() or after the next commit */ gcptr stm_allocate_immutable(size_t size, unsigned long tid); +/* allocates a public reference to the object that will + not be freed until stm_unregister_integer_address is + called on the result */ +intptr_t stm_allocate_public_integer_address(gcptr); +void stm_unregister_integer_address(intptr_t); + + /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); /* returns a number for the object which is unique during its lifetime */ @@ -172,6 +181,8 @@ extern __thread void *stm_to_clear_on_abort; extern __thread size_t stm_bytes_to_clear_on_abort; +/* only user currently is stm_allocate_public_integer_address() */ +void stm_register_integer_address(intptr_t); /* macro functionality */ From noreply at buildbot.pypy.org Thu Aug 22 21:18:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 21:18:12 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: hg merge stmgc-c4 Message-ID: <20130822191812.6D0E41C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66296:a40a970bbea5 Date: 2013-08-22 21:17 +0200 http://bitbucket.org/pypy/pypy/changeset/a40a970bbea5/ Log: hg merge stmgc-c4 diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -100,7 +100,7 @@ if ofs >= 0: asm.append((ofs, v.strip("\n"))) # - prefix = hex(dump_start)[:-8] + prefix = hex(dump_start)[:-9] asm_index = 0 for i, op in enumerate(loop.operations): end = 0 diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -626,7 +626,7 @@ # the frame is in fp, but we have to point where in the frame is # the potential argument to FINISH descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) + fail_descr = rgc.cast_instance_to_gcref(descr) # we know it does not move, but well fail_descr = rgc._make_sure_does_not_move(fail_descr) fail_descr = rgc.cast_gcref_to_int(fail_descr) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -172,8 +172,6 @@ break exc = guardtok.exc target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = cast_instance_to_gcref(guardtok.faildescr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) base_ofs = self.cpu.get_baseofs_of_frame_field() positions = [0] * len(guardtok.fail_locs) for i, loc in enumerate(guardtok.fail_locs): @@ -196,6 +194,8 @@ guardtok.faildescr.rd_locs = positions # we want the descr to keep alive guardtok.faildescr.rd_loop_token = self.current_clt + fail_descr 
= rgc.cast_instance_to_gcref(guardtok.faildescr) + fail_descr = rgc._make_sure_does_not_move(fail_descr) return fail_descr, target def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc): @@ -226,10 +226,8 @@ else: raise AssertionError(kind) - import pdb;pdb.set_trace() - gcref = cast_instance_to_gcref(value) - gcref = rgc._make_sure_does_not_move(gcref) - value = rffi.cast(lltype.Signed, gcref) + gcref = rgc.cast_instance_to_gcref(value) + value = rgc._make_sure_does_not_move(gcref) je_location = self._call_assembler_check_descr(value, tmploc) # # Path A: use assembler_helper_adr diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -102,24 +102,19 @@ for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): - p = v.value - new_p = rgc._make_sure_does_not_move(p) - v.value = new_p - gcrefs_output_list.append(new_p) - + v.imm_value = rgc._make_sure_does_not_move(v.value) + # XXX: fix for stm, record imm_values and unregister + # them again (below too): + gcrefs_output_list.append(v.value) + + if self.stm: + return # for descr, we do it on the fly in assembler.py if op.is_guard() or op.getopnum() == rop.FINISH: # the only ops with descrs that get recorded in a trace - from rpython.jit.metainterp.history import AbstractDescr descr = op.getdescr() - llref = cast_instance_to_gcref(descr) - new_llref = rgc._make_sure_does_not_move(llref) - if we_are_translated(): - new_d = cast_base_ptr_to_instance(AbstractDescr, new_llref) - # tests don't allow this: - op.setdescr(new_d) - else: - assert llref == new_llref - gcrefs_output_list.append(new_llref) + llref = rgc.cast_instance_to_gcref(descr) + rgc._make_sure_does_not_move(llref) + gcrefs_output_list.append(llref) def rewrite_assembler(self, cpu, operations, gcrefs_output_list): if not self.stm: @@ -431,8 +426,8 @@ @specialize.arg(2) def _do_barrier(self, gcref_struct, returns_modified_object): + raise NotImplementedError("implement in subclasses!") assert self.returns_modified_object == returns_modified_object - # XXX: fastpath for Read and Write variants funcptr = self.get_barrier_funcptr(returns_modified_object) res = funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) @@ -442,16 +437,59 @@ def __init__(self, gc_ll_descr, stmcat): assert stmcat == 'P2R' STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_read_barrier') + 'stm_DirectReadBarrier') # XXX: implement fastpath then change to stm_DirectReadBarrier + @specialize.arg(2) + def _do_barrier(self, gcref_struct, returns_modified_object): + assert returns_modified_object + from rpython.memory.gc.stmgc import StmGC + objadr = llmemory.cast_ptr_to_adr(gcref_struct) + objhdr = rffi.cast(StmGC.GCHDRP, gcref_struct) + + # if h_revision == privat_rev of transaction + priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) + if objhdr.h_revision == priv_rev[0]: + return gcref_struct + + # readcache[obj] == obj + read_cache = self.llop1.stm_get_adr_of_read_barrier_cache(rffi.SIGNEDP) + objint = llmemory.cast_adr_to_int(objadr) + assert WORD == 8, "check for 32bit compatibility" + index = (objint & StmGC.FX_MASK) / WORD + CP = lltype.Ptr(rffi.CArray(lltype.Signed)) + rcp = rffi.cast(CP, read_cache[0]) + if rcp[index] == objint: + return gcref_struct + + funcptr = self.get_barrier_funcptr(returns_modified_object) + res = funcptr(objadr) + return 
llmemory.cast_adr_to_ptr(res, llmemory.GCREF) + class STMWriteBarrierDescr(STMBarrierDescr): def __init__(self, gc_ll_descr, stmcat): assert stmcat in ['P2W'] STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_write_barrier') - # XXX: implement fastpath, then change to stm_WriteBarrier + 'stm_WriteBarrier') + + @specialize.arg(2) + def _do_barrier(self, gcref_struct, returns_modified_object): + assert returns_modified_object + from rpython.memory.gc.stmgc import StmGC + objadr = llmemory.cast_ptr_to_adr(gcref_struct) + objhdr = rffi.cast(StmGC.GCHDRP, gcref_struct) + + # if h_revision == privat_rev of transaction + priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) + if objhdr.h_revision == priv_rev[0]: + # also WRITE_BARRIER not set? + if not (objhdr.h_tid & StmGC.GCFLAG_WRITE_BARRIER): + return gcref_struct + + funcptr = self.get_barrier_funcptr(returns_modified_object) + res = funcptr(objadr) + return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) class GcLLDescr_framework(GcLLDescription): @@ -664,6 +702,12 @@ if self.stm: # XXX remove the indirections in the following calls from rpython.rlib import rstm + def stm_allocate_nonmovable_int_adr(obj): + return llop1.stm_allocate_nonmovable_int_adr( + lltype.Signed, obj) + self.generate_function('stm_allocate_nonmovable_int_adr', + stm_allocate_nonmovable_int_adr, + [llmemory.GCREF], RESULT=lltype.Signed) self.generate_function('stm_try_inevitable', rstm.become_inevitable, [], RESULT=lltype.Void) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -52,6 +52,10 @@ self._setup_exception_handling_untranslated() self.asmmemmgr = AsmMemoryManager() self._setup_frame_realloc(translate_support_code) + self._setup_descrs() + self.setup() + + def _setup_descrs(self): ad = self.gc_ll_descr.getframedescrs(self).arraydescr self.signedarraydescr = ad # the same as normal JITFRAME, however with an array of pointers @@ -63,7 +67,6 @@ else: self.floatarraydescr = ArrayDescr(ad.basesize, ad.itemsize, ad.lendescr, FLAG_FLOAT) - self.setup() def getarraydescr_for_frame(self, type): if type == history.FLOAT: @@ -475,6 +478,7 @@ def bh_arraylen_gc(self, array, arraydescr): assert isinstance(arraydescr, ArrayDescr) + array = self.gc_ll_descr.do_stm_barrier(array, 'R') ofs = arraydescr.lendescr.offset return rffi.cast(rffi.CArrayPtr(lltype.Signed), array)[ofs/WORD] @@ -617,18 +621,22 @@ # --- end of GC unsafe code --- def bh_strlen(self, string): + string = self.gc_ll_descr.do_stm_barrier(string, 'R') s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string) return len(s.chars) def bh_unicodelen(self, string): + string = self.gc_ll_descr.do_stm_barrier(string, 'R') u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) return len(u.chars) def bh_strgetitem(self, string, index): + string = self.gc_ll_descr.do_stm_barrier(string, 'R') s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string) return ord(s.chars[index]) def bh_unicodegetitem(self, string, index): + string = self.gc_ll_descr.do_stm_barrier(string, 'R') u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) return ord(u.chars[index]) @@ -759,6 +767,8 @@ def bh_new_with_vtable(self, vtable, sizedescr): res = self.gc_ll_descr.gc_malloc(sizedescr) if self.vtable_offset is not None: + assert not self.gc_ll_descr.stm + res = self.gc_ll_descr.do_stm_barrier(res, 'W') as_array = rffi.cast(rffi.CArrayPtr(lltype.Signed), res) 
as_array[self.vtable_offset/WORD] = vtable return res @@ -767,6 +777,7 @@ return lltype.malloc(rffi.CCHARP.TO, size, flavor='raw') def bh_classof(self, struct): + struct = self.gc_ll_descr.do_stm_barrier(struct, 'R') struct = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct) result_adr = llmemory.cast_ptr_to_adr(struct.typeptr) return heaptracker.adr2int(result_adr) @@ -781,19 +792,25 @@ return self.gc_ll_descr.gc_malloc_unicode(length) def bh_strsetitem(self, string, index, newvalue): + string = self.gc_ll_descr.do_stm_barrier(string, 'W') s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string) s.chars[index] = chr(newvalue) def bh_unicodesetitem(self, string, index, newvalue): + string = self.gc_ll_descr.do_stm_barrier(string, 'W') u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) u.chars[index] = unichr(newvalue) def bh_copystrcontent(self, src, dst, srcstart, dststart, length): + src = self.gc_ll_descr.do_stm_barrier(src, 'R') + dst = self.gc_ll_descr.do_stm_barrier(dst, 'W') src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src) dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst) rstr.copy_string_contents(src, dst, srcstart, dststart, length) def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): + src = self.gc_ll_descr.do_stm_barrier(src, 'R') + dst = self.gc_ll_descr.do_stm_barrier(dst, 'W') src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src) dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst) rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -172,8 +172,8 @@ size_box, descr=descrs.jfi_frame_size) self.newops.append(op0) - self.gen_malloc_nursery_varsize_frame(size_box, frame) - self.gen_initialize_tid(frame, descrs.arraydescr.tid) + self.gen_malloc_nursery_varsize_frame(size_box, frame, + descrs.arraydescr.tid) length_box = history.BoxInt() op1 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], length_box, @@ -321,7 +321,7 @@ self.recent_mallocs[v_result] = None return True - def gen_malloc_nursery_varsize_frame(self, sizebox, v_result): + def gen_malloc_nursery_varsize_frame(self, sizebox, v_result, tid): """ Generate CALL_MALLOC_NURSERY_VARSIZE_FRAME """ self.emitting_an_operation_that_can_collect() @@ -332,6 +332,8 @@ self.newops.append(op) self.recent_mallocs[v_result] = None + self.gen_initialize_tid(v_result, tid) + def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. If that fails, generate a plain CALL_MALLOC_GC instead. 
diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -3,6 +3,7 @@ from rpython.jit.metainterp.history import BoxPtr, ConstPtr, ConstInt from rpython.rlib.objectmodel import specialize from rpython.rlib.objectmodel import we_are_translated +from rpython.jit.metainterp import history # # STM Support @@ -92,11 +93,13 @@ continue # ---------- calls ---------- if op.is_call(): - self.known_category.clear() if op.getopnum() == rop.CALL_RELEASE_GIL: self.fallback_inevitable(op) + elif op.getopnum() == rop.CALL_ASSEMBLER: + self.handle_call_assembler(op) else: self.newops.append(op) + self.known_category.clear() continue # ---------- copystrcontent ---------- if op.getopnum() in (rop.COPYSTRCONTENT, @@ -138,8 +141,15 @@ for v, c in self.known_category.items(): if c == 'R': self.known_category[v] = 'P' - - + + def gen_malloc_nursery_varsize_frame(self, sizebox, v_result, tid): + """ For now don't generate CALL_MALLOC_NURSERY_VARSIZE_FRAME + """ + addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + args = [ConstInt(addr), sizebox, ConstInt(tid)] + descr = self.gc_ll_descr.malloc_big_fixedsize_descr + self._gen_call_malloc_gc(args, v_result, descr) + def gen_write_barrier(self, v): raise NotImplementedError diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1,10 +1,35 @@ from rpython.jit.backend.llsupport.descr import * from rpython.jit.backend.llsupport.gc import * from rpython.jit.metainterp.gc import get_description +from rpython.jit.metainterp import resoperation from rpython.jit.backend.llsupport.test.test_rewrite import ( RewriteTests, BaseFakeCPU) from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory + +def test_all_operations_with_gc_in_their_name(): + # hack, but will fail if we add a new ResOperation called .._GC_.. 
+ import os, re + r_gc = re.compile(r"(^|_)GC(_|$)") + with open(os.path.join(os.path.dirname( + os.path.dirname(os.path.abspath(__file__))), 'stmrewrite.py')) as f: + source = f.read() + words = re.split("\W", source) + # extra op names with GC in their name but where it's ok if stmrewrite + # doesn't mention them: + words.append('CALL_MALLOC_GC') + words.append('COND_CALL_GC_WB') + words.append('COND_CALL_GC_WB_ARRAY') + # + words = set(words) + missing = [] + for name in sorted(resoperation.opname.values()): + if r_gc.search(name): + if name not in words: + missing.append(name) + assert not missing + + class TestStm(RewriteTests): def setup_method(self, meth): class config_(object): diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -8,7 +8,7 @@ import os from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype -from rpython.rlib.jit import JitDriver, dont_look_inside +from rpython.rlib.jit import JitDriver, dont_look_inside, promote from rpython.rlib.jit import elidable, unroll_safe from rpython.jit.backend.llsupport.gc import GcLLDescr_framework from rpython.tool.udir import udir @@ -795,6 +795,7 @@ def define_compile_framework_ptr_eq(cls): # test ptr_eq + @dont_look_inside def raiseassert(cond): if not bool(cond): raise AssertionError @@ -808,18 +809,27 @@ @unroll_safe def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, ptrs, s): + if n % 3 == 0: + x0 = promote(x0) + elif n % 3 == 1: + x1 = promote(x1) + else: + x2 = promote(x2) raiseassert(x0 != ptrs[0]) raiseassert(x0 == ptrs[1]) raiseassert(x0 != ptrs[2]) raiseassert(x0 != ptrs[3]) + raiseassert(x1 != ptrs[0]) raiseassert(x1 != ptrs[1]) raiseassert(x1 == ptrs[2]) raiseassert(x1 != ptrs[3]) + raiseassert(x2 == ptrs[0]) raiseassert(x2 != ptrs[1]) raiseassert(x2 != ptrs[2]) raiseassert(x2 != ptrs[3]) + raiseassert(ptrs[0] is None) raiseassert(ptrs[1] is not None) raiseassert(ptrs[2] is not None) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -249,8 +249,8 @@ self._store_and_reset_exception(self.mc, eax) ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') self.mc.MOV_br(ofs, eax.value) - propagate_exception_descr = rffi.cast(lltype.Signed, - cast_instance_to_gcref(self.cpu.propagate_exception_descr)) + propagate_exception_descr = rgc._make_sure_does_not_move( + rgc.cast_instance_to_gcref(self.cpu.propagate_exception_descr)) ofs = self.cpu.get_ofs_of_frame_field('jf_descr') self.mc.MOV(RawEbpLoc(ofs), imm(propagate_exception_descr)) self.mc.MOV_rr(eax.value, ebp.value) @@ -876,6 +876,19 @@ return rst def _call_header_shadowstack(self, gcrootmap): + # do a write-barrier on ebp / frame for stm + # XXX: may not be necessary if we are sure that we only get + # freshly allocated frames or already write-ready frames + # from the caller... 
+ gc_ll_descr = self.cpu.gc_ll_descr + gcrootmap = gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_stm: + if not hasattr(gc_ll_descr, 'P2Wdescr'): + raise Exception("unreachable code") + wbdescr = gc_ll_descr.P2Wdescr + self._stm_barrier_fastpath(self.mc, wbdescr, [ebp], is_frame=True) + + # put the frame in ebp on the shadowstack for the GC to find rst = self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) self.mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp self.mc.ADD_ri(ebx.value, WORD) @@ -2108,10 +2121,10 @@ cb.emit() def _store_force_index(self, guard_op): - faildescr = guard_op.getdescr() + faildescr = rgc._make_sure_does_not_move( + rgc.cast_instance_to_gcref(guard_op.getdescr())) ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - self.mc.MOV(raw_stack(ofs), imm(rffi.cast(lltype.Signed, - cast_instance_to_gcref(faildescr)))) + self.mc.MOV(raw_stack(ofs), imm(faildescr)) def _emit_guard_not_forced(self, guard_token): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') @@ -2189,6 +2202,15 @@ def _call_assembler_check_descr(self, value, tmploc): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') + + if self.cpu.gc_ll_descr.stm: + # value is non-moving, but jf_descr may have a changed + # descr -> different copy + self._stm_ptr_eq_fastpath(self.mc, [mem(eax, ofs), imm(value)], + tmploc) + self.mc.J_il8(rx86.Conditions['NZ'], 0) + return self.mc.get_relative_pos() + self.mc.CMP(mem(eax, ofs), imm(value)) # patched later self.mc.J_il8(rx86.Conditions['E'], 0) # goto B if we get 'done_with_this_frame' @@ -2315,8 +2337,9 @@ mc.CALL(imm(func)) # result still on stack mc.POP_r(X86_64_SCRATCH_REG.value) - # set flags: - mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) + # _Bool return type only sets lower 8 bits of return value + sl = X86_64_SCRATCH_REG.lowest8bits() + mc.TEST8_rr(sl.value, sl.value) # # END SLOWPATH # diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -8,7 +8,7 @@ BoxFloat, INT, REF, FLOAT, TargetToken) from rpython.jit.backend.x86.regloc import * -from rpython.rtyper.lltypesystem import lltype, rffi, rstr +from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rlib.objectmodel import we_are_translated from rpython.rlib import rgc @@ -45,9 +45,11 @@ if isinstance(c, ConstInt): return imm(c.value) elif isinstance(c, ConstPtr): - if we_are_translated() and c.value and rgc.can_move(c.value): + # if we_are_translated() and c.value and rgc.can_move(c.value): + # not_implemented("convert_to_imm: ConstPtr needs special care") + if c.value and not c.imm_value: not_implemented("convert_to_imm: ConstPtr needs special care") - return imm(rffi.cast(lltype.Signed, c.value)) + return imm(c.get_imm_value()) else: not_implemented("convert_to_imm: got a %s" % c) @@ -369,7 +371,6 @@ fail_descr = rgc.cast_instance_to_gcref(descr) # we know it does not move, but well fail_descr = rgc._make_sure_does_not_move(fail_descr) - fail_descr = rgc.cast_gcref_to_int(fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) locs = [loc, imm(fail_descr)] diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -539,6 +539,7 @@ PUSH_r = insn(rex_nw, register(1), '\x50') PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) + PUSH_m = 
insn(rex_nw, '\xFF', orbyte(6<<3), mem_reg_plus_const(1)) PUSH_i8 = insn('\x6A', immediate(1, 'b')) PUSH_i32 = insn('\x68', immediate(1, 'i')) def PUSH_i(mc, immed): @@ -549,6 +550,7 @@ POP_r = insn(rex_nw, register(1), '\x58') POP_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) + POP_m = insn(rex_nw, '\x8F', orbyte(0<<3), mem_reg_plus_const(1)) LEA_rb = insn(rex_w, '\x8D', register(1,8), stack_bp(2)) LEA_rs = insn(rex_w, '\x8D', register(1,8), stack_sp(2)) @@ -584,6 +586,7 @@ TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b')) TEST8_bi = insn(rex_nw, '\xF6', orbyte(0<<3), stack_bp(1), immediate(2, 'b')) TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), abs_(1), immediate(2, 'b')) + TEST8_rr = insn(rex_fw, '\x84', byte_register(2,8), byte_register(1),'\xC0') TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0') BTS_mr = insn(rex_w, '\x0F\xAB', register(2,8), mem_reg_plus_const(1)) diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -1,5 +1,5 @@ import py -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr, rclass from rpython.jit.metainterp.history import ResOperation, TargetToken,\ JitCellToken from rpython.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, @@ -20,9 +20,18 @@ from rpython.jit.backend.llsupport import jitframe from rpython.memory.gc.stmgc import StmGC from rpython.jit.metainterp import history +from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.rlib import rgc +from rpython.rtyper.llinterp import LLException import itertools, sys import ctypes +def cast_to_int(obj): + if isinstance(obj, rgc._GcRef): + return rgc.cast_gcref_to_int(obj) + else: + return rffi.cast(lltype.Signed, obj) + CPU = getcpuclass() class MockSTMRootMap(object): @@ -34,9 +43,6 @@ self.stack_addr = lltype.malloc(TP, 1, flavor='raw') self.stack_addr[0] = rffi.cast(lltype.Signed, self.stack) - def __del__(self): - lltype.free(self.stack_addr, flavor='raw') - lltype.free(self.stack, flavor='raw') def register_asm_addr(self, start, mark): pass def get_root_stack_top_addr(self): @@ -101,6 +107,31 @@ class FakeGCHeaderBuilder: size_gc_header = WORD +class fakellop: + PRIV_REV = 66 + def __init__(self): + self.TP = rffi.CArray(lltype.Signed) + self.privrevp = lltype.malloc(self.TP, n=1, flavor='raw', + track_allocation=False, zero=True) + self.privrevp[0] = fakellop.PRIV_REV + + entries = (StmGC.FX_MASK + 1) / WORD + self.read_cache = lltype.malloc(self.TP, n=entries, flavor='raw', + track_allocation=False, zero=True) + self.read_cache_adr = lltype.malloc(self.TP, 1, flavor='raw', + track_allocation=False) + self.read_cache_adr[0] = rffi.cast(lltype.Signed, self.read_cache) + + def set_cache_item(self, obj, value): + obj_int = rffi.cast(lltype.Signed, obj) + idx = (obj_int & StmGC.FX_MASK) / WORD + self.read_cache[idx] = rffi.cast(lltype.Signed, value) + + def stm_get_adr_of_private_rev_num(self, _): + return self.privrevp + + def stm_get_adr_of_read_barrier_cache(self, _): + return self.read_cache_adr class GCDescrStm(GCDescrShadowstackDirect): def __init__(self): @@ -142,11 +173,41 @@ inevitable, [], RESULT=lltype.Void) def ptr_eq(x, y): - self.ptr_eq_called_on.append((x, y)) + print "=== ptr_eq", hex(cast_to_int(x)), hex(cast_to_int(y)) + 
self.ptr_eq_called_on.append((cast_to_int(x), cast_to_int(y))) return x == y self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, RESULT=lltype.Bool) + def stm_allocate_nonmovable_int_adr(obj): + assert False # should not be reached + return rgc.cast_gcref_to_int(obj) + self.generate_function('stm_allocate_nonmovable_int_adr', + stm_allocate_nonmovable_int_adr, + [llmemory.GCREF], + RESULT=lltype.Signed) + + def malloc_big_fixedsize(size, tid): + print "malloc:", size, tid + if size > sys.maxint / 2: + # for testing exception + return lltype.nullptr(llmemory.GCREF.TO) + + entries = size + StmGC.GCHDRSIZE + TP = rffi.CArray(lltype.Char) + obj = lltype.malloc(TP, n=entries, flavor='raw', + track_allocation=False, zero=True) + objptr = rffi.cast(StmGC.GCHDRP, obj) + objptr.h_tid = rffi.cast(lltype.Unsigned, + StmGC.GCFLAG_OLD + | StmGC.GCFLAG_WRITE_BARRIER | tid) + objptr.h_revision = rffi.cast(lltype.Signed, -sys.maxint) + print "return:", obj, objptr + return rffi.cast(llmemory.GCREF, objptr) + self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize, + [lltype.Signed] * 2) + + def malloc_jitframe(self, frame_info): """ Allocate a new frame, overwritten by tests """ @@ -180,6 +241,18 @@ def setup_method(self, meth): cpu = CPU(None, None) cpu.gc_ll_descr = GCDescrStm() + + def latest_descr(self, deadframe): + deadframe = lltype.cast_opaque_ptr(JITFRAMEPTR, deadframe) + descr = deadframe.jf_descr + res = history.AbstractDescr.show(self, descr) + assert isinstance(res, history.AbstractFailDescr) + return res + import types + cpu.get_latest_descr = types.MethodType(latest_descr, cpu, + cpu.__class__) + + self.p2wd = cpu.gc_ll_descr.P2Wdescr self.p2rd = cpu.gc_ll_descr.P2Rdescr @@ -205,7 +278,7 @@ def assert_in(self, called_on, args): for i, ref in enumerate(args): - assert rffi.cast_ptr_to_adr(ref) == called_on[i] + assert rffi.cast_ptr_to_adr(ref) in called_on def assert_not_in(self, called_on, args): for ref in args: @@ -236,6 +309,94 @@ s.h_tid = rffi.cast(lltype.Unsigned, StmGC.PREBUILT_FLAGS | tid) s.h_revision = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) return s + + + + def test_gc_read_barrier_fastpath(self): + from rpython.jit.backend.llsupport.gc import STMReadBarrierDescr + descr = STMReadBarrierDescr(self.cpu.gc_ll_descr, 'P2R') + + called = [] + def read(obj): + called.append(obj) + return obj + + functype = lltype.Ptr(lltype.FuncType( + [llmemory.Address], llmemory.Address)) + funcptr = llhelper(functype, read) + descr.b_failing_case_ptr = funcptr + descr.llop1 = fakellop() + + # -------- TEST -------- + for rev in [fakellop.PRIV_REV+4, fakellop.PRIV_REV]: + called[:] = [] + + s = self.allocate_prebuilt_s() + sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) + s.h_revision = rev + + descr._do_barrier(sgcref, + returns_modified_object=True) + + # check if rev-fastpath worked + if rev == fakellop.PRIV_REV: + # fastpath + self.assert_not_in(called, [sgcref]) + else: + self.assert_in(called, [sgcref]) + + # now check if sgcref in readcache: + called[:] = [] + descr.llop1.set_cache_item(sgcref, sgcref) + descr._do_barrier(sgcref, + returns_modified_object=True) + self.assert_not_in(called, [sgcref]) + descr.llop1.set_cache_item(sgcref, 0) + + + def test_gc_write_barrier_fastpath(self): + from rpython.jit.backend.llsupport.gc import STMWriteBarrierDescr + descr = STMWriteBarrierDescr(self.cpu.gc_ll_descr, 'P2W') + + called = [] + def write(obj): + called.append(obj) + return obj + + functype = lltype.Ptr(lltype.FuncType( + [llmemory.Address], 
llmemory.Address)) + funcptr = llhelper(functype, write) + descr.b_failing_case_ptr = funcptr + descr.llop1 = fakellop() + + # -------- TEST -------- + for rev in [fakellop.PRIV_REV+4, fakellop.PRIV_REV]: + called[:] = [] + + s = self.allocate_prebuilt_s() + sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) + s.h_revision = rev + + descr._do_barrier(sgcref, + returns_modified_object=True) + + # check if rev-fastpath worked + if rev == fakellop.PRIV_REV: + # fastpath + self.assert_not_in(called, [sgcref]) + else: + self.assert_in(called, [sgcref]) + + # now set WRITE_BARRIER -> always call slowpath + called[:] = [] + s.h_tid |= StmGC.GCFLAG_WRITE_BARRIER + descr._do_barrier(sgcref, + returns_modified_object=True) + self.assert_in(called, [sgcref]) + + + + def test_read_barrier_fastpath(self): cpu = self.cpu @@ -267,7 +428,7 @@ # check if rev-fastpath worked if rev == PRIV_REV: # fastpath - assert not called_on + self.assert_not_in(called_on, [sgcref]) else: self.assert_in(called_on, [sgcref]) @@ -310,7 +471,7 @@ # check if rev-fastpath worked if rev == PRIV_REV: # fastpath and WRITE_BARRIER not set - assert not called_on + self.assert_not_in(called_on, [sgcref]) else: self.assert_in(called_on, [sgcref]) @@ -377,31 +538,32 @@ looptoken = JitCellToken() c_loop = cpu.compile_loop(inputargs + [i1], operations, looptoken) - print c_loop + args = [s for i, s in enumerate((s1, s2)) if not isinstance((p1, p2)[i], Const)] + [7] - + print "======" + print "inputargs:", inputargs+[i1], args + print "\n".join(map(str,c_loop[1])) + frame = self.cpu.execute_token(looptoken, *args) frame = rffi.cast(JITFRAMEPTR, frame) frame_adr = rffi.cast(lltype.Signed, frame.jf_descr) guard_failed = frame_adr != id(finaldescr) # CHECK: - a, b = s1, s2 + a, b = cast_to_int(s1), cast_to_int(s2) if isinstance(p1, Const): - s1 = p1.value + a = cast_to_int(p1.value) if isinstance(p2, Const): - s2 = p2.value + b = cast_to_int(p2.value) - if s1 == s2 or \ - rffi.cast(lltype.Signed, s1) == 0 or \ - rffi.cast(lltype.Signed, s2) == 0: - assert (s1, s2) not in called_on + if a == b or a == 0 or b == 0: + assert (a, b) not in called_on else: - assert [(s1, s2)] == called_on + assert [(a, b)] == called_on if guard is not None: - if s1 == s2: + if a == b: if guard in (rop.GUARD_TRUE, rop.GUARD_VALUE): assert not guard_failed else: @@ -412,7 +574,204 @@ assert guard_failed - + + + def test_assembler_call(self): + cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + + called = [] + def assembler_helper(deadframe, virtualizable): + frame = rffi.cast(JITFRAMEPTR, deadframe) + frame_adr = rffi.cast(lltype.Signed, frame.jf_descr) + called.append(frame_adr) + return 4 + 9 + + FUNCPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF, + llmemory.GCREF], + lltype.Signed)) + class FakeJitDriverSD: + index_of_virtualizable = -1 + _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) + assembler_helper_adr = llmemory.cast_ptr_to_adr( + _assembler_helper_ptr) + + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + i10 = int_add(i0, i1) + i11 = int_add(i10, i2) + i12 = int_add(i11, i3) + i13 = int_add(i12, i4) + i14 = int_add(i13, i5) + i15 = int_add(i14, i6) + i16 = int_add(i15, i7) + i17 = int_add(i16, i8) + i18 = int_add(i17, i9) + finish(i18)''' + loop = parse(ops) + looptoken = JitCellToken() + looptoken.outermost_jitdriver_sd = FakeJitDriverSD() + finish_descr = loop.operations[-1].getdescr() + self.cpu.done_with_this_frame_descr_int = BasicFinalDescr() + self.cpu.compile_loop(loop.inputargs, loop.operations, 
looptoken) + ARGS = [lltype.Signed] * 10 + RES = lltype.Signed + FakeJitDriverSD.portal_calldescr = self.cpu.calldescrof( + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) + args = [i+1 for i in range(10)] + deadframe = self.cpu.execute_token(looptoken, *args) + + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + i10 = int_add(i0, 42) + i11 = call_assembler(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, descr=looptoken) + guard_not_forced()[] + finish(i11) + ''' + loop = parse(ops, namespace=locals()) + othertoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + args = [i+1 for i in range(10)] + deadframe = self.cpu.execute_token(othertoken, *args) + assert called == [id(finish_descr)] + del called[:] + + # compile a replacement + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + i10 = int_sub(i0, i1) + i11 = int_sub(i10, i2) + i12 = int_sub(i11, i3) + i13 = int_sub(i12, i4) + i14 = int_sub(i13, i5) + i15 = int_sub(i14, i6) + i16 = int_sub(i15, i7) + i17 = int_sub(i16, i8) + i18 = int_sub(i17, i9) + finish(i18)''' + loop2 = parse(ops) + looptoken2 = JitCellToken() + looptoken2.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + finish_descr2 = loop2.operations[-1].getdescr() + + # install it + self.cpu.redirect_call_assembler(looptoken, looptoken2) + + # now call_assembler should go to looptoken2 + args = [i+1 for i in range(10)] + deadframe = self.cpu.execute_token(othertoken, *args) + assert called == [id(finish_descr2)] + + + def test_call_malloc_gc(self): + cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + + size = WORD*3 + addr = cpu.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + typeid = 11 + descr = cpu.gc_ll_descr.malloc_big_fixedsize_descr + + p0 = BoxPtr() + ops1 = [ResOperation(rop.CALL_MALLOC_GC, + [ConstInt(addr), ConstInt(size), ConstInt(typeid)], + p0, descr), + ResOperation(rop.FINISH, [p0], None, + descr=BasicFinalDescr(0)), + ] + + inputargs = [] + looptoken = JitCellToken() + c_loop = cpu.compile_loop(inputargs, ops1, + looptoken) + + args = [] + print "======" + print "inputargs:", inputargs, args + print "\n".join(map(str,c_loop[1])) + + frame = self.cpu.execute_token(looptoken, *args) + + + def test_assembler_call_propagate_exc(self): + cpu = self.cpu + cpu._setup_descrs() + cpu.gc_ll_descr.init_nursery(100) + + excdescr = BasicFailDescr(666) + cpu.propagate_exception_descr = excdescr + cpu.setup_once() # xxx redo it, because we added + # propagate_exception + + def assembler_helper(deadframe, virtualizable): + #assert cpu.get_latest_descr(deadframe) is excdescr + # let's assume we handled that + return 3 + + FUNCPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF, + llmemory.GCREF], + lltype.Signed)) + class FakeJitDriverSD: + index_of_virtualizable = -1 + _assembler_helper_ptr = llhelper(FUNCPTR, assembler_helper) + assembler_helper_adr = llmemory.cast_ptr_to_adr( + _assembler_helper_ptr) + + + + addr = cpu.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') + typeid = 11 + descr = cpu.gc_ll_descr.malloc_big_fixedsize_descr + + p0 = BoxPtr() + i0 = BoxInt() + ops = [ResOperation(rop.CALL_MALLOC_GC, + [ConstInt(addr), i0, ConstInt(typeid)], + p0, descr), + ResOperation(rop.FINISH, [p0], None, + descr=BasicFinalDescr(0)), + ] + + inputargs = [i0] + looptoken = JitCellToken() + looptoken.outermost_jitdriver_sd = FakeJitDriverSD() + c_loop = cpu.compile_loop(inputargs, ops, looptoken) + + + ARGS = 
[lltype.Signed] * 10 + RES = lltype.Signed + FakeJitDriverSD.portal_calldescr = cpu.calldescrof( + lltype.Ptr(lltype.FuncType(ARGS, RES)), ARGS, RES, + EffectInfo.MOST_GENERAL) + i1 = ConstInt(sys.maxint - 1) + i2 = BoxInt() + finaldescr = BasicFinalDescr(1) + not_forced = ResOperation(rop.GUARD_NOT_FORCED, [], None, + descr=BasicFailDescr(1)) + not_forced.setfailargs([]) + ops = [ResOperation(rop.CALL_ASSEMBLER, [i1], i2, descr=looptoken), + not_forced, + ResOperation(rop.FINISH, [i1], None, descr=finaldescr), + ] + othertoken = JitCellToken() + cpu.done_with_this_frame_descr_int = BasicFinalDescr() + loop = cpu.compile_loop([], ops, othertoken) + + deadframe = cpu.execute_token(othertoken) + frame = rffi.cast(JITFRAMEPTR, deadframe) + frame_adr = rffi.cast(lltype.Signed, frame.jf_descr) + assert frame_adr != id(finaldescr) + + + + + + + diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -10,6 +10,7 @@ from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.codewriter import heaptracker, longlong from rpython.rlib.objectmodel import compute_identity_hash +from rpython.rlib import rgc import weakref # ____________________________________________________________ @@ -308,17 +309,24 @@ class ConstPtr(Const): type = REF value = lltype.nullptr(llmemory.GCREF.TO) - _attrs_ = ('value',) + imm_value = 0 + _attrs_ = ('value', 'imm_value',) def __init__(self, value): assert lltype.typeOf(value) == llmemory.GCREF self.value = value + self.imm_value = 0 def clonebox(self): return BoxPtr(self.value) nonconstbox = clonebox + def get_imm_value(self): + # imm_value set if needed: + assert (not self.value) or self.imm_value + return self.imm_value + def getref_base(self): return self.value diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -15,6 +15,18 @@ first_gcflag = 1 << (LONG_BIT//2) + + +def get_hdr_tid(addr): + return llmemory.cast_adr_to_ptr(addr + StmGC.H_TID, rffi.SIGNEDP) + +def get_hdr_revision(addr): + return llmemory.cast_adr_to_ptr(addr + StmGC.H_REVISION, rffi.SIGNEDP) + +def get_hdr_original(addr): + return llmemory.cast_adr_to_ptr(addr + StmGC.H_ORIGINAL, rffi.SIGNEDP) + + class StmGC(MovingGCBase): _alloc_flavor_ = "raw" inline_simple_malloc = True @@ -24,10 +36,19 @@ malloc_zero_filled = True #gcflag_extra = GCFLAG_EXTRA + + GCHDR = lltype.GcStruct( + 'GCPTR', + ('h_tid', lltype.Unsigned), + ('h_revision', lltype.Signed), + ('h_original', lltype.Unsigned)) + GCHDRP = lltype.Ptr(GCHDR) + GCHDRSIZE = 3 * WORD + HDR = rffi.COpaque('struct stm_object_s') H_TID = 0 H_REVISION = WORD - H_ORIGINAL = WORD + WORD + H_ORIGINAL = WORD * 2 typeid_is_in_field = None VISIT_FPTR = lltype.Ptr(lltype.FuncType([llmemory.Address], lltype.Void)) @@ -58,6 +79,8 @@ FX_MASK = 65535 + def get_type_id(self, obj): + return llop.stm_get_tid(llgroup.HALFWORD, obj) def setup(self): # Hack: MovingGCBase.setup() sets up stuff related to id(), which @@ -67,24 +90,12 @@ llop.stm_initialize(lltype.Void) - def get_type_id(self, obj): - return llop.stm_get_tid(llgroup.HALFWORD, obj) - - def get_hdr_tid(self, addr): - return llmemory.cast_adr_to_ptr(addr + self.H_TID, rffi.SIGNEDP) - - def get_hdr_revision(self, addr): - return llmemory.cast_adr_to_ptr(addr + self.H_REVISION, rffi.SIGNEDP) - - def get_hdr_original(self, addr): - return llmemory.cast_adr_to_ptr(addr + self.H_ORIGINAL, rffi.SIGNEDP) 
- def get_original_copy(self, obj): addr = llmemory.cast_ptr_to_adr(obj) - if bool(self.get_hdr_tid(addr)[0] & self.GCFLAG_PREBUILT_ORIGINAL): + if bool(get_hdr_tid(addr)[0] & StmGC.GCFLAG_PREBUILT_ORIGINAL): return obj # - orig = self.get_hdr_original(addr)[0] + orig = get_hdr_original(addr)[0] if orig == 0: return obj # @@ -127,11 +138,12 @@ return llop.stm_weakref_allocate(llmemory.GCREF, size, typeid16, obj) + def can_move(self, obj): """Means the reference will stay valid, except if not seen by the GC, then it can get collected.""" - tid = self.get_hdr_tid(obj)[0] - if bool(tid & self.GCFLAG_OLD): + tid = get_hdr_tid(obj)[0] + if bool(tid & StmGC.GCFLAG_OLD): return False # XXX wrong so far. We should add a flag to the # object that means "don't ever kill this copy" return True @@ -157,7 +169,7 @@ source_start, dest_start, length): ll_assert(False, 'XXX') return False - + def id(self, gcobj): return llop.stm_id(lltype.Signed, gcobj) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -314,13 +314,6 @@ self.can_move_ptr = getfn(GCClass.can_move.im_func, [s_gc, annmodel.SomeAddress()], annmodel.SomeBool()) - if hasattr(GCClass, 'get_original_copy'): - self.get_original_copy_ptr = getfn( - GCClass.get_original_copy.im_func, - [s_gc, annmodel.SomePtr(llmemory.GCREF)], - annmodel.SomePtr(llmemory.GCREF)) - else: - self.get_original_copy_ptr = None if hasattr(GCClass, 'shrink_array'): self.shrink_array_ptr = getfn( @@ -751,16 +744,6 @@ hop.genop("direct_call", [self.can_move_ptr, self.c_const_gc, v_addr], resultvar=op.result) - def gct_gc_get_original_copy(self, hop): - if self.get_original_copy_ptr is None: - raise Exception("unreachable code") - op = hop.spaceop - v_addr = hop.genop('cast_ptr_to_adr', - [op.args[0]], resulttype=llmemory.Address) - hop.genop("direct_call", [self.get_original_copy_ptr, - self.c_const_gc, v_addr], - resultvar=op.result) - def gct_shrink_array(self, hop): if self.shrink_array_ptr is None: return GCTransformer.gct_shrink_array(self, hop) diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -99,6 +99,7 @@ gct_stm_become_inevitable = _gct_with_roots_pushed gct_stm_perform_transaction = _gct_with_roots_pushed + gct_stm_allocate_nonmovable_int_adr = _gct_with_roots_pushed class StmRootWalker(BaseRootWalker): diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -26,12 +26,6 @@ return None # means 'not translated at all'; # in "if stm_is_enabled()" it is equivalent to False -def stm_get_original_copy(obj): - """ Returns a non-moving reference to an object (only use if obj is - already OLD!) 
- """ - return obj - # ____________________________________________________________ # Annotation and specialization @@ -76,20 +70,6 @@ hop.exception_cannot_occur() return hop.inputconst(lltype.Bool, hop.s_result.const) - -class StmGCGetOriginalCopy(ExtRegistryEntry): - _about_ = stm_get_original_copy - - def compute_result_annotation(self, s_obj): - from rpython.annotator import model as annmodel - return annmodel.SomePtr(llmemory.GCREF) - - def specialize_call(self, hop): - hop.exception_cannot_occur() - return hop.genop('gc_get_original_copy', hop.args_v, - resulttype=hop.r_result) - - def can_move(p): """Check if the GC object 'p' is at an address that can move. Must not be called with None. With non-moving GCs, it is always False. @@ -119,7 +99,17 @@ on objects that are already a bit old, so have a chance to be already non-movable.""" if not we_are_translated(): - return p + if isinstance(p, _GcRef): + return cast_gcref_to_int(p) + else: + from rpython.rtyper.lltypesystem import rffi + return rffi.cast(lltype.Signed, p) + + if stm_is_enabled(): + from rpython.rtyper.lltypesystem.lloperation import llop + res = llop.stm_allocate_nonmovable_int_adr(lltype.Signed, p) + return res + i = 0 while can_move(p): if i > 6: @@ -127,10 +117,7 @@ collect(i) i += 1 - if stm_is_enabled(): - return stm_get_original_copy(p) - else: - return p + return 0 def _heap_stats(): raise NotImplementedError # can't be run directly diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py --- a/rpython/rlib/test/test_rgc.py +++ b/rpython/rlib/test/test_rgc.py @@ -228,3 +228,5 @@ x1 = X() n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) assert n >= 8 and n <= 64 + + diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -121,6 +121,7 @@ class ExtFuncEntry(ExtRegistryEntry): safe_not_sandboxed = False + transactionsafe = False # common case: args is a list of annotation or types def normalize_args(self, *args_s): @@ -198,6 +199,7 @@ impl._llfnobjattrs_ = { '_name': self.name, '_safe_not_sandboxed': self.safe_not_sandboxed, + 'transactionsafe': self.transactionsafe } obj = rtyper.getannmixlevel().delayedfunction( impl, signature_args, hop.s_result) @@ -208,7 +210,8 @@ # print '<<<<<<<<<<<<<-----------------------------------' obj = rtyper.type_system.getexternalcallable(args_ll, ll_result, name, _external_name=self.name, _callable=fakeimpl, - _safe_not_sandboxed=self.safe_not_sandboxed) + _safe_not_sandboxed=self.safe_not_sandboxed, + transactionsafe=self.transactionsafe) vlist = [hop.inputconst(typeOf(obj), obj)] + hop.inputargs(*args_r) hop.exception_is_here() return hop.genop('direct_call', vlist, r_result) @@ -216,7 +219,7 @@ def register_external(function, args, result=None, export_name=None, llimpl=None, ooimpl=None, llfakeimpl=None, oofakeimpl=None, - sandboxsafe=False): + sandboxsafe=False, _transactionsafe=False): """ function: the RPython function that will be rendered as an external function (e.g.: math.floor) args: a list containing the annotation of the arguments @@ -225,6 +228,7 @@ llimpl, ooimpl: optional; if provided, these RPython functions are called instead of the target function llfakeimpl, oofakeimpl: optional; if provided, they are called by the llinterpreter sandboxsafe: use True if the function performs no I/O (safe for --sandbox) + _transactionsafe: use True if the llimpl is transactionsafe (see rffi.llexternal) """ if export_name is None: @@ -233,6 +237,7 @@ class 
FunEntry(ExtFuncEntry): _about_ = function safe_not_sandboxed = sandboxsafe + transactionsafe = _transactionsafe if args is None: def normalize_args(self, *args_s): diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -600,12 +600,14 @@ [lltype.Signed, lltype.Signed], lltype.Signed, sandboxsafe=True, - _nowrapper=True) + _nowrapper=True, + transactionsafe=True) register_external(_round_up_for_allocation, [int, int], int, 'll_arena.round_up_for_allocation', llimpl=llimpl_round_up_for_allocation, llfakeimpl=round_up_for_allocation, - sandboxsafe=True) + sandboxsafe=True, + _transactionsafe=True) def llimpl_arena_new_view(addr): return addr diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -425,6 +425,7 @@ 'stm_finalize': LLOp(), 'stm_barrier': LLOp(sideeffects=False), 'stm_allocate': LLOp(sideeffects=False, canmallocgc=True), + 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), 'stm_become_inevitable': LLOp(canmallocgc=True), 'stm_minor_collect': LLOp(canmallocgc=True), 'stm_major_collect': LLOp(canmallocgc=True), @@ -533,7 +534,6 @@ 'gc_obtain_free_space': LLOp(), 'gc_set_max_heap_size': LLOp(), 'gc_can_move' : LLOp(sideeffects=False), - 'gc_get_original_copy': LLOp(sideeffects=False), 'gc_thread_prepare' : LLOp(canmallocgc=True), 'gc_thread_run' : LLOp(), 'gc_thread_start' : LLOp(), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -617,6 +617,7 @@ OP_STM_MAJOR_COLLECT = _OP_STM OP_STM_MINOR_COLLECT = _OP_STM OP_STM_CLEAR_EXCEPTION_DATA_ON_ABORT= _OP_STM + OP_STM_ALLOCATE_NONMOVABLE_INT_ADR = _OP_STM def OP_PTR_NONZERO(self, op): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -134,6 +134,11 @@ result = funcgen.expr(op.result) return '%s = stm_weakref_allocate(%s, %s, %s);' % (result, arg0, arg1, arg2) + +def stm_allocate_nonmovable_int_adr(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + result = funcgen.expr(op.result) + return '%s = stm_allocate_public_integer_address(%s);' % (result, arg0) def stm_allocate(funcgen, op): arg0 = funcgen.expr(op.args[0]) diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -15,8 +15,7 @@ 'jit_force_quasi_immutable', 'jit_marker', 'jit_is_virtual', 'jit_record_known_class', 'gc_identityhash', 'gc_id', 'gc_can_move', 'gc__collect', - 'gc_adr_of_root_stack_top', 'gc_get_original_copy', - 'stmgc_get_original_copy', + 'gc_adr_of_root_stack_top', 'weakref_create', 'weakref_deref', 'stm_threadlocalref_get', 'stm_threadlocalref_set', 'stm_threadlocalref_count', 'stm_threadlocalref_addr', From noreply at buildbot.pypy.org Thu Aug 22 21:23:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 21:23:40 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: For now, withmapdict=True doesn't really make sense with STM, so Message-ID: <20130822192340.525701C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r66297:da6a6e77fe6f Date: 2013-08-22 21:23 +0200 
http://bitbucket.org/pypy/pypy/changeset/da6a6e77fe6f/ Log: For now, withmapdict=True doesn't really make sense with STM, so don't enable it if we select both STM and the JIT. diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -347,11 +347,13 @@ # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - config.objspace.std.suggest(withmapdict=True) + if not config.translation.stm: + config.objspace.std.suggest(withmapdict=True) # tweaks some parameters with STM if config.translation.stm: config.objspace.std.suggest(methodcachesizeexp=9) + # XXX try at some point to see if withmapdict=True would make sense def enable_allworkingmodules(config): From noreply at buildbot.pypy.org Thu Aug 22 22:20:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 22:20:57 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add a passing test Message-ID: <20130822202057.7F3121C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r494:8c2d3f61dc9c Date: 2013-08-22 22:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/8c2d3f61dc9c/ Log: Add a passing test diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1005,6 +1005,7 @@ static void init_transaction(struct tx_descriptor *d) { + assert(d->atomic == 0); assert(d->active == 0); stm_start_sharedlock(); assert(d->active == 0); diff --git a/c4/test/test_atomic.py b/c4/test/test_atomic.py --- a/c4/test/test_atomic.py +++ b/c4/test/test_atomic.py @@ -56,3 +56,11 @@ assert lib.stm_in_transaction() lib.stm_begin_inevitable_transaction() lib.stm_atomic(-1) + +def test_atomic_but_abort(): + @perform_transaction + def run(retry_counter): + assert lib.stm_atomic(0) == 0 + if retry_counter == 0: + lib.stm_atomic(+1) + abort_and_retry() From noreply at buildbot.pypy.org Thu Aug 22 22:20:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 22:20:59 +0200 (CEST) Subject: [pypy-commit] stmgc default: Subtle bug and fix: v_atomic should not be re-read just before Message-ID: <20130822202059.8140B1C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r495:c83f63e68ab7 Date: 2013-08-22 22:17 +0200 http://bitbucket.org/pypy/stmgc/changeset/c83f63e68ab7/ Log: Subtle bug and fix: v_atomic should not be re-read just before CommitTransaction, because the latter can still abort. diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -163,6 +163,7 @@ stm_shadowstack = v_saved_value + 2; /*skip the two values pushed above*/ do { + v_atomic = d->atomic; v_counter = counter + 1; /* If counter==0, initialize 'reads_size_limit_nonatomic' from the configured length limit. 
If counter>0, we did an abort, which @@ -192,7 +193,6 @@ result = callback(arg, counter); assert(stm_shadowstack == v_saved_value + 2); - v_atomic = d->atomic; if (!d->atomic) CommitTransaction(); diff --git a/c4/test/test_atomic.py b/c4/test/test_atomic.py --- a/c4/test/test_atomic.py +++ b/c4/test/test_atomic.py @@ -64,3 +64,27 @@ if retry_counter == 0: lib.stm_atomic(+1) abort_and_retry() + +def test_bug_v_atomic(): + p1 = palloc(HDR + WORD) + # + def f1(r): + def cb(retry_counter): + assert retry_counter == 0 + r.enter_in_parallel() + lib.setlong(p1, 0, 1111) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + r.leave_in_parallel() + perform_transaction(cb) + # + def f2(r): + def cb(retry_counter): + if retry_counter == 0: + lib.setlong(p1, 0, 2222) + r.wait_while_in_parallel() + # finish the transaction, but it will abort + lib.stm_atomic(+1) + perform_transaction(cb) + # + run_parallel(f1, f2, max_aborts=1) From noreply at buildbot.pypy.org Thu Aug 22 22:22:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 22:22:45 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/c83f63e68ab7 Message-ID: <20130822202245.95B761C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66298:fa5432ef32cf Date: 2013-08-22 22:21 +0200 http://bitbucket.org/pypy/pypy/changeset/fa5432ef32cf/ Log: import stmgc/c83f63e68ab7 diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -1006,6 +1006,7 @@ static void init_transaction(struct tx_descriptor *d) { + assert(d->atomic == 0); assert(d->active == 0); stm_start_sharedlock(); assert(d->active == 0); diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -e14cbe1e040b +c83f63e68ab7 diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -164,6 +164,7 @@ stm_shadowstack = v_saved_value + 2; /*skip the two values pushed above*/ do { + v_atomic = d->atomic; v_counter = counter + 1; /* If counter==0, initialize 'reads_size_limit_nonatomic' from the configured length limit. 
If counter>0, we did an abort, which @@ -193,7 +194,6 @@ result = callback(arg, counter); assert(stm_shadowstack == v_saved_value + 2); - v_atomic = d->atomic; if (!d->atomic) CommitTransaction(); From noreply at buildbot.pypy.org Thu Aug 22 22:27:39 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 Aug 2013 22:27:39 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: some pure operations need barriers (stmrewrite.py) Message-ID: <20130822202739.564111C074B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66299:f727195ab6ee Date: 2013-08-22 22:08 +0200 http://bitbucket.org/pypy/pypy/changeset/f727195ab6ee/ Log: some pure operations need barriers (stmrewrite.py) diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -67,6 +67,15 @@ # uses h_tid which doesn't need a read-barrier self.newops.append(op) continue + # ---------- pure operations needing read-barrier ---------- + if op.getopnum() in (rop.GETFIELD_GC_PURE, + rop.GETARRAYITEM_GC_PURE, + rop.ARRAYLEN_GC,): + # e.g. getting inst_intval of a W_IntObject that is + # currently only a stub needs to first resolve to a + # real object + self.handle_category_operations(op, 'R') + continue # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): self.newops.append(op) diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -813,7 +813,7 @@ x0 = promote(x0) elif n % 3 == 1: x1 = promote(x1) - else: + else: # None x2 = promote(x2) raiseassert(x0 != ptrs[0]) raiseassert(x0 == ptrs[1]) @@ -849,3 +849,29 @@ def test_compile_framework_ptr_eq(self): self.run('compile_framework_ptr_eq') + + def define_compile_framework_call_assembler2(self): + S = lltype.GcStruct('S', ('i', lltype.Signed), + ('v', lltype.Signed)) + driver = JitDriver(greens = [], + reds = ['a']) + + def inner(a): + while a.i: + driver.jit_merge_point(a=a) + a.i -= 1 + print a.v + + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): + u = lltype.malloc(S) + u.i = 10000 + u.v = n + inner(u) + + return n - 1, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s + + return None, f, None + + def test_compile_framework_call_assembler2(self): + self.run('compile_framework_call_assembler2') + From noreply at buildbot.pypy.org Thu Aug 22 22:42:58 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 22 Aug 2013 22:42:58 +0200 (CEST) Subject: [pypy-commit] pypy default: numpy.rint(int) should return a float Message-ID: <20130822204258.DE5721C0149@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66300:8b0616ce3b90 Date: 2013-08-22 22:42 +0200 http://bitbucket.org/pypy/pypy/changeset/8b0616ce3b90/ Log: numpy.rint(int) should return a float diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -272,7 +272,7 @@ assert rint(complex(inf, 1.5)) == complex(inf, 2.) 
assert rint(complex(0.5, inf)) == complex(0., inf) - assert rint(sys.maxint) == sys.maxint + assert rint(sys.maxint) > 0.0 def test_sign(self): from numpypy import array, sign, dtype diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -308,13 +308,6 @@ return min(v1, v2) @simple_unary_op - def rint(self, v): - if isfinite(float(v)): - return rfloat.round_double(float(v), 0, half_even=True) - else: - return v - - @simple_unary_op def ones_like(self, v): return 1 @@ -322,6 +315,10 @@ def zeros_like(self, v): return 0 + @raw_unary_op + def rint(self, v): + float64 = Float64() + return float64.rint(float64.box(v)) class NonNativePrimitive(Primitive): _mixin_ = True @@ -1036,6 +1033,25 @@ else: return v1 + v2 + @simple_unary_op + def rint(self, v): + x = float(v) + if isfinite(x): + import math + y = math.floor(x) + r = x - y + + if r > 0.5: + y += 1.0 + + if r == 0.5: + r = y - 2.0 * math.floor(0.5 * y) + if r == 1.0: + y += 1.0 + return y + else: + return x + class NonNativeFloat(NonNativePrimitive, Float): _mixin_ = True From noreply at buildbot.pypy.org Thu Aug 22 22:58:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 22:58:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: A passing test Message-ID: <20130822205810.BF6821C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r496:ed13d51eb291 Date: 2013-08-22 22:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/ed13d51eb291/ Log: A passing test diff --git a/c4/test/test_atomic.py b/c4/test/test_atomic.py --- a/c4/test/test_atomic.py +++ b/c4/test/test_atomic.py @@ -65,6 +65,26 @@ lib.stm_atomic(+1) abort_and_retry() +def test_entering_atomic(): + seen = [] + def run1(c1): + assert c1 == 0 + lib.stm_atomic(+1) + def run2(c2): + if c2 == 0: + if not seen: + assert lib.stm_atomic(0) == 1 + lib.stm_atomic(-1) + seen.append("continue running, but now in non-atomic mode") + return True + assert lib.stm_atomic(0) == 0 + seen.append("aborting now") + abort_and_retry() + seen.append("done!") + perform_transaction(run2) + perform_transaction(run1) + assert len(seen) == len(set(seen)) == 3 + def test_bug_v_atomic(): p1 = palloc(HDR + WORD) # From noreply at buildbot.pypy.org Thu Aug 22 22:58:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 22:58:12 +0200 (CEST) Subject: [pypy-commit] stmgc default: Kill v_atomic. After any abort, reset the "atomic" flag to 0, because Message-ID: <20130822205812.17F751C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r497:f614fd3a4080 Date: 2013-08-22 22:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/f614fd3a4080/ Log: Kill v_atomic. After any abort, reset the "atomic" flag to 0, because we're anyway then resuming from a transaction start. This happens to fix a crash on pypy-stm, but I didn't manage to write a test here... 
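(A minimal, self-contained sketch of the control flow behind this fix, not the actual stmgc sources: an abort longjmp()s back to the top of the retry loop, so the abort path itself has to put per-transaction state such as the atomic counter back to zero; re-initialising it from a saved copy around CommitTransaction() is then no longer needed. All names below are placeholders for illustration only.)

    /* sketch only: a setjmp/longjmp retry loop in the style of
       stm_perform_transaction(); 'atomic' stands in for d->atomic */
    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf retry_point;
    static int atomic;                /* per-transaction state        */

    static void abort_transaction(void)
    {
        atomic = 0;                   /* reset before unwinding       */
        longjmp(retry_point, 1);      /* resume at the setjmp() below */
    }

    int main(void)
    {
        volatile int counter = 0;
        setjmp(retry_point);          /* aborts land back here        */
        counter++;
        printf("attempt %d, atomic=%d\n", counter, atomic);
        if (counter == 1) {
            atomic = 1;               /* transaction became atomic... */
            abort_transaction();      /* ...and then aborted          */
        }
        /* the retry runs with 'atomic' correctly back at 0 */
        return 0;
    }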
diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -957,6 +957,7 @@ SpinLoop(SPLP_ABORT); // jump back to the setjmp_buf (this call does not return) d->active = 0; + d->atomic = 0; stm_stop_sharedlock(); longjmp(*d->setjmp_buf, 1); } @@ -1361,6 +1362,7 @@ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); + assert(d->atomic == 0); dprintf(("CommitTransaction(%p)\n", d)); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -135,12 +135,11 @@ jmp_buf _jmpbuf; long volatile v_counter = 0; gcptr *volatile v_saved_value = stm_shadowstack; - long volatile v_atomic; stm_push_root(arg); stm_push_root(END_MARKER_OFF); - if (!(v_atomic = thread_descriptor->atomic)) + if (!thread_descriptor->atomic) CommitTransaction(); #ifdef _GC_ON_CPYTHON @@ -159,11 +158,9 @@ struct tx_descriptor *d = thread_descriptor; long counter, result; counter = v_counter; - d->atomic = v_atomic; stm_shadowstack = v_saved_value + 2; /*skip the two values pushed above*/ do { - v_atomic = d->atomic; v_counter = counter + 1; /* If counter==0, initialize 'reads_size_limit_nonatomic' from the configured length limit. If counter>0, we did an abort, which @@ -185,6 +182,7 @@ /* atomic transaction: a common case is that callback() returned even though we are atomic because we need a major GC. For that case, release and reaquire the rw lock here. */ + assert(d->active >= 1); stm_possible_safe_point(); } From noreply at buildbot.pypy.org Thu Aug 22 22:59:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Aug 2013 22:59:30 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/f614fd3a4080 Message-ID: <20130822205930.5EDE61C074B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66301:ba4f437bc2c2 Date: 2013-08-22 22:58 +0200 http://bitbucket.org/pypy/pypy/changeset/ba4f437bc2c2/ Log: import stmgc/f614fd3a4080 diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -958,6 +958,7 @@ SpinLoop(SPLP_ABORT); // jump back to the setjmp_buf (this call does not return) d->active = 0; + d->atomic = 0; stm_stop_sharedlock(); longjmp(*d->setjmp_buf, 1); } @@ -1362,6 +1363,7 @@ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); + assert(d->atomic == 0); dprintf(("CommitTransaction(%p)\n", d)); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -c83f63e68ab7 +f614fd3a4080 diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -136,12 +136,11 @@ jmp_buf _jmpbuf; long volatile v_counter = 0; gcptr *volatile v_saved_value = stm_shadowstack; - long volatile v_atomic; stm_push_root(arg); stm_push_root(END_MARKER_OFF); - if (!(v_atomic = thread_descriptor->atomic)) + if (!thread_descriptor->atomic) CommitTransaction(); #ifdef _GC_ON_CPYTHON @@ -160,11 +159,9 @@ struct tx_descriptor *d = thread_descriptor; long 
counter, result; counter = v_counter; - d->atomic = v_atomic; stm_shadowstack = v_saved_value + 2; /*skip the two values pushed above*/ do { - v_atomic = d->atomic; v_counter = counter + 1; /* If counter==0, initialize 'reads_size_limit_nonatomic' from the configured length limit. If counter>0, we did an abort, which @@ -186,6 +183,7 @@ /* atomic transaction: a common case is that callback() returned even though we are atomic because we need a major GC. For that case, release and reaquire the rw lock here. */ + assert(d->active >= 1); stm_possible_safe_point(); } From noreply at buildbot.pypy.org Thu Aug 22 22:59:48 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 22 Aug 2013 22:59:48 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify Message-ID: <20130822205948.CC5091C074B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r66302:5492c8355133 Date: 2013-08-22 13:57 -0700 http://bitbucket.org/pypy/pypy/changeset/5492c8355133/ Log: simplify diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1276,10 +1276,7 @@ return _all_contained_in(space, self, w_other) return space.w_False - def descr_ne(self, space, w_other): - if not _is_set_like(w_other): - return space.w_NotImplemented - return space.not_(space.eq(self, w_other)) + descr_ne = negate(descr_eq) def descr_lt(self, space, w_other): if not _is_set_like(w_other): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -9,10 +9,7 @@ tmp = f(self, space, w_other) if tmp is space.w_NotImplemented: return space.w_NotImplemented - elif tmp is space.w_False: - return space.w_True - else: - return space.w_False + return space.newbool(tmp is space.w_False) _negator.func_name = 'negate-%s' % f.func_name return _negator From noreply at buildbot.pypy.org Thu Aug 22 22:59:50 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 22 Aug 2013 22:59:50 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20130822205950.5F1471C074B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r66303:83074dd846cf Date: 2013-08-22 13:58 -0700 http://bitbucket.org/pypy/pypy/changeset/83074dd846cf/ Log: cleanup diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,20 +1,22 @@ +"""The builtin int implementation + +In order to have the same behavior running on CPython, and after RPython +translation this module uses rarithmetic.ovfcheck to explicitly check +for overflows, something CPython does not do anymore. 
+""" + +from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int, ovfcheck, r_uint +from rpython.rlib.rbigint import rbigint + from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat from pypy.objspace.std.inttype import W_AbstractIntObject -from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from rpython.rlib import jit -from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, r_uint, is_valid_int -from rpython.rlib.rbigint import rbigint -""" -In order to have the same behavior running -on CPython, and after RPython translation we use ovfcheck -from rarithmetic to explicitly check for overflows, -something CPython does not do anymore. -""" class W_IntObject(W_AbstractIntObject): __slots__ = 'intval' @@ -22,28 +24,29 @@ from pypy.objspace.std.inttype import int_typedef as typedef - def __init__(w_self, intval): + def __init__(self, intval): assert is_valid_int(intval) - w_self.intval = intval + self.intval = intval - def __repr__(w_self): - """ representation for debugging purposes """ - return "%s(%d)" % (w_self.__class__.__name__, w_self.intval) + def __repr__(self): + """representation for debugging purposes""" + return "%s(%d)" % (self.__class__.__name__, self.intval) - def unwrap(w_self, space): - return int(w_self.intval) + def unwrap(self, space): + return int(self.intval) int_w = unwrap - def uint_w(w_self, space): - intval = w_self.intval + def uint_w(self, space): + intval = self.intval if intval < 0: - raise OperationError(space.w_ValueError, - space.wrap("cannot convert negative integer to unsigned")) + raise OperationError( + space.w_ValueError, + space.wrap("cannot convert negative integer to unsigned")) else: return r_uint(intval) - def bigint_w(w_self, space): - return rbigint.fromint(w_self.intval) + def bigint_w(self, space): + return rbigint.fromint(self.intval) def float_w(self, space): return float(self.intval) @@ -144,7 +147,8 @@ x = float(w_int1.intval) y = float(w_int2.intval) if y == 0.0: - raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float division")) + raise FailedToImplementArgs(space.w_ZeroDivisionError, + space.wrap("float division")) return space.wrap(x / y) def mod__Int_Int(space, w_int1, w_int2): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -1,13 +1,17 @@ +"""The builtin long implementation""" + import sys + +from rpython.rlib.rbigint import rbigint + from pypy.interpreter.error import OperationError from pypy.objspace.std import model, newformat -from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.longtype import W_AbstractLongObject, long_typedef +from pypy.objspace.std.model import W_Object, registerimplementation +from pypy.objspace.std.multimethod import FailedToImplementArgs +from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.multimethod import FailedToImplementArgs -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.noneobject import W_NoneObject -from rpython.rlib.rbigint import rbigint -from 
pypy.objspace.std.longtype import long_typedef, W_AbstractLongObject class W_LongObject(W_AbstractLongObject): @@ -16,8 +20,8 @@ typedef = long_typedef - def __init__(w_self, l): - w_self.num = l # instance of rbigint + def __init__(self, l): + self.num = l # instance of rbigint def fromint(space, intval): return W_LongObject(rbigint.fromint(intval)) @@ -49,16 +53,16 @@ fromrarith_int._annspecialcase_ = "specialize:argtype(0)" fromrarith_int = staticmethod(fromrarith_int) - def int_w(w_self, space): + def int_w(self, space): try: - return w_self.num.toint() + return self.num.toint() except OverflowError: raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to int")) - def uint_w(w_self, space): + def uint_w(self, space): try: - return w_self.num.touint() + return self.num.touint() except ValueError: raise OperationError(space.w_ValueError, space.wrap( "cannot convert negative integer to unsigned int")) @@ -66,8 +70,8 @@ raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to unsigned int")) - def bigint_w(w_self, space): - return w_self.num + def bigint_w(self, space): + return self.num def float_w(self, space): return self.tofloat(space) @@ -324,7 +328,8 @@ sys.maxint == 2147483647) # binary ops -for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', 'divmod', 'lshift']: +for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', + 'divmod', 'lshift']: exec compile(""" def %(opname)s_ovr__Int_Int(space, w_int1, w_int2): if recover_with_smalllong(space) and %(opname)r != 'truediv': From noreply at buildbot.pypy.org Fri Aug 23 09:47:14 2013 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 23 Aug 2013 09:47:14 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add another task Message-ID: <20130823074714.52BBC1C074B@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: extradoc Changeset: r5021:8673099b2149 Date: 2013-08-23 09:47 +0200 http://bitbucket.org/pypy/extradoc/changeset/8673099b2149/ Log: add another task diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -7,3 +7,5 @@ * cffi 1.0: think about how to separate compilation from execution of the script (e.g. a separate foo.c file, and "python -m cffi foo.c") + +* cffi: think about and collect which C-extensions we would like to bring to cffi From noreply at buildbot.pypy.org Fri Aug 23 12:44:37 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Aug 2013 12:44:37 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: two more people that I know are coming Message-ID: <20130823104437.5A62B1C01A6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5022:afc00a4190a5 Date: 2013-08-23 12:44 +0200 http://bitbucket.org/pypy/extradoc/changeset/afc00a4190a5/ Log: two more people that I know are coming diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -9,7 +9,7 @@ ==================== ============== ======================= Name Arrive/Depart Accomodation ==================== ============== ======================= -Carl Friedrich Bolz ? Lukas +Carl Friedrich Bolz 25/8-2/9 Lukas Lukas Diekmann lives there Romain Guillebert 25/8-1/9 hotel LSE Northumberl. Laurence Tratt lives there @@ -22,6 +22,8 @@ Manuel Jacob 24/8-3/9 sth. cheap, pref. 
share Ronan Lamy 25/8-1/9 hotel Strand Continent. Antonio Cuni 26/8-5/9 hotel LSE Northumberl. +Russel Winder ? ? +Rami Chowdhury ? ? ==================== ============== ======================= From noreply at buildbot.pypy.org Fri Aug 23 16:54:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Aug 2013 16:54:40 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: New task Message-ID: <20130823145440.D92441C12F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5023:9d405a53fb80 Date: 2013-08-23 16:54 +0200 http://bitbucket.org/pypy/extradoc/changeset/9d405a53fb80/ Log: New task diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -9,3 +9,7 @@ the script (e.g. a separate foo.c file, and "python -m cffi foo.c") * cffi: think about and collect which C-extensions we would like to bring to cffi + +* PYPYLOG output: they need to be disentangled when the process uses + threads or greenlets, probably by adding a thread-or-greenlet number + prefix (see branch stmgc-c4 where we already add a thread num prefix) From noreply at buildbot.pypy.org Fri Aug 23 18:19:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Aug 2013 18:19:40 +0200 (CEST) Subject: [pypy-commit] pypy default: (fijal, arigo) Message-ID: <20130823161940.5D1A61C01A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66304:9b31edfc3a5b Date: 2013-08-23 18:17 +0200 http://bitbucket.org/pypy/pypy/changeset/9b31edfc3a5b/ Log: (fijal, arigo) Using "PYPYLOG=..:filename", on any fork(), it will now create a new file "filename.forkCHILDPID", starting with FORKED: diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -196,6 +196,25 @@ return hop.genop('debug_flush', []) +def debug_forked(original_offset): + """ Call after a fork(), passing as argument the result of + debug_offset() called before the fork. + """ + pass + +class Entry(ExtRegistryEntry): + _about_ = debug_forked + + def compute_result_annotation(self, s_original_offset): + return None + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + vlist = hop.inputargs(lltype.Signed) + hop.exception_cannot_occur() + return hop.genop('debug_forked', vlist) + + def llinterpcall(RESTYPE, pythonfunction, *args): """When running on the llinterp, this causes the llinterp to call to the provided Python function with the run-time value of the given args. diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1566,7 +1566,7 @@ @registering_if(os, 'fork') def register_os_fork(self): - from rpython.rlib import rthread + from rpython.rlib import rthread, debug eci = self.gcc_profiling_bug_workaround('pid_t _noprof_fork(void)', 'return fork();') os_fork = self.llexternal('_noprof_fork', [], rffi.PID_T, @@ -1575,11 +1575,14 @@ def fork_llimpl(): # NB. 
keep forkpty() up-to-date, too + ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) rthread.gc_thread_after_fork(childpid, opaqueaddr) if childpid == -1: raise OSError(rposix.get_errno(), "os_fork failed") + if childpid == 0: + debug.debug_forked(ofs) return rffi.cast(lltype.Signed, childpid) return extdef([], int, llimpl=fork_llimpl, @@ -1619,6 +1622,7 @@ def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') master_p[0] = rffi.cast(rffi.INT, -1) + ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_forkpty(master_p, None, None, None)) @@ -1627,6 +1631,8 @@ lltype.free(master_p, flavor='raw') if childpid == -1: raise OSError(rposix.get_errno(), "os_forkpty failed") + if childpid == 0: + debug.debug_forked(ofs) return (rffi.cast(lltype.Signed, childpid), rffi.cast(lltype.Signed, master_fd)) diff --git a/rpython/translator/c/src/debug_print.c b/rpython/translator/c/src/debug_print.c --- a/rpython/translator/c/src/debug_print.c +++ b/rpython/translator/c/src/debug_print.c @@ -23,16 +23,12 @@ static char *debug_start_colors_2 = ""; static char *debug_stop_colors = ""; static char *debug_prefix = NULL; +static char *debug_filename = NULL; +static char *debug_filename_with_fork = NULL; static void pypy_debug_open(void) { char *filename = getenv("PYPYLOG"); - if (filename) -#ifndef _WIN32 - unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ -#else - putenv("PYPYLOG="); /* don't pass it to subprocesses */ -#endif if (filename && filename[0]) { char *colon = strchr(filename, ':'); @@ -52,7 +48,10 @@ filename = colon + 1; } if (strcmp(filename, "-") != 0) - pypy_debug_file = fopen(filename, "w"); + { + debug_filename = strdup(filename); + pypy_debug_file = fopen(filename, "w"); + } } if (!pypy_debug_file) { @@ -64,6 +63,12 @@ debug_stop_colors = "\033[0m"; } } + if (filename) +#ifndef _WIN32 + unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ +#else + putenv("PYPYLOG="); /* don't pass it to subprocesses */ +#endif debug_ready = 1; } @@ -73,6 +78,7 @@ return -1; // note that we deliberately ignore errno, since -1 is fine // in case this is not a real file + fflush(pypy_debug_file); return ftell(pypy_debug_file); } @@ -82,6 +88,26 @@ pypy_debug_open(); } +void pypy_debug_forked(long original_offset) +{ + if (debug_filename != NULL) + { + char *filename = malloc(strlen(debug_filename) + 32); + fclose(pypy_debug_file); + pypy_debug_file = NULL; + if (filename == NULL) + return; /* bah */ + sprintf(filename, "%s.fork%ld", debug_filename, (long)getpid()); + pypy_debug_file = fopen(filename, "w"); + if (pypy_debug_file) + fprintf(pypy_debug_file, "FORKED: %ld %s\n", original_offset, + debug_filename_with_fork ? 
debug_filename_with_fork + : debug_filename); + free(debug_filename_with_fork); + debug_filename_with_fork = filename; + } +} + #ifndef _WIN32 diff --git a/rpython/translator/c/src/debug_print.h b/rpython/translator/c/src/debug_print.h --- a/rpython/translator/c/src/debug_print.h +++ b/rpython/translator/c/src/debug_print.h @@ -29,6 +29,7 @@ #define PYPY_DEBUG_START(cat) pypy_debug_start(cat) #define PYPY_DEBUG_STOP(cat) pypy_debug_stop(cat) #define OP_DEBUG_OFFSET(res) res = pypy_debug_offset() +#define OP_DEBUG_FORKED(ofs, _) pypy_debug_forked(ofs) #define OP_HAVE_DEBUG_PRINTS(r) r = (pypy_have_debug_prints & 1) #define OP_DEBUG_FLUSH() fflush(pypy_debug_file) @@ -39,6 +40,7 @@ void pypy_debug_start(const char *category); void pypy_debug_stop(const char *category); long pypy_debug_offset(void); +void pypy_debug_forked(long original_offset); extern long pypy_have_debug_prints; extern FILE *pypy_debug_file; diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -447,6 +447,57 @@ assert 'bar' == lines[1] assert 'foo}' in lines[2] + def test_debug_print_fork(self): + if not hasattr(os, 'fork'): + py.test.skip("requires fork()") + + def entry_point(argv): + debug_start("foo") + debug_print("test line") + childpid = os.fork() + debug_print("childpid =", childpid) + if childpid == 0: + childpid2 = os.fork() # double-fork + debug_print("childpid2 =", childpid2) + debug_stop("foo") + return 0 + t, cbuilder = self.compile(entry_point) + path = udir.join('test_debug_print_fork.log') + out, err = cbuilder.cmdexec("", err=True, + env={'PYPYLOG': ':%s' % path}) + assert not err + # + f = open(str(path), 'r') + lines = f.readlines() + f.close() + assert '{foo' in lines[0] + assert lines[1] == "test line\n" + offset1 = len(lines[0]) + len(lines[1]) + assert lines[2].startswith('childpid = ') + childpid = int(lines[2][11:]) + assert childpid != 0 + assert 'foo}' in lines[3] + assert len(lines) == 4 + # + f = open('%s.fork%d' % (path, childpid), 'r') + lines = f.readlines() + f.close() + assert lines[0] == 'FORKED: %d %s\n' % (offset1, path) + assert lines[1] == 'childpid = 0\n' + offset2 = len(lines[0]) + len(lines[1]) + assert lines[2].startswith('childpid2 = ') + childpid2 = int(lines[2][11:]) + assert childpid2 != 0 + assert 'foo}' in lines[3] + assert len(lines) == 4 + # + f = open('%s.fork%d' % (path, childpid2), 'r') + lines = f.readlines() + f.close() + assert lines[0] == 'FORKED: %d %s.fork%d\n' % (offset2, path, childpid) + assert lines[1] == 'childpid2 = 0\n' + assert 'foo}' in lines[2] + assert len(lines) == 3 def test_fatal_error(self): def g(x): From noreply at buildbot.pypy.org Fri Aug 23 18:37:34 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 23 Aug 2013 18:37:34 +0200 (CEST) Subject: [pypy-commit] pypy default: Use a strlist for posix.listdir() Message-ID: <20130823163734.891DC1C01A6@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r66305:1043bb8b12f8 Date: 2013-08-23 09:24 -0700 http://bitbucket.org/pypy/pypy/changeset/1043bb8b12f8/ Log: Use a strlist for posix.listdir() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -577,13 +577,12 @@ except OperationError, e: # fall back to the original byte string result_w[i] = w_bytes + return space.newlist(result_w) else: dirname = 
space.str0_w(w_dirname) - result = rposix.listdir(dirname) - result_w = [space.wrap(s) for s in result] + return space.newlist_str(rposix.listdir(dirname)) except OSError, e: raise wrap_oserror2(space, e, w_dirname) - return space.newlist(result_w) def pipe(space): "Create a pipe. Returns (read_end, write_end)." From noreply at buildbot.pypy.org Fri Aug 23 18:37:36 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 23 Aug 2013 18:37:36 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20130823163736.37A0B1C01A6@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r66306:c82174cb45ee Date: 2013-08-23 09:37 -0700 http://bitbucket.org/pypy/pypy/changeset/c82174cb45ee/ Log: merged upstream diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -685,7 +685,7 @@ name='string', char='S', w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str], + alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -781,7 +781,7 @@ def test_character_dtype(self): from numpypy import array, character x = array([["A", "B"], ["C", "D"]], character) - assert x == [["A", "B"], ["C", "D"]] + assert (x == [["A", "B"], ["C", "D"]]).all() class AppTestRecordDtypes(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -272,7 +272,7 @@ assert rint(complex(inf, 1.5)) == complex(inf, 2.) 
assert rint(complex(0.5, inf)) == complex(0., inf) - assert rint(sys.maxint) == sys.maxint + assert rint(sys.maxint) > 0.0 def test_sign(self): from numpypy import array, sign, dtype diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -308,13 +308,6 @@ return min(v1, v2) @simple_unary_op - def rint(self, v): - if isfinite(float(v)): - return rfloat.round_double(float(v), 0, half_even=True) - else: - return v - - @simple_unary_op def ones_like(self, v): return 1 @@ -322,6 +315,10 @@ def zeros_like(self, v): return 0 + @raw_unary_op + def rint(self, v): + float64 = Float64() + return float64.rint(float64.box(v)) class NonNativePrimitive(Primitive): _mixin_ = True @@ -1036,6 +1033,25 @@ else: return v1 + v2 + @simple_unary_op + def rint(self, v): + x = float(v) + if isfinite(x): + import math + y = math.floor(x) + r = x - y + + if r > 0.5: + y += 1.0 + + if r == 0.5: + r = y - 2.0 * math.floor(0.5 * y) + if r == 1.0: + y += 1.0 + return y + else: + return x + class NonNativeFloat(NonNativePrimitive, Float): _mixin_ = True diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1276,10 +1276,7 @@ return _all_contained_in(space, self, w_other) return space.w_False - def descr_ne(self, space, w_other): - if not _is_set_like(w_other): - return space.w_NotImplemented - return space.not_(space.eq(self, w_other)) + descr_ne = negate(descr_eq) def descr_lt(self, space, w_other): if not _is_set_like(w_other): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,20 +1,22 @@ +"""The builtin int implementation + +In order to have the same behavior running on CPython, and after RPython +translation this module uses rarithmetic.ovfcheck to explicitly check +for overflows, something CPython does not do anymore. +""" + +from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int, ovfcheck, r_uint +from rpython.rlib.rbigint import rbigint + from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat from pypy.objspace.std.inttype import W_AbstractIntObject -from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from rpython.rlib import jit -from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, r_uint, is_valid_int -from rpython.rlib.rbigint import rbigint -""" -In order to have the same behavior running -on CPython, and after RPython translation we use ovfcheck -from rarithmetic to explicitly check for overflows, -something CPython does not do anymore. 
-""" class W_IntObject(W_AbstractIntObject): __slots__ = 'intval' @@ -22,28 +24,29 @@ from pypy.objspace.std.inttype import int_typedef as typedef - def __init__(w_self, intval): + def __init__(self, intval): assert is_valid_int(intval) - w_self.intval = intval + self.intval = intval - def __repr__(w_self): - """ representation for debugging purposes """ - return "%s(%d)" % (w_self.__class__.__name__, w_self.intval) + def __repr__(self): + """representation for debugging purposes""" + return "%s(%d)" % (self.__class__.__name__, self.intval) - def unwrap(w_self, space): - return int(w_self.intval) + def unwrap(self, space): + return int(self.intval) int_w = unwrap - def uint_w(w_self, space): - intval = w_self.intval + def uint_w(self, space): + intval = self.intval if intval < 0: - raise OperationError(space.w_ValueError, - space.wrap("cannot convert negative integer to unsigned")) + raise OperationError( + space.w_ValueError, + space.wrap("cannot convert negative integer to unsigned")) else: return r_uint(intval) - def bigint_w(w_self, space): - return rbigint.fromint(w_self.intval) + def bigint_w(self, space): + return rbigint.fromint(self.intval) def float_w(self, space): return float(self.intval) @@ -144,7 +147,8 @@ x = float(w_int1.intval) y = float(w_int2.intval) if y == 0.0: - raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float division")) + raise FailedToImplementArgs(space.w_ZeroDivisionError, + space.wrap("float division")) return space.wrap(x / y) def mod__Int_Int(space, w_int1, w_int2): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -1,13 +1,17 @@ +"""The builtin long implementation""" + import sys + +from rpython.rlib.rbigint import rbigint + from pypy.interpreter.error import OperationError from pypy.objspace.std import model, newformat -from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.longtype import W_AbstractLongObject, long_typedef +from pypy.objspace.std.model import W_Object, registerimplementation +from pypy.objspace.std.multimethod import FailedToImplementArgs +from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.multimethod import FailedToImplementArgs -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.noneobject import W_NoneObject -from rpython.rlib.rbigint import rbigint -from pypy.objspace.std.longtype import long_typedef, W_AbstractLongObject class W_LongObject(W_AbstractLongObject): @@ -16,8 +20,8 @@ typedef = long_typedef - def __init__(w_self, l): - w_self.num = l # instance of rbigint + def __init__(self, l): + self.num = l # instance of rbigint def fromint(space, intval): return W_LongObject(rbigint.fromint(intval)) @@ -49,16 +53,16 @@ fromrarith_int._annspecialcase_ = "specialize:argtype(0)" fromrarith_int = staticmethod(fromrarith_int) - def int_w(w_self, space): + def int_w(self, space): try: - return w_self.num.toint() + return self.num.toint() except OverflowError: raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to int")) - def uint_w(w_self, space): + def uint_w(self, space): try: - return w_self.num.touint() + return self.num.touint() except ValueError: raise OperationError(space.w_ValueError, space.wrap( "cannot convert negative integer to unsigned int")) @@ -66,8 +70,8 @@ raise 
OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to unsigned int")) - def bigint_w(w_self, space): - return w_self.num + def bigint_w(self, space): + return self.num def float_w(self, space): return self.tofloat(space) @@ -324,7 +328,8 @@ sys.maxint == 2147483647) # binary ops -for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', 'divmod', 'lshift']: +for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', + 'divmod', 'lshift']: exec compile(""" def %(opname)s_ovr__Int_Int(space, w_int1, w_int2): if recover_with_smalllong(space) and %(opname)r != 'truediv': diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -9,10 +9,7 @@ tmp = f(self, space, w_other) if tmp is space.w_NotImplemented: return space.w_NotImplemented - elif tmp is space.w_False: - return space.w_True - else: - return space.w_False + return space.newbool(tmp is space.w_False) _negator.func_name = 'negate-%s' % f.func_name return _negator diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -196,6 +196,25 @@ return hop.genop('debug_flush', []) +def debug_forked(original_offset): + """ Call after a fork(), passing as argument the result of + debug_offset() called before the fork. + """ + pass + +class Entry(ExtRegistryEntry): + _about_ = debug_forked + + def compute_result_annotation(self, s_original_offset): + return None + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + vlist = hop.inputargs(lltype.Signed) + hop.exception_cannot_occur() + return hop.genop('debug_forked', vlist) + + def llinterpcall(RESTYPE, pythonfunction, *args): """When running on the llinterp, this causes the llinterp to call to the provided Python function with the run-time value of the given args. diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1566,7 +1566,7 @@ @registering_if(os, 'fork') def register_os_fork(self): - from rpython.rlib import rthread + from rpython.rlib import rthread, debug eci = self.gcc_profiling_bug_workaround('pid_t _noprof_fork(void)', 'return fork();') os_fork = self.llexternal('_noprof_fork', [], rffi.PID_T, @@ -1575,11 +1575,14 @@ def fork_llimpl(): # NB. 
keep forkpty() up-to-date, too + ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) rthread.gc_thread_after_fork(childpid, opaqueaddr) if childpid == -1: raise OSError(rposix.get_errno(), "os_fork failed") + if childpid == 0: + debug.debug_forked(ofs) return rffi.cast(lltype.Signed, childpid) return extdef([], int, llimpl=fork_llimpl, @@ -1619,6 +1622,7 @@ def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') master_p[0] = rffi.cast(rffi.INT, -1) + ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_forkpty(master_p, None, None, None)) @@ -1627,6 +1631,8 @@ lltype.free(master_p, flavor='raw') if childpid == -1: raise OSError(rposix.get_errno(), "os_forkpty failed") + if childpid == 0: + debug.debug_forked(ofs) return (rffi.cast(lltype.Signed, childpid), rffi.cast(lltype.Signed, master_fd)) diff --git a/rpython/translator/c/src/debug_print.c b/rpython/translator/c/src/debug_print.c --- a/rpython/translator/c/src/debug_print.c +++ b/rpython/translator/c/src/debug_print.c @@ -23,16 +23,12 @@ static char *debug_start_colors_2 = ""; static char *debug_stop_colors = ""; static char *debug_prefix = NULL; +static char *debug_filename = NULL; +static char *debug_filename_with_fork = NULL; static void pypy_debug_open(void) { char *filename = getenv("PYPYLOG"); - if (filename) -#ifndef _WIN32 - unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ -#else - putenv("PYPYLOG="); /* don't pass it to subprocesses */ -#endif if (filename && filename[0]) { char *colon = strchr(filename, ':'); @@ -52,7 +48,10 @@ filename = colon + 1; } if (strcmp(filename, "-") != 0) - pypy_debug_file = fopen(filename, "w"); + { + debug_filename = strdup(filename); + pypy_debug_file = fopen(filename, "w"); + } } if (!pypy_debug_file) { @@ -64,6 +63,12 @@ debug_stop_colors = "\033[0m"; } } + if (filename) +#ifndef _WIN32 + unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ +#else + putenv("PYPYLOG="); /* don't pass it to subprocesses */ +#endif debug_ready = 1; } @@ -73,6 +78,7 @@ return -1; // note that we deliberately ignore errno, since -1 is fine // in case this is not a real file + fflush(pypy_debug_file); return ftell(pypy_debug_file); } @@ -82,6 +88,26 @@ pypy_debug_open(); } +void pypy_debug_forked(long original_offset) +{ + if (debug_filename != NULL) + { + char *filename = malloc(strlen(debug_filename) + 32); + fclose(pypy_debug_file); + pypy_debug_file = NULL; + if (filename == NULL) + return; /* bah */ + sprintf(filename, "%s.fork%ld", debug_filename, (long)getpid()); + pypy_debug_file = fopen(filename, "w"); + if (pypy_debug_file) + fprintf(pypy_debug_file, "FORKED: %ld %s\n", original_offset, + debug_filename_with_fork ? 
debug_filename_with_fork + : debug_filename); + free(debug_filename_with_fork); + debug_filename_with_fork = filename; + } +} + #ifndef _WIN32 diff --git a/rpython/translator/c/src/debug_print.h b/rpython/translator/c/src/debug_print.h --- a/rpython/translator/c/src/debug_print.h +++ b/rpython/translator/c/src/debug_print.h @@ -29,6 +29,7 @@ #define PYPY_DEBUG_START(cat) pypy_debug_start(cat) #define PYPY_DEBUG_STOP(cat) pypy_debug_stop(cat) #define OP_DEBUG_OFFSET(res) res = pypy_debug_offset() +#define OP_DEBUG_FORKED(ofs, _) pypy_debug_forked(ofs) #define OP_HAVE_DEBUG_PRINTS(r) r = (pypy_have_debug_prints & 1) #define OP_DEBUG_FLUSH() fflush(pypy_debug_file) @@ -39,6 +40,7 @@ void pypy_debug_start(const char *category); void pypy_debug_stop(const char *category); long pypy_debug_offset(void); +void pypy_debug_forked(long original_offset); extern long pypy_have_debug_prints; extern FILE *pypy_debug_file; diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -447,6 +447,57 @@ assert 'bar' == lines[1] assert 'foo}' in lines[2] + def test_debug_print_fork(self): + if not hasattr(os, 'fork'): + py.test.skip("requires fork()") + + def entry_point(argv): + debug_start("foo") + debug_print("test line") + childpid = os.fork() + debug_print("childpid =", childpid) + if childpid == 0: + childpid2 = os.fork() # double-fork + debug_print("childpid2 =", childpid2) + debug_stop("foo") + return 0 + t, cbuilder = self.compile(entry_point) + path = udir.join('test_debug_print_fork.log') + out, err = cbuilder.cmdexec("", err=True, + env={'PYPYLOG': ':%s' % path}) + assert not err + # + f = open(str(path), 'r') + lines = f.readlines() + f.close() + assert '{foo' in lines[0] + assert lines[1] == "test line\n" + offset1 = len(lines[0]) + len(lines[1]) + assert lines[2].startswith('childpid = ') + childpid = int(lines[2][11:]) + assert childpid != 0 + assert 'foo}' in lines[3] + assert len(lines) == 4 + # + f = open('%s.fork%d' % (path, childpid), 'r') + lines = f.readlines() + f.close() + assert lines[0] == 'FORKED: %d %s\n' % (offset1, path) + assert lines[1] == 'childpid = 0\n' + offset2 = len(lines[0]) + len(lines[1]) + assert lines[2].startswith('childpid2 = ') + childpid2 = int(lines[2][11:]) + assert childpid2 != 0 + assert 'foo}' in lines[3] + assert len(lines) == 4 + # + f = open('%s.fork%d' % (path, childpid2), 'r') + lines = f.readlines() + f.close() + assert lines[0] == 'FORKED: %d %s.fork%d\n' % (offset2, path, childpid) + assert lines[1] == 'childpid2 = 0\n' + assert 'foo}' in lines[2] + assert len(lines) == 3 def test_fatal_error(self): def g(x): From noreply at buildbot.pypy.org Fri Aug 23 19:39:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Aug 2013 19:39:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Change again the process slightly. Message-ID: <20130823173947.BB6A11C01A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66307:8c97e8124776 Date: 2013-08-23 19:39 +0200 http://bitbucket.org/pypy/pypy/changeset/8c97e8124776/ Log: Change again the process slightly. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -234,70 +234,63 @@ sys.maxint before they are converted to ``long``. The first decision that someone needs to make is if this incompatibility is reasonable. 
-Assuming that it is, the fixes are probably not too much work if the -goal is only to get a translated PyPy executable and to run tests with -it --- and not care about running all the tests of PyPy before -translation. To do that, the only tests that you should run (and start -with) are some tests in rpython/translator/c/test/, like -``test_standalone.py`` and ``test_newgc.py``. Keep in mind that this -runs small translations, and some details may go wrong, running on top -of CPython Win64; notably, constant integer values should be allowed up -to ``2**63-1``, but any value larger than ``2**32-1`` will be considered -out of bound. To fix this, you need to explicitly wrap such large -integers e.g. in the class ``r_longlong`` of rpython.rlib.rarithmetic. -This makes the translation toolchain handle them as longlong, which -have the correct range, even though in the end it is the same type, -i.e. a 64-bit integer. +Assuming that it is, the first thing to do is probably to hack *CPython* +until it fits this model: replace the field in PyIntObject with a ``long +long`` field, and change the value of ``sys.maxint``. This might just +work, even if half-brokenly: I'm sure you can crash it because of the +precision loss that undoubtedly occurs everywhere, but try not to. :-) -What is really needed is to review all the C files in +Such a hacked CPython is what you'll use in the next steps. We'll call +it CPython64/64. + +It is probably not too much work if the goal is only to get a translated +PyPy executable, and to run all tests before transaction. But you need +to start somewhere, and you should start with some tests in +rpython/translator/c/test/, like ``test_standalone.py`` and +``test_newgc.py``: try to have them pass on top of CPython64/64. + +Keep in mind that this runs small translations, and some details may go +wrong. The most obvious one is to check that it produces C files that +use the integer type ``Signed`` --- but what is ``Signed`` defined to? +It should be equal to ``long`` on every other platforms, but on Win64 it +should be something like ``long long``. + +What is more generally needed is to review all the C files in rpython/translator/c/src for the word ``long``, because this means a -32-bit integer even on Win64. Replace it with ``Signed``, and check the -definition of ``Signed``: it should be equal to ``long`` on every other -platforms (so you can replace one with the other without breaking -anything on other platforms), and on Win64 it should be something like -``long long``. +32-bit integer even on Win64. Replace it with ``Signed`` most of the +times. You can replace one with the other without breaking anything on +any other platform, so feel free to. -These two types have corresponding RPython types: ``rffi.LONG`` and -``lltype.Signed`` respectively. Add tests that check that integers -casted to one type or the other really have 32 and 64 bits respectively, -on Win64. +Then, these two C types have corresponding RPython types: ``rffi.LONG`` +and ``lltype.Signed`` respectively. The first should really correspond +to the C ``long``. Add tests that check that integers casted to one +type or the other really have 32 and 64 bits respectively, on Win64. Once these basic tests work, you need to review ``rpython/rlib/`` for -usages of ``rffi.LONG`` versus ``lltype.Signed``. Important: at this -point the goal would not be to run the tests in these directories! -Doing so would create more confusion to work around. 
Instead, the goal -would be to fix some ``LONG-versus-Signed`` issues, and if necessary -make sure that the tests still run fine e.g. on Win32. There was some -early work done notably in ``rpython/rlib/rarithmetic`` with the goal of -running all the tests on Win64, but I think by now that it's a bad idea: -again, we should only make sure that the tests work on Win32, and that -PyPy translates on Win64 and then run the (standard lib-python) tests. +usages of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to +fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as +always run on top of CPython64/64. Note that there was some early work +done in ``rpython/rlib/rarithmetic`` with the goal of running all the +tests on Win64 on the regular CPython, but I think by now that it's a +bad idea. Look only at CPython64/64. -The goal here is to get a translation of PyPy with ``-O2`` with a -minimal set of modules, starting with ``--no-allworkingmodules``. Check +The major intermediate goal is to get a translation of PyPy with ``-O2`` +with a minimal set of modules, starting with ``--no-allworkingmodules``; +you need to use CPython64/64 to run this translation too. Check carefully the warnings of the C compiler at the end. I think that MSVC is "nice" in the sense that by default a lot of mismatches of integer sizes are reported as warnings. -Why first try to translate when the modules ``pypy/module/*/`` may need -fixes too? The idea is that you really need to get a minimal translated -PyPy, with the minimal amount of modules (this used to be with the -``--translationmodules`` option, if it still works). Then we have a -Python interpreter, namely this minimal PyPy, which can run a full -translation and which has the "correct" setting of ``sys.maxint`` and -64-bit integers. So once we get this minimal PyPy we can use it to -translate a complete PyPy with less troubles. (We still need to review -e.g. ``rffi.LONG`` / ``lltype.Signed`` issues, obviously.) - -Alternatively, you might try to hack CPython to have ints store a 64-bit -number and ``sys.maxint`` be 2**63-1. This might be easier, and work as -long as you don't try too hard to crash it because of the precision loss -that undoubtedly occurs everywhere. Running the translation with such a -hacked CPython would give the same effect as running it on top of the -minimal PyPy described above. (Note that it's ok to do that: once we get -a full PyPy, we can simply tell people that future translations must be -run on top of that. We end up with a strange kind of dependency, but -I believe it's ok here, as Windows executables are supposed to never be -broken by newer versions of Windows.) +Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` +issues. At some time during this review, we get a working translated +PyPy on Windows 64 that includes all ``--translationmodules``, i.e. +everything needed to run translations. When we are there, the hacked +CPython64/64 becomes much less important, because we can run future +translations on top of this translated PyPy. As soon as we get there, +please *distribute* the translated PyPy. It's an essential component +for anyone else that wants to work on Win64! We end up with a strange +kind of dependency --- we need a translated PyPy in order to translate a +PyPy ---, but I believe it's ok here, as Windows executables are +supposed to never be broken by newer versions of Windows. 
Happy hacking :-) From noreply at buildbot.pypy.org Fri Aug 23 20:10:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Aug 2013 20:10:19 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix a bug found by running PyPy (graphwalktest.py, which I will check in Message-ID: <20130823181019.9DDC71C01A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r498:d2e01fce511f Date: 2013-08-23 20:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/d2e01fce511f/ Log: Fix a bug found by running PyPy (graphwalktest.py, which I will check in together with other test files I'm writing). diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -217,11 +217,12 @@ dprintf(("already stolen: %p -> %p\n", P, L)); /* note that we should follow h_revision at least one more - step: it is necessary if L is public but young (and then - has GCFLAG_MOVED), but it is fine to do it more - generally. */ - v = ACCESS_ONCE(L->h_revision); - if (IS_POINTER(v)) { + step: in the case where L is public but young (and then + has GCFLAG_MOVED). Don't do it generally! L might be + a stub again. */ + if (L->h_tid & GCFLAG_MOVED) { + v = ACCESS_ONCE(L->h_revision); + assert(IS_POINTER(v)); L = (gcptr)v; dprintf(("\t---> %p\n", L)); } From noreply at buildbot.pypy.org Fri Aug 23 20:11:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Aug 2013 20:11:22 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-static-barrier: import stmgc/d2e01fce511f Message-ID: <20130823181122.359BB1C01A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-static-barrier Changeset: r66308:5904f9845d8c Date: 2013-08-23 20:10 +0200 http://bitbucket.org/pypy/pypy/changeset/5904f9845d8c/ Log: import stmgc/d2e01fce511f diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -f614fd3a4080 +d2e01fce511f diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -218,11 +218,12 @@ dprintf(("already stolen: %p -> %p\n", P, L)); /* note that we should follow h_revision at least one more - step: it is necessary if L is public but young (and then - has GCFLAG_MOVED), but it is fine to do it more - generally. */ - v = ACCESS_ONCE(L->h_revision); - if (IS_POINTER(v)) { + step: in the case where L is public but young (and then + has GCFLAG_MOVED). Don't do it generally! L might be + a stub again. 
*/ + if (L->h_tid & GCFLAG_MOVED) { + v = ACCESS_ONCE(L->h_revision); + assert(IS_POINTER(v)); L = (gcptr)v; dprintf(("\t---> %p\n", L)); } From noreply at buildbot.pypy.org Sat Aug 24 02:14:29 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 24 Aug 2013 02:14:29 +0200 (CEST) Subject: [pypy-commit] pypy default: fix translation: backout 1043bb8b12f8 for now Message-ID: <20130824001429.E79451C12F0@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r66309:96d6dfac5ab9 Date: 2013-08-23 17:13 -0700 http://bitbucket.org/pypy/pypy/changeset/96d6dfac5ab9/ Log: fix translation: backout 1043bb8b12f8 for now diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -577,12 +577,13 @@ except OperationError, e: # fall back to the original byte string result_w[i] = w_bytes - return space.newlist(result_w) else: dirname = space.str0_w(w_dirname) - return space.newlist_str(rposix.listdir(dirname)) + result = rposix.listdir(dirname) + result_w = [space.wrap(s) for s in result] except OSError, e: raise wrap_oserror2(space, e, w_dirname) + return space.newlist(result_w) def pipe(space): "Create a pipe. Returns (read_end, write_end)." From noreply at buildbot.pypy.org Sat Aug 24 10:58:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Aug 2013 10:58:24 +0200 (CEST) Subject: [pypy-commit] stmgc default: stm_decode_abort_info: the current hack for strings doesn't really Message-ID: <20130824085824.307671C01A6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r499:cb61cf4e30a9 Date: 2013-08-24 10:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/cb61cf4e30a9/ Log: stm_decode_abort_info: the current hack for strings doesn't really work. Need to think about a different one... diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -300,15 +300,16 @@ case 3: /* a string of bytes from the target object */ rps = *(char **)(object + offset); offset = *fieldoffsets++; - if (rps) { + /* XXX think of a different hack: this one doesn't really + work if we see stubs! */ + if (rps && !(((gcptr)rps)->h_tid & GCFLAG_STUB)) { /* xxx a bit ad-hoc: it's a string whose length is a * long at 'offset', following immediately the offset */ rps_size = *(long *)(rps + offset); - offset += sizeof(long); assert(rps_size >= 0); res_size = sprintf(buffer, "%zu:", rps_size); WRITE_BUF(buffer, res_size); - WRITE_BUF(rps + offset, rps_size); + WRITE_BUF(rps + offset + sizeof(long), rps_size); } else { WRITE_BUF("0:", 2); From noreply at buildbot.pypy.org Sat Aug 24 16:50:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Aug 2013 16:50:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Add the pblend* instructions. Message-ID: <20130824145058.14A2B1C0223@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66310:3e8d338192e8 Date: 2013-08-24 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/3e8d338192e8/ Log: Add the pblend* instructions. 
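[Editorial note, not part of the archived message: trackgcroot.py scans compiler-generated assembler and keeps a whitelist of instruction-name prefixes that can never leave a GC pointer in their destination; 'pblend' covers the SSE4.1/AVX packed-blend family (pblendw, pblendvb, ...), which only shuffles vector data. A hedged Python sketch of how such a prefix whitelist is typically consulted (simplified, hypothetical helper name; the real tuple is the one extended in the diff below):

    # excerpt-style sketch; prefixes other than 'pblend' are taken from the patch
    NO_GC_POINTER_PREFIXES = ('pblend', 'pxor', 'pmovzx', 'pmovsx')

    def may_produce_gc_pointer(mnemonic):
        # e.g. "pblendw" -> False: a vector blend never yields a GC pointer
        return not mnemonic.startswith(NO_GC_POINTER_PREFIXES)

The actual changeset only adds 'pblend' to the existing tuple of ignored prefixes, as the one-line diff shows.]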
diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -489,7 +489,7 @@ 'pabs', 'pack', 'padd', 'palign', 'pand', 'pavg', 'pcmp', 'pextr', 'phadd', 'phsub', 'pinsr', 'pmadd', 'pmax', 'pmin', 'pmovmsk', 'pmul', 'por', 'psadb', 'pshuf', 'psign', 'psll', 'psra', 'psrl', - 'psub', 'punpck', 'pxor', 'pmovzx', 'pmovsx', + 'psub', 'punpck', 'pxor', 'pmovzx', 'pmovsx', 'pblend', # all vectors don't produce pointers 'v', # sign-extending moves should not produce GC pointers From noreply at buildbot.pypy.org Sun Aug 25 01:15:56 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 25 Aug 2013 01:15:56 +0200 (CEST) Subject: [pypy-commit] pypy default: missing import Message-ID: <20130824231556.D78741C0223@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r66311:dbcd4a6232fd Date: 2013-08-24 16:14 -0700 http://bitbucket.org/pypy/pypy/changeset/dbcd4a6232fd/ Log: missing import diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1566,7 +1566,7 @@ @registering_if(os, 'fork') def register_os_fork(self): - from rpython.rlib import rthread, debug + from rpython.rlib import debug, rthread eci = self.gcc_profiling_bug_workaround('pid_t _noprof_fork(void)', 'return fork();') os_fork = self.llexternal('_noprof_fork', [], rffi.PID_T, @@ -1613,7 +1613,7 @@ @registering_if(os, 'forkpty') def register_os_forkpty(self): - from rpython.rlib import rthread + from rpython.rlib import debug, rthread os_forkpty = self.llexternal( 'forkpty', [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], From noreply at buildbot.pypy.org Sun Aug 25 15:06:44 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 25 Aug 2013 15:06:44 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130825130644.2CD901C074B@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66312:49e961a18a14 Date: 2013-08-25 14:07 +0100 http://bitbucket.org/pypy/pypy/changeset/49e961a18a14/ Log: hg merge default diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -6,6 +6,10 @@ The following text gives some hints about how to translate the PyPy interpreter. +PyPy supports only being translated as a 32bit program, even on +64bit Windows. See at the end of this page for what is missing +for a full 64bit translation. + To build pypy-c you need a C compiler. Microsoft Visual Studio is preferred, but can also use the mingw32 port of gcc. @@ -63,7 +67,7 @@ INCLUDE, LIB and PATH (for DLLs) environment variables appropriately. Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local.zip @@ -112,13 +116,14 @@ nmake -f makefile.msc The sqlite3 database library -~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.sqlite.org/2013/sqlite-amalgamation-3071601.zip and extract it into a directory under the base directory. Also get http://www.sqlite.org/2013/sqlite-dll-win32-x86-3071601.zip and extract the dll into the bin directory, and the sqlite3.def into the sources directory. 
Now build the import library so cffi can use the header and dll:: + lib /DEF:sqlite3.def" /OUT:sqlite3.lib" copy sqlite3.lib path\to\libs @@ -206,8 +211,86 @@ March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. -.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds .. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds .. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. _`RPython translation toolchain`: translation.html + + +What is missing for a full 64-bit translation +--------------------------------------------- + +The main blocker is that we assume that the integer type of RPython is +large enough to (occasionally) contain a pointer value cast to an +integer. The simplest fix is to make sure that it is so, but it will +give the following incompatibility between CPython and PyPy on Win64: + +CPython: ``sys.maxint == 2**32-1, sys.maxsize == 2**64-1`` + +PyPy: ``sys.maxint == sys.maxsize == 2**64-1`` + +...and, correspondingly, PyPy supports ints up to the larger value of +sys.maxint before they are converted to ``long``. The first decision +that someone needs to make is if this incompatibility is reasonable. + +Assuming that it is, the first thing to do is probably to hack *CPython* +until it fits this model: replace the field in PyIntObject with a ``long +long`` field, and change the value of ``sys.maxint``. This might just +work, even if half-brokenly: I'm sure you can crash it because of the +precision loss that undoubtedly occurs everywhere, but try not to. :-) + +Such a hacked CPython is what you'll use in the next steps. We'll call +it CPython64/64. + +It is probably not too much work if the goal is only to get a translated +PyPy executable, and to run all tests before transaction. But you need +to start somewhere, and you should start with some tests in +rpython/translator/c/test/, like ``test_standalone.py`` and +``test_newgc.py``: try to have them pass on top of CPython64/64. + +Keep in mind that this runs small translations, and some details may go +wrong. The most obvious one is to check that it produces C files that +use the integer type ``Signed`` --- but what is ``Signed`` defined to? +It should be equal to ``long`` on every other platforms, but on Win64 it +should be something like ``long long``. + +What is more generally needed is to review all the C files in +rpython/translator/c/src for the word ``long``, because this means a +32-bit integer even on Win64. Replace it with ``Signed`` most of the +times. You can replace one with the other without breaking anything on +any other platform, so feel free to. + +Then, these two C types have corresponding RPython types: ``rffi.LONG`` +and ``lltype.Signed`` respectively. The first should really correspond +to the C ``long``. Add tests that check that integers casted to one +type or the other really have 32 and 64 bits respectively, on Win64. + +Once these basic tests work, you need to review ``rpython/rlib/`` for +usages of ``rffi.LONG`` versus ``lltype.Signed``. 
The goal would be to +fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as +always run on top of CPython64/64. Note that there was some early work +done in ``rpython/rlib/rarithmetic`` with the goal of running all the +tests on Win64 on the regular CPython, but I think by now that it's a +bad idea. Look only at CPython64/64. + +The major intermediate goal is to get a translation of PyPy with ``-O2`` +with a minimal set of modules, starting with ``--no-allworkingmodules``; +you need to use CPython64/64 to run this translation too. Check +carefully the warnings of the C compiler at the end. I think that MSVC +is "nice" in the sense that by default a lot of mismatches of integer +sizes are reported as warnings. + +Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` +issues. At some time during this review, we get a working translated +PyPy on Windows 64 that includes all ``--translationmodules``, i.e. +everything needed to run translations. When we are there, the hacked +CPython64/64 becomes much less important, because we can run future +translations on top of this translated PyPy. As soon as we get there, +please *distribute* the translated PyPy. It's an essential component +for anyone else that wants to work on Win64! We end up with a strange +kind of dependency --- we need a translated PyPy in order to translate a +PyPy ---, but I believe it's ok here, as Windows executables are +supposed to never be broken by newer versions of Windows. + +Happy hacking :-) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -685,7 +685,7 @@ name='string', char='S', w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str], + alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -781,7 +781,7 @@ def test_character_dtype(self): from numpypy import array, character x = array([["A", "B"], ["C", "D"]], character) - assert x == [["A", "B"], ["C", "D"]] + assert (x == [["A", "B"], ["C", "D"]]).all() class AppTestRecordDtypes(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -272,7 +272,7 @@ assert rint(complex(inf, 1.5)) == complex(inf, 2.) 
assert rint(complex(0.5, inf)) == complex(0., inf) - assert rint(sys.maxint) == sys.maxint + assert rint(sys.maxint) > 0.0 def test_sign(self): from numpypy import array, sign, dtype diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -308,13 +308,6 @@ return min(v1, v2) @simple_unary_op - def rint(self, v): - if isfinite(float(v)): - return rfloat.round_double(float(v), 0, half_even=True) - else: - return v - - @simple_unary_op def ones_like(self, v): return 1 @@ -322,6 +315,10 @@ def zeros_like(self, v): return 0 + @raw_unary_op + def rint(self, v): + float64 = Float64() + return float64.rint(float64.box(v)) class NonNativePrimitive(Primitive): _mixin_ = True @@ -1036,6 +1033,25 @@ else: return v1 + v2 + @simple_unary_op + def rint(self, v): + x = float(v) + if isfinite(x): + import math + y = math.floor(x) + r = x - y + + if r > 0.5: + y += 1.0 + + if r == 0.5: + r = y - 2.0 * math.floor(0.5 * y) + if r == 1.0: + y += 1.0 + return y + else: + return x + class NonNativeFloat(NonNativePrimitive, Float): _mixin_ = True diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1277,10 +1277,7 @@ return _all_contained_in(space, self, w_other) return space.w_False - def descr_ne(self, space, w_other): - if not _is_set_like(w_other): - return space.w_NotImplemented - return space.not_(space.eq(self, w_other)) + descr_ne = negate(descr_eq) def descr_lt(self, space, w_other): if not _is_set_like(w_other): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,20 +1,22 @@ +"""The builtin int implementation + +In order to have the same behavior running on CPython, and after RPython +translation this module uses rarithmetic.ovfcheck to explicitly check +for overflows, something CPython does not do anymore. +""" + +from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int, ovfcheck, r_uint +from rpython.rlib.rbigint import rbigint + from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat from pypy.objspace.std.inttype import W_AbstractIntObject -from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from rpython.rlib import jit -from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, r_uint, is_valid_int -from rpython.rlib.rbigint import rbigint -""" -In order to have the same behavior running -on CPython, and after RPython translation we use ovfcheck -from rarithmetic to explicitly check for overflows, -something CPython does not do anymore. 
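# Sketch only, not part of this commit: the ovfcheck idiom that the module
# docstring above refers to.  The arithmetic is wrapped so that, after
# translation to C, an overflow raises OverflowError instead of silently
# wrapping around.
from rpython.rlib.rarithmetic import ovfcheck

def add_ints_sketch(x, y):
    try:
        return ovfcheck(x + y)   # raises OverflowError instead of wrapping
    except OverflowError:
        # here the real implementation falls back to the long (rbigint) path;
        # returning None just keeps this sketch self-contained
        return None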
-""" class W_IntObject(W_AbstractIntObject): __slots__ = 'intval' @@ -22,28 +24,29 @@ from pypy.objspace.std.inttype import int_typedef as typedef - def __init__(w_self, intval): + def __init__(self, intval): assert is_valid_int(intval) - w_self.intval = intval + self.intval = intval - def __repr__(w_self): - """ representation for debugging purposes """ - return "%s(%d)" % (w_self.__class__.__name__, w_self.intval) + def __repr__(self): + """representation for debugging purposes""" + return "%s(%d)" % (self.__class__.__name__, self.intval) - def unwrap(w_self, space): - return int(w_self.intval) + def unwrap(self, space): + return int(self.intval) int_w = unwrap - def uint_w(w_self, space): - intval = w_self.intval + def uint_w(self, space): + intval = self.intval if intval < 0: - raise OperationError(space.w_ValueError, - space.wrap("cannot convert negative integer to unsigned")) + raise OperationError( + space.w_ValueError, + space.wrap("cannot convert negative integer to unsigned")) else: return r_uint(intval) - def bigint_w(w_self, space): - return rbigint.fromint(w_self.intval) + def bigint_w(self, space): + return rbigint.fromint(self.intval) def float_w(self, space): return float(self.intval) @@ -144,7 +147,8 @@ x = float(w_int1.intval) y = float(w_int2.intval) if y == 0.0: - raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float division")) + raise FailedToImplementArgs(space.w_ZeroDivisionError, + space.wrap("float division")) return space.wrap(x / y) def mod__Int_Int(space, w_int1, w_int2): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -1,13 +1,17 @@ +"""The builtin long implementation""" + import sys + +from rpython.rlib.rbigint import rbigint + from pypy.interpreter.error import OperationError from pypy.objspace.std import model, newformat -from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.longtype import W_AbstractLongObject, long_typedef +from pypy.objspace.std.model import W_Object, registerimplementation +from pypy.objspace.std.multimethod import FailedToImplementArgs +from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.multimethod import FailedToImplementArgs -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.noneobject import W_NoneObject -from rpython.rlib.rbigint import rbigint -from pypy.objspace.std.longtype import long_typedef, W_AbstractLongObject class W_LongObject(W_AbstractLongObject): @@ -16,8 +20,8 @@ typedef = long_typedef - def __init__(w_self, l): - w_self.num = l # instance of rbigint + def __init__(self, l): + self.num = l # instance of rbigint def fromint(space, intval): return W_LongObject(rbigint.fromint(intval)) @@ -49,16 +53,16 @@ fromrarith_int._annspecialcase_ = "specialize:argtype(0)" fromrarith_int = staticmethod(fromrarith_int) - def int_w(w_self, space): + def int_w(self, space): try: - return w_self.num.toint() + return self.num.toint() except OverflowError: raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to int")) - def uint_w(w_self, space): + def uint_w(self, space): try: - return w_self.num.touint() + return self.num.touint() except ValueError: raise OperationError(space.w_ValueError, space.wrap( "cannot convert negative integer to unsigned int")) @@ -66,8 +70,8 @@ raise 
OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to unsigned int")) - def bigint_w(w_self, space): - return w_self.num + def bigint_w(self, space): + return self.num def float_w(self, space): return self.tofloat(space) @@ -324,7 +328,8 @@ sys.maxint == 2147483647) # binary ops -for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', 'divmod', 'lshift']: +for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', + 'divmod', 'lshift']: exec compile(""" def %(opname)s_ovr__Int_Int(space, w_int1, w_int2): if recover_with_smalllong(space) and %(opname)r != 'truediv': diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -9,10 +9,7 @@ tmp = f(self, space, w_other) if tmp is space.w_NotImplemented: return space.w_NotImplemented - elif tmp is space.w_False: - return space.w_True - else: - return space.w_False + return space.newbool(tmp is space.w_False) _negator.func_name = 'negate-%s' % f.func_name return _negator diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -196,6 +196,25 @@ return hop.genop('debug_flush', []) +def debug_forked(original_offset): + """ Call after a fork(), passing as argument the result of + debug_offset() called before the fork. + """ + pass + +class Entry(ExtRegistryEntry): + _about_ = debug_forked + + def compute_result_annotation(self, s_original_offset): + return None + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + vlist = hop.inputargs(lltype.Signed) + hop.exception_cannot_occur() + return hop.genop('debug_forked', vlist) + + def llinterpcall(RESTYPE, pythonfunction, *args): """When running on the llinterp, this causes the llinterp to call to the provided Python function with the run-time value of the given args. diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1566,7 +1566,7 @@ @registering_if(os, 'fork') def register_os_fork(self): - from rpython.rlib import rthread + from rpython.rlib import debug, rthread eci = self.gcc_profiling_bug_workaround('pid_t _noprof_fork(void)', 'return fork();') os_fork = self.llexternal('_noprof_fork', [], rffi.PID_T, @@ -1575,11 +1575,14 @@ def fork_llimpl(): # NB. 
keep forkpty() up-to-date, too + ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_fork()) rthread.gc_thread_after_fork(childpid, opaqueaddr) if childpid == -1: raise OSError(rposix.get_errno(), "os_fork failed") + if childpid == 0: + debug.debug_forked(ofs) return rffi.cast(lltype.Signed, childpid) return extdef([], int, llimpl=fork_llimpl, @@ -1610,7 +1613,7 @@ @registering_if(os, 'forkpty') def register_os_forkpty(self): - from rpython.rlib import rthread + from rpython.rlib import debug, rthread os_forkpty = self.llexternal( 'forkpty', [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], @@ -1619,6 +1622,7 @@ def forkpty_llimpl(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') master_p[0] = rffi.cast(rffi.INT, -1) + ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = rffi.cast(lltype.Signed, os_forkpty(master_p, None, None, None)) @@ -1627,6 +1631,8 @@ lltype.free(master_p, flavor='raw') if childpid == -1: raise OSError(rposix.get_errno(), "os_forkpty failed") + if childpid == 0: + debug.debug_forked(ofs) return (rffi.cast(lltype.Signed, childpid), rffi.cast(lltype.Signed, master_fd)) diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -489,7 +489,7 @@ 'pabs', 'pack', 'padd', 'palign', 'pand', 'pavg', 'pcmp', 'pextr', 'phadd', 'phsub', 'pinsr', 'pmadd', 'pmax', 'pmin', 'pmovmsk', 'pmul', 'por', 'psadb', 'pshuf', 'psign', 'psll', 'psra', 'psrl', - 'psub', 'punpck', 'pxor', + 'psub', 'punpck', 'pxor', 'pmovzx', 'pmovsx', 'pblend', # all vectors don't produce pointers 'v', # sign-extending moves should not produce GC pointers diff --git a/rpython/translator/c/src/debug_print.c b/rpython/translator/c/src/debug_print.c --- a/rpython/translator/c/src/debug_print.c +++ b/rpython/translator/c/src/debug_print.c @@ -23,16 +23,12 @@ static char *debug_start_colors_2 = ""; static char *debug_stop_colors = ""; static char *debug_prefix = NULL; +static char *debug_filename = NULL; +static char *debug_filename_with_fork = NULL; static void pypy_debug_open(void) { char *filename = getenv("PYPYLOG"); - if (filename) -#ifndef _WIN32 - unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ -#else - putenv("PYPYLOG="); /* don't pass it to subprocesses */ -#endif if (filename && filename[0]) { char *colon = strchr(filename, ':'); @@ -52,7 +48,10 @@ filename = colon + 1; } if (strcmp(filename, "-") != 0) - pypy_debug_file = fopen(filename, "w"); + { + debug_filename = strdup(filename); + pypy_debug_file = fopen(filename, "w"); + } } if (!pypy_debug_file) { @@ -64,6 +63,12 @@ debug_stop_colors = "\033[0m"; } } + if (filename) +#ifndef _WIN32 + unsetenv("PYPYLOG"); /* don't pass it to subprocesses */ +#else + putenv("PYPYLOG="); /* don't pass it to subprocesses */ +#endif debug_ready = 1; } @@ -73,6 +78,7 @@ return -1; // note that we deliberately ignore errno, since -1 is fine // in case this is not a real file + fflush(pypy_debug_file); return ftell(pypy_debug_file); } @@ -82,6 +88,26 @@ pypy_debug_open(); } +void pypy_debug_forked(long original_offset) +{ + if (debug_filename != NULL) + { + char *filename = malloc(strlen(debug_filename) + 32); + fclose(pypy_debug_file); + pypy_debug_file = NULL; + if (filename == NULL) + return; /* bah */ + sprintf(filename, "%s.fork%ld", debug_filename, (long)getpid()); + pypy_debug_file = fopen(filename, "w"); + if (pypy_debug_file) + 
fprintf(pypy_debug_file, "FORKED: %ld %s\n", original_offset, + debug_filename_with_fork ? debug_filename_with_fork + : debug_filename); + free(debug_filename_with_fork); + debug_filename_with_fork = filename; + } +} + #ifndef _WIN32 diff --git a/rpython/translator/c/src/debug_print.h b/rpython/translator/c/src/debug_print.h --- a/rpython/translator/c/src/debug_print.h +++ b/rpython/translator/c/src/debug_print.h @@ -29,6 +29,7 @@ #define PYPY_DEBUG_START(cat) pypy_debug_start(cat) #define PYPY_DEBUG_STOP(cat) pypy_debug_stop(cat) #define OP_DEBUG_OFFSET(res) res = pypy_debug_offset() +#define OP_DEBUG_FORKED(ofs, _) pypy_debug_forked(ofs) #define OP_HAVE_DEBUG_PRINTS(r) r = (pypy_have_debug_prints & 1) #define OP_DEBUG_FLUSH() fflush(pypy_debug_file) @@ -39,6 +40,7 @@ void pypy_debug_start(const char *category); void pypy_debug_stop(const char *category); long pypy_debug_offset(void); +void pypy_debug_forked(long original_offset); extern long pypy_have_debug_prints; extern FILE *pypy_debug_file; diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -447,6 +447,57 @@ assert 'bar' == lines[1] assert 'foo}' in lines[2] + def test_debug_print_fork(self): + if not hasattr(os, 'fork'): + py.test.skip("requires fork()") + + def entry_point(argv): + debug_start("foo") + debug_print("test line") + childpid = os.fork() + debug_print("childpid =", childpid) + if childpid == 0: + childpid2 = os.fork() # double-fork + debug_print("childpid2 =", childpid2) + debug_stop("foo") + return 0 + t, cbuilder = self.compile(entry_point) + path = udir.join('test_debug_print_fork.log') + out, err = cbuilder.cmdexec("", err=True, + env={'PYPYLOG': ':%s' % path}) + assert not err + # + f = open(str(path), 'r') + lines = f.readlines() + f.close() + assert '{foo' in lines[0] + assert lines[1] == "test line\n" + offset1 = len(lines[0]) + len(lines[1]) + assert lines[2].startswith('childpid = ') + childpid = int(lines[2][11:]) + assert childpid != 0 + assert 'foo}' in lines[3] + assert len(lines) == 4 + # + f = open('%s.fork%d' % (path, childpid), 'r') + lines = f.readlines() + f.close() + assert lines[0] == 'FORKED: %d %s\n' % (offset1, path) + assert lines[1] == 'childpid = 0\n' + offset2 = len(lines[0]) + len(lines[1]) + assert lines[2].startswith('childpid2 = ') + childpid2 = int(lines[2][11:]) + assert childpid2 != 0 + assert 'foo}' in lines[3] + assert len(lines) == 4 + # + f = open('%s.fork%d' % (path, childpid2), 'r') + lines = f.readlines() + f.close() + assert lines[0] == 'FORKED: %d %s.fork%d\n' % (offset2, path, childpid) + assert lines[1] == 'childpid2 = 0\n' + assert 'foo}' in lines[2] + assert len(lines) == 3 def test_fatal_error(self): def g(x): From noreply at buildbot.pypy.org Sun Aug 25 22:27:16 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 25 Aug 2013 22:27:16 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: fix translation? wrap remaining W_StringBufferObject usages w/ withstrbuf Message-ID: <20130825202716.740701C074B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: refactor-str-types Changeset: r66313:9a79f1f76a91 Date: 2013-08-25 13:26 -0700 http://bitbucket.org/pypy/pypy/changeset/9a79f1f76a91/ Log: fix translation? 
wrap remaining W_StringBufferObject usages w/ withstrbuf checks diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -566,49 +566,55 @@ return space.wrap(StringBuffer(self._value)) def descr_eq(self, space, w_other): - from pypy.objspace.std.strbufobject import W_StringBufferObject - if isinstance(w_other, W_StringBufferObject): - return space.newbool(self._value == w_other.force()) + if space.config.objspace.std.withstrbuf: + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value == w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value == w_other._value) def descr_ne(self, space, w_other): - from pypy.objspace.std.strbufobject import W_StringBufferObject - if isinstance(w_other, W_StringBufferObject): - return space.newbool(self._value != w_other.force()) + if space.config.objspace.std.withstrbuf: + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value != w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value != w_other._value) def descr_lt(self, space, w_other): - from pypy.objspace.std.strbufobject import W_StringBufferObject - if isinstance(w_other, W_StringBufferObject): - return space.newbool(self._value < w_other.force()) + if space.config.objspace.std.withstrbuf: + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value < w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value < w_other._value) def descr_le(self, space, w_other): - from pypy.objspace.std.strbufobject import W_StringBufferObject - if isinstance(w_other, W_StringBufferObject): - return space.newbool(self._value <= w_other.force()) + if space.config.objspace.std.withstrbuf: + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value <= w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value <= w_other._value) def descr_gt(self, space, w_other): - from pypy.objspace.std.strbufobject import W_StringBufferObject - if isinstance(w_other, W_StringBufferObject): - return space.newbool(self._value > w_other.force()) + if space.config.objspace.std.withstrbuf: + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value > w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value > w_other._value) def descr_ge(self, space, w_other): - from pypy.objspace.std.strbufobject import W_StringBufferObject - if isinstance(w_other, W_StringBufferObject): - return space.newbool(self._value >= w_other.force()) + if space.config.objspace.std.withstrbuf: + from pypy.objspace.std.strbufobject import W_StringBufferObject + if isinstance(w_other, W_StringBufferObject): + return space.newbool(self._value >= w_other.force()) if not isinstance(w_other, W_BytesObject): return space.w_NotImplemented return space.newbool(self._value >= w_other._value) From noreply at 
buildbot.pypy.org Mon Aug 26 02:48:33 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 26 Aug 2013 02:48:33 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: hg merge default: probably the most annoying merge I ever did. Message-ID: <20130826004833.C87DD1C00A8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r66314:60f07e7ea8cd Date: 2013-08-26 01:49 +0100 http://bitbucket.org/pypy/pypy/changeset/60f07e7ea8cd/ Log: hg merge default: probably the most annoying merge I ever did. diff too long, truncating to 2000 out of 80816 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ .idea .project .pydevproject +__pycache__ syntax: regexp ^testresult$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -3,3 +3,6 @@ d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + 
Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -218,45 +281,22 @@ Impara, Germany Change Maker, Sweden University of California Berkeley, USA + Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' -============================================= - -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'pypy/translator/jvm/src/jasmin.jar' -================================================ - -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- License for 'pypy/module/unicodedata/' ====================================== diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -152,7 +152,8 @@ try: plaincontent = dot2plain_graphviz(content, contenttype) except PlainParseError, e: - print e - # failed, retry via codespeak - plaincontent = dot2plain_codespeak(content, contenttype) + raise + ##print e + ### failed, retry via codespeak + ##plaincontent = dot2plain_codespeak(content, contenttype) return list(parse_plain(graph_id, plaincontent, links, fixedfont)) diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -124,11 +125,19 @@ if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in 
os.environ: - cflags = os.environ["CFLAGS"].split() + cflags = shlex.split(os.environ["CFLAGS"]) compiler.compiler.extend(cflags) compiler.compiler_so.extend(cflags) compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -134,6 +134,11 @@ DEBUG = 10 NOTSET = 0 +# NOTE(flaper87): This is different from +# python's stdlib module since pypy's +# dicts are much faster when their +# keys are all of the same type. +# Introduced in commit 9de7b40c586f _levelToName = { CRITICAL: 'CRITICAL', ERROR: 'ERROR', @@ -168,7 +173,11 @@ Otherwise, the string "Level %s" % level is returned. """ - return _levelToName.get(level, ("Level %s" % level)) + + # NOTE(flaper87): Check also in _nameToLevel + # if value is None. + return (_levelToName.get(level) or + _nameToLevel.get(level, ("Level %s" % level))) def addLevelName(level, levelName): """ diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 +165,8 @@ # All _delegate_methods must also be initialized here. send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. The @@ -179,12 +181,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. 
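# Sketch only, not part of this commit: the minimal _reuse()/_drop() protocol
# described in the comment above, as a third-party sock-like object (such as
# the ones eventlet passes in) would have to implement it to work on PyPy.
# The class name and the close() body are illustrative.
class _RefcountedFakeSock(object):
    def __init__(self):
        self._refcount = 0       # starts at zero, as required

    def _reuse(self):
        self._refcount += 1

    def _drop(self):
        self._refcount -= 1
        if self._refcount <= 0:
            self.close()         # closed only when the last user drops it

    def close(self):
        pass                     # release the underlying resource here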
_sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). + self._sock = _sock def send(self, data, flags=0): @@ -216,9 +227,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +290,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +333,11 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: + self._sock = None + if s is not None: s._drop() - if self._close: - self._sock.close() - self._sock = None + if self._close: + s.close() def __del__(self): try: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). + sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS @@ -352,11 +358,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. 
return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, diff --git a/lib-python/2.7/test/test_logging.py b/lib-python/2.7/test/test_logging.py --- a/lib-python/2.7/test/test_logging.py +++ b/lib-python/2.7/test/test_logging.py @@ -278,6 +278,24 @@ def test_invalid_name(self): self.assertRaises(TypeError, logging.getLogger, any) + def test_get_level_name(self): + """Test getLevelName returns level constant.""" + # NOTE(flaper87): Bug #1517 + self.assertEqual(logging.getLevelName('NOTSET'), 0) + self.assertEqual(logging.getLevelName('DEBUG'), 10) + self.assertEqual(logging.getLevelName('INFO'), 20) + self.assertEqual(logging.getLevelName('WARN'), 30) + self.assertEqual(logging.getLevelName('WARNING'), 30) + self.assertEqual(logging.getLevelName('ERROR'), 40) + self.assertEqual(logging.getLevelName('CRITICAL'), 50) + + self.assertEqual(logging.getLevelName(0), 'NOTSET') + self.assertEqual(logging.getLevelName(10), 'DEBUG') + self.assertEqual(logging.getLevelName(20), 'INFO') + self.assertEqual(logging.getLevelName(30), 'WARNING') + self.assertEqual(logging.getLevelName(40), 'ERROR') + self.assertEqual(logging.getLevelName(50), 'CRITICAL') + class BasicFilterTest(BaseTest): """Test the bundled Filter class.""" diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1066,6 +1066,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1321,7 +1324,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1193,6 +1193,8 @@ # out of socket._fileobject() and into a base class. r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -20,7 +20,7 @@ or tp._type_ not in "iIhHbBlLqQ"): #XXX: are those all types? 
# we just dont get the type name - # in the interp levle thrown TypeError + # in the interp level thrown TypeError # from rawffi if there are more raise TypeError('bit fields not allowed for type ' + tp.__name__) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -476,6 +476,15 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + def _extract_yx(args): if len(args) >= 2: @@ -589,6 +598,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -602,6 +612,7 @@ @_argspec(2, 1, 2) def addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -780,6 +791,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -793,6 +805,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -953,7 +966,7 @@ r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") if lib.color_content(color, r, g, b) == lib.ERR: raise error("Argument 1 was out of range. Check value of COLORS.") - return (r, g, b) + return (r[0], g[0], b[0]) def color_pair(n): @@ -1108,6 +1121,7 @@ term = ffi.NULL err = ffi.new("int *") if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] if err == 0: raise error("setupterm: could not find terminal") elif err == -1: @@ -1197,6 +1211,7 @@ def putp(text): + text = _texttype(text) return _check_ERR(lib.putp(text), "putp") diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? 
:-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1305,7 +1305,7 @@ for i in xrange(_lib.sqlite3_column_count(self._statement)): name = _lib.sqlite3_column_name(self._statement, i) if name: - name = _ffi.string(name).decode('utf-8').split("[")[0].strip() + name = _ffi.string(name).split("[")[0].strip() desc.append((name, None, None, None, None, None, None)) return desc diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/__init__.py @@ -0,0 +1,48 @@ +# _tkinter package -- low-level interface to libtk and libtcl. +# +# This is an internal module, applications should "import Tkinter" instead. +# +# This version is based on cffi, and is a translation of _tkinter.c +# from CPython, version 2.7.4. 
+ +class TclError(Exception): + pass + +import cffi +try: + from .tklib import tklib, tkffi +except cffi.VerificationError: + raise ImportError("Tk headers and development libraries are required") + +from .app import TkApp + +TK_VERSION = tkffi.string(tklib.get_tk_version()) +TCL_VERSION = tkffi.string(tklib.get_tcl_version()) + +READABLE = tklib.TCL_READABLE +WRITABLE = tklib.TCL_WRITABLE +EXCEPTION = tklib.TCL_EXCEPTION + +def create(screenName=None, baseName=None, className=None, + interactive=False, wantobjects=False, wantTk=True, + sync=False, use=None): + return TkApp(screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use) + +def _flatten(item): + def _flatten1(output, item, depth): + if depth > 1000: + raise ValueError("nesting too deep in _flatten") + if not isinstance(item, (list, tuple)): + raise TypeError("argument must be sequence") + # copy items to output tuple + for o in item: + if isinstance(o, (list, tuple)): + _flatten1(output, o, depth + 1) + elif o is not None: + output.append(o) + + result = [] + _flatten1(result, item, 0) + return tuple(result) + diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/app.py @@ -0,0 +1,389 @@ +# The TkApp class. + +from .tklib import tklib, tkffi +from . import TclError +from .tclobj import TclObject, FromObj, AsObj, TypeCache + +import sys + +def varname_converter(input): + if isinstance(input, TclObject): + return input.string + return input + + +def Tcl_AppInit(app): + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + skip_tk_init = tklib.Tcl_GetVar( + app.interp, "_tkinter_skip_tk_init", tklib.TCL_GLOBAL_ONLY) + if skip_tk_init and tkffi.string(skip_tk_init) == "1": + return + + if tklib.Tk_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + +class _CommandData(object): + def __new__(cls, app, name, func): + self = object.__new__(cls) + self.app = app + self.name = name + self.func = func + handle = tkffi.new_handle(self) + app._commands[name] = handle # To keep the command alive + return tkffi.cast("ClientData", handle) + + @tkffi.callback("Tcl_CmdProc") + def PythonCmd(clientData, interp, argc, argv): + self = tkffi.from_handle(clientData) + assert self.app.interp == interp + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK + + @tkffi.callback("Tcl_CmdDeleteProc") + def PythonCmdDelete(clientData): + self = tkffi.from_handle(clientData) + app = self.app + del app._commands[self.name] + return + + +class TkApp(object): + def __new__(cls, screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use): + if not wantobjects: + raise NotImplementedError("wantobjects=True only") + self = object.__new__(cls) + self.interp = tklib.Tcl_CreateInterp() + self._wantobjects = wantobjects + self.threaded = bool(tklib.Tcl_GetVar2Ex( + self.interp, "tcl_platform", "threaded", + tklib.TCL_GLOBAL_ONLY)) + self.thread_id = tklib.Tcl_GetCurrentThread() + self.dispatching = False + self.quitMainLoop = False + self.errorInCmd = False + + self._typeCache = TypeCache() + self._commands = {} + + # Delete the 'exit' command, which can screw things up + tklib.Tcl_DeleteCommand(self.interp, "exit") + + if screenName is not None: + tklib.Tcl_SetVar2(self.interp, "env", "DISPLAY", screenName, + 
tklib.TCL_GLOBAL_ONLY) + + if interactive: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "1", + tklib.TCL_GLOBAL_ONLY) + else: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "0", + tklib.TCL_GLOBAL_ONLY) + + # This is used to get the application class for Tk 4.1 and up + argv0 = className.lower() + tklib.Tcl_SetVar(self.interp, "argv0", argv0, + tklib.TCL_GLOBAL_ONLY) + + if not wantTk: + tklib.Tcl_SetVar(self.interp, "_tkinter_skip_tk_init", "1", + tklib.TCL_GLOBAL_ONLY) + + # some initial arguments need to be in argv + if sync or use: + args = "" + if sync: + args += "-sync" + if use: + if sync: + args += " " + args += "-use " + use + + tklib.Tcl_SetVar(self.interp, "argv", args, + tklib.TCL_GLOBAL_ONLY) + + Tcl_AppInit(self) + # EnableEventHook() + return self + + def __del__(self): + tklib.Tcl_DeleteInterp(self.interp) + # DisableEventHook() + + def raiseTclError(self): + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + raise TclError(tkffi.string(tklib.Tcl_GetStringResult(self.interp))) + + def wantobjects(self): + return self._wantobjects + + def _check_tcl_appartment(self): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise RuntimeError("Calling Tcl from different appartment") + + def loadtk(self): + # We want to guard against calling Tk_Init() multiple times + err = tklib.Tcl_Eval(self.interp, "info exists tk_version") + if err == tklib.TCL_ERROR: + self.raiseTclError() + tk_exists = tklib.Tcl_GetStringResult(self.interp) + if not tk_exists or tkffi.string(tk_exists) != "1": + err = tklib.Tk_Init(self.interp) + if err == tklib.TCL_ERROR: + self.raiseTclError() + + def _var_invoke(self, func, *args, **kwargs): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # The current thread is not the interpreter thread. + # Marshal the call to the interpreter thread, then wait + # for completion. 
+ raise NotImplementedError("Call from another thread") + return func(*args, **kwargs) + + def _getvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) + + def _setvar(self, name1, value, global_only=False): + name1 = varname_converter(name1) + newval = AsObj(value) + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() + + def _unsetvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def getvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2) + + def globalgetvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2, global_only=True) + + def setvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value) + + def globalsetvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value, global_only=True) + + def unsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2) + + def globalunsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2, global_only=True) + + # COMMANDS + + def createcommand(self, cmdName, func): + if not callable(func): + raise TypeError("command not callable") + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + clientData = _CommandData(self, cmdName, func) + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) + if not res: + raise TclError("can't create Tcl command") + + def deletecommand(self, cmdName): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + if res == -1: + raise TclError("can't delete Tcl command") + + def call(self, *args): + flags = tklib.TCL_EVAL_DIRECT | tklib.TCL_EVAL_GLOBAL + + # If args is a single tuple, replace with contents of tuple + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # We cannot call the command directly. Instead, we must + # marshal the parameters to the interpreter thread. 
+ raise NotImplementedError("Call from another thread") + + objects = tkffi.new("Tcl_Obj*[]", len(args)) + argc = len(args) + try: + for i, arg in enumerate(args): + if arg is None: + argc = i + break + obj = AsObj(arg) + tklib.Tcl_IncrRefCount(obj) + objects[i] = obj + + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() + finally: + for obj in objects: + if obj: + tklib.Tcl_DecrRefCount(obj) + return result + + def _callResult(self): + assert self._wantobjects + value = tklib.Tcl_GetObjResult(self.interp) + # Not sure whether the IncrRef is necessary, but something + # may overwrite the interpreter result while we are + # converting it. + tklib.Tcl_IncrRefCount(value) + res = FromObj(self, value) + tklib.Tcl_DecrRefCount(value) + return res + + def eval(self, script): + self._check_tcl_appartment() + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def evalfile(self, filename): + self._check_tcl_appartment() + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def split(self, arg): + if isinstance(arg, tuple): + return self._splitObj(arg) + else: + return self._split(arg) + + def splitlist(self, arg): + if isinstance(arg, tuple): + return arg + if isinstance(arg, unicode): + arg = arg.encode('utf8') + + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(self.interp, arg, argc, argv) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + result = tuple(tkffi.string(argv[0][i]) + for i in range(argc[0])) + tklib.Tcl_Free(argv[0]) + return result + + def _splitObj(self, arg): + if isinstance(arg, tuple): + size = len(arg) + # Recursively invoke SplitObj for all tuple items. + # If this does not return a new object, no action is + # needed. + result = None + newelems = (self._splitObj(elem) for elem in arg) + for elem, newelem in zip(arg, newelems): + if elem is not newelem: + return newelems + elif isinstance(arg, str): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + return arg + tklib.Tcl_Free(argv[0]) + if argc[0] > 1: + return self._split(arg) + return arg + + def _split(self, arg): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + # Not a list. + # Could be a quoted string containing funnies, e.g. {"}. + # Return the string itself. 
+ return arg + + try: + if argc[0] == 0: + return "" + elif argc[0] == 1: + return argv[0][0] + else: + return (self._split(argv[0][i]) + for i in range(argc[0])) + finally: + tklib.Tcl_Free(argv[0]) + + def getboolean(self, s): + if isinstance(s, int): + return s + v = tkffi.new("int*") + res = tklib.Tcl_GetBoolean(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def mainloop(self, threshold): + self._check_tcl_appartment() + self.dispatching = True + while (tklib.Tk_GetNumMainWindows() > threshold and + not self.quitMainLoop and not self.errorInCmd): + + if self.threaded: + result = tklib.Tcl_DoOneEvent(0) + else: + raise NotImplementedError("TCL configured without threads") + + if result < 0: + break + self.dispatching = False + self.quitMainLoop = False + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + + def quit(self): + self.quitMainLoop = True diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tclobj.py @@ -0,0 +1,114 @@ +# TclObject, conversions with Python objects + +from .tklib import tklib, tkffi + +class TypeCache(object): + def __init__(self): + self.BooleanType = tklib.Tcl_GetObjType("boolean") + self.ByteArrayType = tklib.Tcl_GetObjType("bytearray") + self.DoubleType = tklib.Tcl_GetObjType("double") + self.IntType = tklib.Tcl_GetObjType("int") + self.ListType = tklib.Tcl_GetObjType("list") + self.ProcBodyType = tklib.Tcl_GetObjType("procbody") + self.StringType = tklib.Tcl_GetObjType("string") + + +def FromObj(app, value): + """Convert a TclObj pointer into a Python object.""" + typeCache = app._typeCache + if not value.typePtr: + buf = tkffi.buffer(value.bytes, value.length) + result = buf[:] + # If the result contains any bytes with the top bit set, it's + # UTF-8 and we should decode it to Unicode. 
+ try: + result.decode('ascii') + except UnicodeDecodeError: + result = result.decode('utf8') + return result + + elif value.typePtr == typeCache.BooleanType: + return result + elif value.typePtr == typeCache.ByteArrayType: + return result + elif value.typePtr == typeCache.DoubleType: + return value.internalRep.doubleValue + elif value.typePtr == typeCache.IntType: + return value.internalRep.longValue + elif value.typePtr == typeCache.ListType: + size = tkffi.new('int*') + status = tklib.Tcl_ListObjLength(app.interp, value, size) + if status == tklib.TCL_ERROR: + app.raiseTclError() + result = [] + tcl_elem = tkffi.new("Tcl_Obj**") + for i in range(size[0]): + status = tklib.Tcl_ListObjIndex(app.interp, + value, i, tcl_elem) + if status == tklib.TCL_ERROR: + app.raiseTclError() + result.append(FromObj(app, tcl_elem[0])) + return tuple(result) + elif value.typePtr == typeCache.ProcBodyType: + return result + elif value.typePtr == typeCache.StringType: + buf = tklib.Tcl_GetUnicode(value) + length = tklib.Tcl_GetCharLength(value) + buf = tkffi.buffer(tkffi.cast("char*", buf), length*2)[:] + return buf.decode('utf-16') + + return TclObject(value) + +def AsObj(value): + if isinstance(value, str): + return tklib.Tcl_NewStringObj(value, len(value)) + elif isinstance(value, bool): + return tklib.Tcl_NewBooleanObj(value) + elif isinstance(value, int): + return tklib.Tcl_NewLongObj(value) + elif isinstance(value, float): + return tklib.Tcl_NewDoubleObj(value) + elif isinstance(value, tuple): + argv = tkffi.new("Tcl_Obj*[]", len(value)) + for i in range(len(value)): + argv[i] = AsObj(value[i]) + return tklib.Tcl_NewListObj(len(value), argv) + elif isinstance(value, unicode): + encoded = value.encode('utf-16')[2:] + buf = tkffi.new("char[]", encoded) + inbuf = tkffi.cast("Tcl_UniChar*", buf) + return tklib.Tcl_NewUnicodeObj(buf, len(encoded)/2) + elif isinstance(value, TclObject): + tklib.Tcl_IncrRefCount(value._value) + return value._value + else: + return AsObj(str(value)) + +class TclObject(object): + def __new__(cls, value): + self = object.__new__(cls) + tklib.Tcl_IncrRefCount(value) + self._value = value + self._string = None + return self + + def __del__(self): + tklib.Tcl_DecrRefCount(self._value) + + def __str__(self): + if self._string and isinstance(self._string, str): + return self._string + return tkffi.string(tklib.Tcl_GetString(self._value)) + + @property + def string(self): + if self._string is None: + length = tkffi.new("int*") + s = tklib.Tcl_GetStringFromObj(self._value, length) + value = tkffi.buffer(s, length[0])[:] + try: + value.decode('ascii') + except UnicodeDecodeError: + value = value.decode('utf8') + self._string = value + return self._string diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tklib.py @@ -0,0 +1,114 @@ +# C bindings with libtcl and libtk. + +from cffi import FFI + +tkffi = FFI() + +tkffi.cdef(""" +char *get_tk_version(); +char *get_tcl_version(); +#define TCL_READABLE ... +#define TCL_WRITABLE ... +#define TCL_EXCEPTION ... +#define TCL_ERROR ... +#define TCL_OK ... + +#define TCL_LEAVE_ERR_MSG ... +#define TCL_GLOBAL_ONLY ... +#define TCL_EVAL_DIRECT ... +#define TCL_EVAL_GLOBAL ... + +typedef unsigned short Tcl_UniChar; +typedef ... 
Tcl_Interp; +typedef ...* Tcl_ThreadId; +typedef ...* Tcl_Command; + +typedef struct Tcl_ObjType { + char *name; + ...; +} Tcl_ObjType; +typedef struct Tcl_Obj { + char *bytes; + int length; + Tcl_ObjType *typePtr; + union { /* The internal representation: */ + long longValue; /* - an long integer value. */ + double doubleValue; /* - a double-precision floating value. */ + struct { /* - internal rep as two pointers. */ + void *ptr1; + void *ptr2; + } twoPtrValue; + } internalRep; + ...; +} Tcl_Obj; + +Tcl_Interp *Tcl_CreateInterp(); +void Tcl_DeleteInterp(Tcl_Interp* interp); +int Tcl_Init(Tcl_Interp* interp); +int Tk_Init(Tcl_Interp* interp); + +void Tcl_Free(char* ptr); + +const char *Tcl_SetVar(Tcl_Interp* interp, const char* varName, const char* newValue, int flags); +const char *Tcl_SetVar2(Tcl_Interp* interp, const char* name1, const char* name2, const char* newValue, int flags); +const char *Tcl_GetVar(Tcl_Interp* interp, const char* varName, int flags); +Tcl_Obj *Tcl_SetVar2Ex(Tcl_Interp* interp, const char* name1, const char* name2, Tcl_Obj* newValuePtr, int flags); +Tcl_Obj *Tcl_GetVar2Ex(Tcl_Interp* interp, const char* name1, const char* name2, int flags); +int Tcl_UnsetVar2(Tcl_Interp* interp, const char* name1, const char* name2, int flags); +const Tcl_ObjType *Tcl_GetObjType(const char* typeName); + +Tcl_Obj *Tcl_NewStringObj(const char* bytes, int length); +Tcl_Obj *Tcl_NewUnicodeObj(const Tcl_UniChar* unicode, int numChars); +Tcl_Obj *Tcl_NewLongObj(long longValue); +Tcl_Obj *Tcl_NewBooleanObj(int boolValue); +Tcl_Obj *Tcl_NewDoubleObj(double doubleValue); + +void Tcl_IncrRefCount(Tcl_Obj* objPtr); +void Tcl_DecrRefCount(Tcl_Obj* objPtr); + +int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); +char *Tcl_GetString(Tcl_Obj* objPtr); +char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); + +Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); +int Tcl_GetCharLength(Tcl_Obj* objPtr); + +Tcl_Obj *Tcl_NewListObj(int objc, Tcl_Obj* const objv[]); +int Tcl_ListObjLength(Tcl_Interp* interp, Tcl_Obj* listPtr, int* intPtr); +int Tcl_ListObjIndex(Tcl_Interp* interp, Tcl_Obj* listPtr, int index, Tcl_Obj** objPtrPtr); +int Tcl_SplitList(Tcl_Interp* interp, char* list, int* argcPtr, const char*** argvPtr); + +int Tcl_Eval(Tcl_Interp* interp, const char* script); +int Tcl_EvalFile(Tcl_Interp* interp, const char* filename); +int Tcl_EvalObjv(Tcl_Interp* interp, int objc, Tcl_Obj** objv, int flags); +Tcl_Obj *Tcl_GetObjResult(Tcl_Interp* interp); +const char *Tcl_GetStringResult(Tcl_Interp* interp); +void Tcl_SetObjResult(Tcl_Interp* interp, Tcl_Obj* objPtr); + +typedef void* ClientData; +typedef int Tcl_CmdProc( + ClientData clientData, + Tcl_Interp *interp, + int argc, + const char *argv[]); +typedef void Tcl_CmdDeleteProc( + ClientData clientData); +Tcl_Command Tcl_CreateCommand(Tcl_Interp* interp, const char* cmdName, Tcl_CmdProc proc, ClientData clientData, Tcl_CmdDeleteProc deleteProc); +int Tcl_DeleteCommand(Tcl_Interp* interp, const char* cmdName); + +Tcl_ThreadId Tcl_GetCurrentThread(); +int Tcl_DoOneEvent(int flags); + +int Tk_GetNumMainWindows(); +""") + +tklib = tkffi.verify(""" +#include +#include + +char *get_tk_version() { return TK_VERSION; } +char *get_tcl_version() { return TCL_VERSION; } +""", +include_dirs=['/usr/include/tcl'], +libraries=['tcl', 'tk'], +) diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.6 +Version: 0.7 
Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -707,7 +707,7 @@ class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union - _reftypename = '%s %s &' % (kind, name) + _reftypename = '%s &' % (name,) _kind = kind # CTypesStructOrUnion._fix_class() @@ -934,7 +934,7 @@ # class CTypesEnum(CTypesInt): __slots__ = [] - _reftypename = 'enum %s &' % name + _reftypename = '%s &' % name def _get_own_repr(self): value = self._value diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -244,6 +244,10 @@ self.forcename = forcename self.build_c_name_with_marker() + def get_official_name(self): + assert self.c_name_with_marker.endswith('&') + return self.c_name_with_marker[:-1] + class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None @@ -357,7 +361,9 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - return global_cache(self, ffi, 'new_struct_type', self.name, key=self) + + return global_cache(self, ffi, 'new_struct_type', + self.get_official_name(), key=self) class UnionType(StructOrUnion): @@ -365,7 +371,8 @@ def build_backend_type(self, ffi, finishlist): finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', self.name, key=self) + return global_cache(self, ffi, 'new_union_type', + self.get_official_name(), key=self) class EnumType(StructOrUnionOrEnum): @@ -388,7 +395,8 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() base_btype = self.build_baseinttype(ffi, finishlist) - return global_cache(self, ffi, 'new_enum_type', self.name, + return global_cache(self, ffi, 'new_enum_type', + self.get_official_name(), self.enumerators, self.enumvalues, base_btype, key=self) diff --git a/lib_pypy/ctypes_config_cache/syslog.ctc.py b/lib_pypy/ctypes_config_cache/syslog.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/syslog.ctc.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -'ctypes_configure' source for syslog.py. -Run this to rebuild _syslog_cache.py. 
-""" - -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger) -import dumpcache - - -_CONSTANTS = ( - 'LOG_EMERG', - 'LOG_ALERT', - 'LOG_CRIT', - 'LOG_ERR', - 'LOG_WARNING', - 'LOG_NOTICE', - 'LOG_INFO', - 'LOG_DEBUG', - - 'LOG_PID', - 'LOG_CONS', - 'LOG_NDELAY', - - 'LOG_KERN', - 'LOG_USER', - 'LOG_MAIL', - 'LOG_DAEMON', - 'LOG_AUTH', - 'LOG_LPR', - 'LOG_LOCAL0', - 'LOG_LOCAL1', - 'LOG_LOCAL2', - 'LOG_LOCAL3', - 'LOG_LOCAL4', - 'LOG_LOCAL5', - 'LOG_LOCAL6', - 'LOG_LOCAL7', -) -_OPTIONAL_CONSTANTS = ( - 'LOG_NOWAIT', - 'LOG_PERROR', - - 'LOG_SYSLOG', - 'LOG_CRON', - 'LOG_UUCP', - 'LOG_NEWS', -) - -# Constant aliases if there are not defined -_ALIAS = ( - ('LOG_SYSLOG', 'LOG_DAEMON'), - ('LOG_CRON', 'LOG_DAEMON'), - ('LOG_NEWS', 'LOG_MAIL'), - ('LOG_UUCP', 'LOG_MAIL'), -) - -class SyslogConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/syslog.h']) -for key in _CONSTANTS: - setattr(SyslogConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(SyslogConfigure, key, DefinedConstantInteger(key)) - -config = configure(SyslogConfigure) -for key in _OPTIONAL_CONSTANTS: - if config[key] is None: - del config[key] -for alias, key in _ALIAS: - config.setdefault(alias, config[key]) - -all_constants = config.keys() -all_constants.sort() -config['ALL_CONSTANTS'] = tuple(all_constants) -dumpcache.dumpcache2('syslog', config) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -5,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" @@ -46,16 +47,16 @@ if parent is not None: self.parent = parent - def switch(self, *args): + def switch(self, *args, **kwds): "Switch execution to this greenlet, optionally passing the values " "given as argument(s). Returns the value passed when switching back." - return self.__switch('switch', args) + return self.__switch('switch', (args, kwds)) def throw(self, typ=GreenletExit, val=None, tb=None): "raise exception in greenlet, return value passed when switching back" return self.__switch('throw', typ, val, tb) - def __switch(target, methodname, *args): + def __switch(target, methodname, *baseargs): current = getcurrent() # while not (target.__main or _continulet.is_pending(target)): @@ -65,9 +66,9 @@ greenlet_func = _greenlet_start else: greenlet_func = _greenlet_throw - _continulet.__init__(target, greenlet_func, *args) + _continulet.__init__(target, greenlet_func, *baseargs) methodname = 'switch' - args = () + baseargs = () target.__started = True break # already done, go to the parent instead @@ -75,14 +76,27 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) 
target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) - args = unbound_method(current, *args, to=target) + args, kwds = unbound_method(current, *baseargs, to=target) finally: _tls.current = current # - if len(args) == 1: + if kwds: + if args: + return args, kwds + return kwds + elif len(args) == 1: return args[0] else: return args @@ -129,18 +143,22 @@ _tls.current = gmain def _greenlet_start(greenlet, args): + args, kwds = args _tls.current = greenlet try: - res = greenlet.run(*args) + res = greenlet.run(*args, **kwds) except GreenletExit, e: res = e finally: _continuation.permute(greenlet, greenlet.parent) - return (res,) + return ((res,), None) def _greenlet_throw(greenlet, exc, value, tb): _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py --- a/lib_pypy/grp.py +++ b/lib_pypy/grp.py @@ -8,6 +8,7 @@ from ctypes import Structure, c_char_p, c_int, POINTER from ctypes_support import standard_c_lib as libc +import _structseq try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -23,32 +24,13 @@ ('gr_mem', POINTER(c_char_p)), ) -class Group(object): - def __init__(self, gr_name, gr_passwd, gr_gid, gr_mem): - self.gr_name = gr_name - self.gr_passwd = gr_passwd - self.gr_gid = gr_gid - self.gr_mem = gr_mem +class struct_group: + __metaclass__ = _structseq.structseqtype - def __getitem__(self, item): - if item == 0: - return self.gr_name - elif item == 1: - return self.gr_passwd - elif item == 2: - return self.gr_gid - elif item == 3: - return self.gr_mem - else: - raise IndexError(item) - - def __len__(self): - return 4 - - def __repr__(self): - return str((self.gr_name, self.gr_passwd, self.gr_gid, self.gr_mem)) - - # whatever else... + gr_name = _structseq.structseqfield(0) + gr_passwd = _structseq.structseqfield(1) + gr_gid = _structseq.structseqfield(2) + gr_mem = _structseq.structseqfield(3) libc.getgrgid.argtypes = [gid_t] libc.getgrgid.restype = POINTER(GroupStruct) @@ -71,8 +53,8 @@ while res.contents.gr_mem[i]: mem.append(res.contents.gr_mem[i]) i += 1 - return Group(res.contents.gr_name, res.contents.gr_passwd, - res.contents.gr_gid, mem) + return struct_group((res.contents.gr_name, res.contents.gr_passwd, + res.contents.gr_gid, mem)) @builtinify def getgrgid(gid): diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py --- a/lib_pypy/pyrepl/curses.py +++ b/lib_pypy/pyrepl/curses.py @@ -19,11 +19,15 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -# avoid importing the whole curses, if possible -try: +# If we are running on top of pypy, we import only _minimal_curses. +# Don't try to fall back to _curses, because that's going to use cffi +# and fall again more loudly. 
+import sys +if '__pypy__' in sys.builtin_module_names: # pypy case import _minimal_curses as _curses -except ImportError: +else: + # cpython case try: import _curses except ImportError: diff --git a/lib_pypy/readline.egg-info b/lib_pypy/readline.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/readline.egg-info From noreply at buildbot.pypy.org Mon Aug 26 11:52:17 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 26 Aug 2013 11:52:17 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: (all) planning for today Message-ID: <20130826095217.C5D801C0134@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5025:1de3bbdda807 Date: 2013-08-26 10:51 +0100 http://bitbucket.org/pypy/extradoc/changeset/1de3bbdda807/ Log: (all) planning for today diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -1,3 +1,30 @@ +People present +--------------- + +Carl Friedrich +Laurie +Remy +Marko +Olmo +Romain +Manuel +Rami +Lukas +Richard +Ronan +Armin +Tom +Edd +Maciej + +People not present +------------------- + +Anto + + + + Tasks ----- @@ -13,3 +40,42 @@ * PYPYLOG output: they need to be disentangled when the process uses threads or greenlets, probably by adding a thread-or-greenlet number prefix (see branch stmgc-c4 where we already add a thread num prefix) + +* general STM things (Remy, Armin) + +* general Numpy things (Marko, Romain) + +* fix some of the RPython nits that Edd found (Ronan, Edd) + +* continue less-stringly-ops + +* better error messages (Ronan, Edd) + +* programming + +* JIT for dont-know-yet (Richard, Lukas) + +* explore Laurie's crazy scheme of persisting loop entry counts (Maciej, Laurie) + +* progress on the documentation branch (Olmo, Manuel) + +* improve installation instruction (Olmo, Manuel) + +* shave all the yaks + +* find the slow generator task (Marko, Romain) + +* general wizardry (Carl Friedrich; Armin) + +* general getting started (Rami, Carl Friedrich) + + +Discussions planned +-------------------- + +* demo session Tuesday +* STM dissemination round +* JIT optimizer mess +* roadmap planning +* scientific computing roadmap +* LuaJIT discussion TODAY (Tom, Armin, Maciek, Carl Friedrich, Laurie) From noreply at buildbot.pypy.org Mon Aug 26 13:08:14 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 26 Aug 2013 13:08:14 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Remove dead link. Message-ID: <20130826110814.58DAB1C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r66315:a40a4e6e24d8 Date: 2013-08-26 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/a40a4e6e24d8/ Log: Remove dead link. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -31,7 +31,6 @@ cppyy objspace-proxies sandbox - clr-module Development documentation From noreply at buildbot.pypy.org Mon Aug 26 14:33:18 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 26 Aug 2013 14:33:18 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Structure the TODO better. Message-ID: <20130826123318.61DD01C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r66316:35651f0ce780 Date: 2013-08-26 13:34 +0100 http://bitbucket.org/pypy/pypy/changeset/35651f0ce780/ Log: Structure the TODO better. 
diff --git a/TODO-docs b/TODO-docs --- a/TODO-docs +++ b/TODO-docs @@ -4,18 +4,28 @@ General ------- -* make inter-documentation links work -* work on configuration/options documentation * structure documentation and add appropriate toctrees * integrate numerous getting started documents into something more useful (eg. "Installing PyPy", "Building PyPy from source", "Playing with the RPython Toolchain", "Write your own interpreter in RPython") -* remove documentation for removed features * architecture documents don't really show the separation between PyPy and RPython * where should the documentation about coding style etc. be put? + + +Cleanup +~~~~~~~ + +* remove documentation for removed features * update / remove dead links +Meta +~~~~ + +* make inter-documentation links work +* work on configuration/options documentation generation + + RPython ------- From noreply at buildbot.pypy.org Mon Aug 26 14:45:12 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 26 Aug 2013 14:45:12 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Remove point 'improve installation instruction' because it's just a small subtask of 'progress on the documentation branch'. Message-ID: <20130826124512.7CEF61C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5026:dad44e6a0545 Date: 2013-08-26 13:46 +0100 http://bitbucket.org/pypy/extradoc/changeset/dad44e6a0545/ Log: Remove point 'improve installation instruction' because it's just a small subtask of 'progress on the documentation branch'. diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -59,8 +59,6 @@ * progress on the documentation branch (Olmo, Manuel) -* improve installation instruction (Olmo, Manuel) - * shave all the yaks * find the slow generator task (Marko, Romain) From noreply at buildbot.pypy.org Mon Aug 26 14:59:58 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 26 Aug 2013 14:59:58 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add a point about Mac packaging. Message-ID: <20130826125958.199791C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5027:db60dc6b2400 Date: 2013-08-26 14:01 +0100 http://bitbucket.org/pypy/extradoc/changeset/db60dc6b2400/ Log: Add a point about Mac packaging. 
diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -59,6 +59,8 @@ * progress on the documentation branch (Olmo, Manuel) +* improve packaging on Mac to make sure non-technical users are able to use PyPy (Olmo, Manuel) + * shave all the yaks * find the slow generator task (Marko, Romain) From noreply at buildbot.pypy.org Mon Aug 26 16:01:13 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 26 Aug 2013 16:01:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix ndarray(dtype=str).fill() Message-ID: <20130826140113.32FBA1C3639@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66317:a4a455634894 Date: 2013-08-26 15:00 +0100 http://bitbucket.org/pypy/pypy/changeset/a4a455634894/ Log: Fix ndarray(dtype=str).fill() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1922,6 +1922,12 @@ a = numpy.arange(10.).reshape((5, 2))[::2] assert (loads(dumps(a)) == a).all() + def test_string_filling(self): + import numpypy as numpy + a = numpy.empty((10,10), dtype='c1') + a.fill(12) + assert (a == '1').all() + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1764,12 +1764,16 @@ arr.storage[i] = arg[i] return interp_boxes.W_StringBox(arr, 0, arr.dtype) - @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) # XXX simplify to range(box.dtype.get_size()) ? 
+ return self._store(arr.storage, i, offset, box) + + @jit.unroll_safe + def _store(self, storage, i, offset, box): + assert isinstance(box, interp_boxes.W_StringBox) for k in range(min(self.size, box.arr.size-offset)): - arr.storage[k + i] = box.arr.storage[k + offset] + storage[k + i] = box.arr.storage[k + offset] def read(self, arr, i, offset, dtype=None): if dtype is None: @@ -1859,6 +1863,11 @@ arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) + def fill(self, storage, width, box, start, stop, offset): + from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArrayNotOwning + for i in xrange(start, stop, width): + self._store(storage, i, offset, box) + NonNativeStringType = StringType class UnicodeType(BaseType, BaseStringType): From noreply at buildbot.pypy.org Mon Aug 26 16:33:05 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 16:33:05 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: (fijal, ltratt) Add support for getting/setting threshold on descrs of guards Message-ID: <20130826143305.3E19E1C0134@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66318:7bf93e121a95 Date: 2013-08-26 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/7bf93e121a95/ Log: (fijal, ltratt) Add support for getting/setting threshold on descrs of guards diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -4,7 +4,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.pycode import PyCode -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance, hlstr from rpython.rtyper.lltypesystem.rclass import OBJECT @@ -180,6 +180,13 @@ def descr_name(self, space): return space.wrap(hlstr(jit_hooks.resop_getopname(self.op))) + def descr_getdescr(self, space): + lldescr = jit_hooks.resop_getdescr(self.op) + if not lldescr: + raise operationerrfmt(space.w_TypeError, + "%s is not a guard", self.repr_of_resop) + return WrappedDescr(lldescr) + @unwrap_spec(no=int) def descr_getarg(self, space, no): return WrappedBox(jit_hooks.resop_getarg(self.op, no)) @@ -195,6 +202,24 @@ box = space.interp_w(WrappedBox, w_box) jit_hooks.resop_setresult(self.op, box.llbox) +class WrappedDescr(W_Root): + """ A class representing a single descr for a ResOperation + """ + def __init__(self, lldescr): + self.lldescr = lldescr + + def get_threshold(self, space): + return space.wrap(jit_hooks.descr_getthreshold(self.lldescr)) + + @unwrap_spec(v=int) + def set_threshold(self, space, v): + jit_hooks.descr_setthreshold(self.lldescr, v) + +WrappedDescr.typedef = TypeDef("Descr", + threshold = GetSetProperty(WrappedDescr.get_threshold, + WrappedDescr.set_threshold) +) + class DebugMergePoint(WrappedOp): """ A class representing Debug Merge Point - the entry point to a jitted loop. 
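A hedged usage sketch, not part of the changeset itself; it assumes only the
attributes shown in these hunks and in the tests further below
(pypyjit.set_compile_hook(), info.operations, op.name, and the new op.descr
with its threshold property).  A compile hook can then read or reset the
counter kept on guard descrs:

    import pypyjit

    def on_compile(info):
        for op in info.operations:
            if op.name.startswith('guard'):
                descr = op.descr            # ops without a guard descr raise TypeError
                print op.name, descr.threshold
                descr.threshold = 0         # backed by ResumeGuardDescr._counter

    pypyjit.set_compile_hook(on_compile)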
@@ -232,7 +257,8 @@ getarg = interp2app(WrappedOp.descr_getarg), setarg = interp2app(WrappedOp.descr_setarg), result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult) + WrappedOp.descr_setresult), + descr = GetSetProperty(WrappedOp.descr_getdescr) ) WrappedOp.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -5,6 +5,7 @@ from rpython.jit.metainterp.history import JitCellToken, ConstInt, ConstPtr,\ BasicFailDescr from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.compile import ResumeGuardDescr from rpython.jit.metainterp.logger import Logger from rpython.rtyper.annlowlevel import (cast_instance_to_base_ptr, cast_base_ptr_to_instance) @@ -52,13 +53,14 @@ code_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, ll_code) logger = Logger(MockSD()) + descr = ResumeGuardDescr() oplist = parse(""" [i1, i2, p2] i3 = int_add(i1, i2) debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0)) guard_nonnull(p2) [] - guard_true(i3) [] - """, namespace={'ptr0': code_gcref}).operations + guard_true(i3, descr=descr) [] + """, namespace={'ptr0': code_gcref, 'descr': descr}).operations greenkey = [ConstInt(0), ConstInt(0), ConstPtr(code_gcref)] offset = {} for i, op in enumerate(oplist): @@ -67,24 +69,24 @@ token = JitCellToken() token.number = 0 - di_loop = JitDebugInfo(MockJitDriverSD, logger, token, oplist, 'loop', - greenkey) - di_loop_optimize = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), - oplist, 'loop', greenkey) - di_loop.asminfo = AsmInfo(offset, 0, 0) di_bridge = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'bridge', fail_descr=BasicFailDescr()) di_bridge.asminfo = AsmInfo(offset, 0, 0) def interp_on_compile(): - di_loop.oplist = cls.oplist + di_loop = JitDebugInfo(MockJitDriverSD, logger, token, oplist[:], + 'loop', greenkey) + di_loop.asminfo = AsmInfo(offset, 0, 0) pypy_hooks.after_compile(di_loop) def interp_on_compile_bridge(): pypy_hooks.after_compile_bridge(di_bridge) def interp_on_optimize(): - di_loop_optimize.oplist = cls.oplist + di_loop_optimize = JitDebugInfo(MockJitDriverSD, logger, + JitCellToken(), + oplist[:], 'loop', greenkey) + di_loop_optimize.oplist = oplist[:] pypy_hooks.before_compile(di_loop_optimize) def interp_on_abort(): @@ -98,12 +100,9 @@ cls.w_int_add_num = space.wrap(rop.INT_ADD) cls.w_dmp_num = space.wrap(rop.DEBUG_MERGE_POINT) cls.w_on_optimize = space.wrap(interp2app(interp_on_optimize)) - cls.orig_oplist = oplist + cls.oplist = property(lambda : oplist[:]) cls.w_sorted_keys = space.wrap(sorted(Counters.counter_names)) - def setup_method(self, meth): - self.__class__.oplist = self.orig_oplist[:] - def test_on_compile(self): import pypyjit all = [] @@ -248,6 +247,22 @@ raises(AttributeError, 'op.pycode') assert op.call_depth == 5 + def test_descr_set_threshold(self): + import pypyjit + all = [] + + def hook(info): + all.append(info) + + pypyjit.set_compile_hook(hook) + assert not all + self.on_compile() + raises(TypeError, "all[0].operations[0].descr") + descr = all[0].operations[-1].descr + assert descr.threshold == 0 + descr.threshold = 1 + assert descr.threshold == 1 + def test_get_stats_snapshot(self): skip("a bit no idea how to test it") from pypyjit import get_stats_snapshot diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -42,6 
+42,18 @@ ptr = lltype.cast_opaque_ptr(rclass.OBJECTPTR, llref) return cast_base_ptr_to_instance(AbstractResOp, ptr) +def _cast_to_descr(lldescr): + from rpython.jit.metainterp.history import AbstractDescr + from rpython.jit.metainterp.compile import ResumeGuardDescr + + ptr = lltype.cast_opaque_ptr(rclass.OBJECTPTR, lldescr) + if not ptr: + return None + obj = cast_base_ptr_to_instance(AbstractDescr, ptr) + if not isinstance(obj, ResumeGuardDescr): + return None + return obj + @specialize.argtype(0) def _cast_to_gcref(obj): return lltype.cast_opaque_ptr(llmemory.GCREF, @@ -86,6 +98,18 @@ def resop_getresult(llop): return _cast_to_gcref(_cast_to_resop(llop).result) + at register_helper(annmodel.SomePtr(llmemory.GCREF)) +def resop_getdescr(llop): + return _cast_to_gcref(_cast_to_resop(llop).getdescr()) + + at register_helper(annmodel.SomeInteger()) +def descr_getthreshold(lldescr): + return _cast_to_descr(lldescr)._counter + + at register_helper(annmodel.s_None) +def descr_setthreshold(lldescr, threshold): + _cast_to_descr(lldescr)._counter = threshold + @register_helper(annmodel.s_None) def resop_setresult(llop, llbox): _cast_to_resop(llop).result = _cast_to_box(llbox) From noreply at buildbot.pypy.org Mon Aug 26 17:02:24 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 17:02:24 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: (fijal, ltratt) Message-ID: <20130826150224.3F4C91C04B4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66319:c83cf1418bc1 Date: 2013-08-26 16:01 +0100 http://bitbucket.org/pypy/pypy/changeset/c83cf1418bc1/ Log: (fijal, ltratt) * implement simple checksum * a small fix diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -13,6 +13,7 @@ from rpython.rlib import jit_hooks from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import compute_unique_id +from rpython.rlib.rarithmetic import intmask from pypy.module.pypyjit.interp_jit import pypyjitdriver class Cache(object): @@ -211,8 +212,8 @@ def get_threshold(self, space): return space.wrap(jit_hooks.descr_getthreshold(self.lldescr)) - @unwrap_spec(v=int) - def set_threshold(self, space, v): + def set_threshold(self, space, w_v): + v = space.int_w(w_v) jit_hooks.descr_setthreshold(self.lldescr, v) WrappedDescr.typedef = TypeDef("Descr", @@ -292,6 +293,13 @@ bridge_no = 0 asmaddr = 0 asmlen = 0 + checksum = 0 + + def _compute_checksum(self, ops): + s = 5381 + for op in ops: + s = intmask(((s << 5) + s) + op.getopnum()) + return s def __init__(self, space, debug_info, is_bridge=False): logops = debug_info.logger._make_log_operations() @@ -301,6 +309,7 @@ ofs = {} self.w_ops = space.newlist( wrap_oplist(space, logops, debug_info.operations, ofs)) + self.checksum = self._compute_checksum(debug_info.operations) self.jd_name = debug_info.get_jitdriver().name self.type = debug_info.type @@ -368,6 +377,7 @@ doc="bridge number (if a bridge)"), type = interp_attrproperty('type', cls=W_JitLoopInfo, doc="Loop type"), + checksum = interp_attrproperty('checksum', cls=W_JitLoopInfo), __repr__ = interp2app(W_JitLoopInfo.descr_repr), ) W_JitLoopInfo.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -262,6 +262,7 @@ assert descr.threshold == 0 
descr.threshold = 1 assert descr.threshold == 1 + assert all[0].checksum == 6382428994 def test_get_stats_snapshot(self): skip("a bit no idea how to test it") From noreply at buildbot.pypy.org Mon Aug 26 17:09:20 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 26 Aug 2013 17:09:20 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Remove again the point about Mac packaging. Message-ID: <20130826150920.8368C1C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5028:60e1fc581d24 Date: 2013-08-26 16:08 +0100 http://bitbucket.org/pypy/extradoc/changeset/60e1fc581d24/ Log: Remove again the point about Mac packaging. diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -59,8 +59,6 @@ * progress on the documentation branch (Olmo, Manuel) -* improve packaging on Mac to make sure non-technical users are able to use PyPy (Olmo, Manuel) - * shave all the yaks * find the slow generator task (Marko, Romain) From noreply at buildbot.pypy.org Mon Aug 26 18:05:46 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 18:05:46 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: remove the ckecksum for now - we might not need it Message-ID: <20130826160546.CF8321C019E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66320:2f2a03b5daee Date: 2013-08-26 17:05 +0100 http://bitbucket.org/pypy/pypy/changeset/2f2a03b5daee/ Log: remove the ckecksum for now - we might not need it diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -293,13 +293,6 @@ bridge_no = 0 asmaddr = 0 asmlen = 0 - checksum = 0 - - def _compute_checksum(self, ops): - s = 5381 - for op in ops: - s = intmask(((s << 5) + s) + op.getopnum()) - return s def __init__(self, space, debug_info, is_bridge=False): logops = debug_info.logger._make_log_operations() @@ -309,8 +302,6 @@ ofs = {} self.w_ops = space.newlist( wrap_oplist(space, logops, debug_info.operations, ofs)) - self.checksum = self._compute_checksum(debug_info.operations) - self.jd_name = debug_info.get_jitdriver().name self.type = debug_info.type if is_bridge: @@ -377,7 +368,6 @@ doc="bridge number (if a bridge)"), type = interp_attrproperty('type', cls=W_JitLoopInfo, doc="Loop type"), - checksum = interp_attrproperty('checksum', cls=W_JitLoopInfo), __repr__ = interp2app(W_JitLoopInfo.descr_repr), ) W_JitLoopInfo.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -262,7 +262,6 @@ assert descr.threshold == 0 descr.threshold = 1 assert descr.threshold == 1 - assert all[0].checksum == 6382428994 def test_get_stats_snapshot(self): skip("a bit no idea how to test it") From noreply at buildbot.pypy.org Mon Aug 26 18:14:01 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 26 Aug 2013 18:14:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: import stmgc Message-ID: <20130826161401.7E6B71C019E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66321:a9cc9ad24fae Date: 2013-08-26 18:09 +0200 http://bitbucket.org/pypy/pypy/changeset/a9cc9ad24fae/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/et.c 
b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -958,6 +958,7 @@ SpinLoop(SPLP_ABORT); // jump back to the setjmp_buf (this call does not return) d->active = 0; + d->atomic = 0; stm_stop_sharedlock(); longjmp(*d->setjmp_buf, 1); } @@ -1006,6 +1007,7 @@ static void init_transaction(struct tx_descriptor *d) { + assert(d->atomic == 0); assert(d->active == 0); stm_start_sharedlock(); assert(d->active == 0); @@ -1361,6 +1363,7 @@ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); + assert(d->atomic == 0); dprintf(("CommitTransaction(%p)\n", d)); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -301,15 +301,16 @@ case 3: /* a string of bytes from the target object */ rps = *(char **)(object + offset); offset = *fieldoffsets++; - if (rps) { + /* XXX think of a different hack: this one doesn't really + work if we see stubs! */ + if (rps && !(((gcptr)rps)->h_tid & GCFLAG_STUB)) { /* xxx a bit ad-hoc: it's a string whose length is a * long at 'offset', following immediately the offset */ rps_size = *(long *)(rps + offset); - offset += sizeof(long); assert(rps_size >= 0); res_size = sprintf(buffer, "%zu:", rps_size); WRITE_BUF(buffer, res_size); - WRITE_BUF(rps + offset, rps_size); + WRITE_BUF(rps + offset + sizeof(long), rps_size); } else { WRITE_BUF("0:", 2); diff --git a/rpython/translator/stm/src_stm/fprintcolor.c b/rpython/translator/stm/src_stm/fprintcolor.c --- a/rpython/translator/stm/src_stm/fprintcolor.c +++ b/rpython/translator/stm/src_stm/fprintcolor.c @@ -58,3 +58,25 @@ } #endif + + +#ifdef STM_BARRIER_COUNT +long stm_barriercount[STM_BARRIER_NUMBERS]; + +void stm_print_barrier_count(void) +{ + static char names[] = STM_BARRIER_NAMES; + char *p = names; + char *q; + int i; + fprintf(stderr, "** Summary of the barrier calls **\n"); + for (i = 0; i < STM_BARRIER_NUMBERS; i += 2) { + q = strchr(p, '\n'); + *q = '\0'; + fprintf(stderr, "%12ld %s\n", stm_barriercount[i], p); + *q = '\n'; + fprintf(stderr, "%12ld \\ fast path\n", stm_barriercount[i + 1]); + p = q + 1; + } +} +#endif diff --git a/rpython/translator/stm/src_stm/fprintcolor.h b/rpython/translator/stm/src_stm/fprintcolor.h --- a/rpython/translator/stm/src_stm/fprintcolor.h +++ b/rpython/translator/stm/src_stm/fprintcolor.h @@ -21,3 +21,8 @@ #define dprintfcolor() 0 #endif + + +#ifdef STM_BARRIER_COUNT +void stm_print_barrier_count(void); +#endif diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -b19dfb209a10 +cb61cf4e30a9 diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -218,11 +218,12 @@ dprintf(("already stolen: %p -> %p\n", P, L)); /* note that we should follow h_revision at least one more - step: it is necessary if L is public but young (and then - has GCFLAG_MOVED), but it is fine to do it more - generally. */ - v = ACCESS_ONCE(L->h_revision); - if (IS_POINTER(v)) { + step: in the case where L is public but young (and then + has GCFLAG_MOVED). Don't do it generally! 
L might be + a stub again. */ + if (L->h_tid & GCFLAG_MOVED) { + v = ACCESS_ONCE(L->h_revision); + assert(IS_POINTER(v)); L = (gcptr)v; dprintf(("\t---> %p\n", L)); } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -94,6 +94,10 @@ - stm_repeat_write_barrier() can be used on an object on which we already did stm_write_barrier(), but a potential collection can have occurred. + + - stm_write_barrier_noptr() is a slightly cheaper version of + stm_write_barrier(), for when we are going to write + non-gc-pointers into the object. */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); @@ -101,6 +105,7 @@ gcptr stm_repeat_read_barrier(gcptr); gcptr stm_immut_read_barrier(gcptr); gcptr stm_repeat_write_barrier(gcptr); /* <= always returns its argument */ +gcptr stm_write_barrier_noptr(gcptr); #endif /* start a new transaction, calls callback(), and when it returns @@ -203,33 +208,52 @@ #define UNLIKELY(test) __builtin_expect(test, 0) +#ifdef STM_BARRIER_COUNT +# define STM_BARRIER_NUMBERS 12 +# define STM_BARRIER_NAMES "stm_read_barrier\n" \ + "stm_write_barrier\n" \ + "stm_repeat_read_barrier\n" \ + "stm_immut_read_barrier\n" \ + "stm_repeat_write_barrier\n" \ + "stm_write_barrier_noptr\n" +# define STM_COUNT(id, x) (stm_barriercount[id]++, x) +extern long stm_barriercount[STM_BARRIER_NUMBERS]; +#else +# define STM_COUNT(id, x) (x) +#endif + #define stm_read_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) && \ (FXCACHE_AT(obj) != (obj))) ? \ - stm_DirectReadBarrier(obj) \ - : (obj)) + STM_COUNT(0, stm_DirectReadBarrier(obj)) \ + : STM_COUNT(1, obj)) #define stm_write_barrier(obj) \ (UNLIKELY(((obj)->h_revision != stm_private_rev_num) || \ (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? \ - stm_WriteBarrier(obj) \ - : (obj)) + STM_COUNT(2, stm_WriteBarrier(obj)) \ + : STM_COUNT(3, obj)) #define stm_repeat_read_barrier(obj) \ (UNLIKELY(((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | \ GCFLAG_MOVED)) != 0) ? \ - stm_RepeatReadBarrier(obj) \ - : (obj)) + STM_COUNT(4, stm_RepeatReadBarrier(obj)) \ + : STM_COUNT(5, obj)) #define stm_immut_read_barrier(obj) \ (UNLIKELY(((obj)->h_tid & GCFLAG_STUB) != 0) ? \ - stm_ImmutReadBarrier(obj) \ - : (obj)) + STM_COUNT(6, stm_ImmutReadBarrier(obj)) \ + : STM_COUNT(7, obj)) #define stm_repeat_write_barrier(obj) \ (UNLIKELY(((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0) ? \ - stm_RepeatWriteBarrier(obj) \ - : (obj)) + STM_COUNT(8, stm_RepeatWriteBarrier(obj)) \ + : STM_COUNT(9, obj)) + +#define stm_write_barrier_noptr(obj) \ + (UNLIKELY((obj)->h_revision != stm_private_rev_num) ? 
\ + STM_COUNT(10, stm_WriteBarrier(obj)) \ + : STM_COUNT(11, obj)) #endif diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -84,6 +84,13 @@ dprintf(("enter_callback_call(tok=%d)\n", token)); if (token == 1) { stmgcpage_acquire_global_lock(); +#ifdef STM_BARRIER_COUNT + static int seen = 0; + if (!seen) { + seen = 1; + atexit(&stm_print_barrier_count); + } +#endif DescriptorInit(); stmgc_init_nursery(); init_shadowstack(); @@ -129,12 +136,11 @@ jmp_buf _jmpbuf; long volatile v_counter = 0; gcptr *volatile v_saved_value = stm_shadowstack; - long volatile v_atomic; stm_push_root(arg); stm_push_root(END_MARKER_OFF); - if (!(v_atomic = thread_descriptor->atomic)) + if (!thread_descriptor->atomic) CommitTransaction(); #ifdef _GC_ON_CPYTHON @@ -153,7 +159,6 @@ struct tx_descriptor *d = thread_descriptor; long counter, result; counter = v_counter; - d->atomic = v_atomic; stm_shadowstack = v_saved_value + 2; /*skip the two values pushed above*/ do { @@ -178,6 +183,7 @@ /* atomic transaction: a common case is that callback() returned even though we are atomic because we need a major GC. For that case, release and reaquire the rw lock here. */ + assert(d->active >= 1); stm_possible_safe_point(); } @@ -186,7 +192,6 @@ result = callback(arg, counter); assert(stm_shadowstack == v_saved_value + 2); - v_atomic = d->atomic; if (!d->atomic) CommitTransaction(); From noreply at buildbot.pypy.org Mon Aug 26 18:14:02 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 26 Aug 2013 18:14:02 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix bug where spilled references do not get updated by stm barriers Message-ID: <20130826161402.DF9391C019E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66322:9832428e54e4 Date: 2013-08-26 18:12 +0200 http://bitbucket.org/pypy/pypy/changeset/9832428e54e4/ Log: fix bug where spilled references do not get updated by stm barriers diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -47,7 +47,8 @@ for op in operations: if not we_are_translated(): # only possible in tests: - if op.getopnum() in (rop.COND_CALL_STM_B,): + if op.getopnum() in (rop.COND_CALL_STM_B, + -124): # FORCE_SPILL self.newops.append(op) continue if op.getopnum() == rop.DEBUG_MERGE_POINT: diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -807,7 +807,21 @@ for i in range(N)] self.perform_discard(op, arglocs) - consider_cond_call_stm_b = consider_cond_call_gc_wb + def consider_cond_call_stm_b(self, op): + assert op.result is None + # we force all arguments in a reg (unless they are Consts), + # because it will be needed anyway by the following setfield_gc + # or setarrayitem_gc. It avoids loading it twice from the memory. 
+ arg = op.getarg(0) + argloc = self.rm.make_sure_var_in_reg(arg) + self.perform_discard(op, [argloc]) + + spilled_loc = self.rm.frame_manager.get(arg) + if spilled_loc: + # spilled var, make sure it gets updated in the frame too + self.assembler.regalloc_mov(argloc, spilled_loc) + + consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_call_malloc_nursery(self, op): diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -767,6 +767,58 @@ assert frame_adr != id(finaldescr) + def test_write_barrier_on_spilled(self): + cpu = self.cpu + + PRIV_REV = rffi.cast(lltype.Signed, StmGC.PREBUILT_REVISION) + self.priv_rev_num[0] = PRIV_REV + + s = self.allocate_prebuilt_s() + other_s = self.allocate_prebuilt_s() + sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) + other_sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, other_s) + s.h_revision = PRIV_REV+4 + other_s.h_revision = PRIV_REV+4 + + called_on = [] + def write_barrier(obj): + called_on.append(obj) + if llmemory.cast_ptr_to_adr(sgcref) == obj: + return rffi.cast(llmemory.Address, other_sgcref) + return obj + P2W = FakeSTMBarrier(cpu.gc_ll_descr, 'P2W', write_barrier) + old_p2w = cpu.gc_ll_descr.P2Wdescr + cpu.gc_ll_descr.P2Wdescr = P2W + + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + + + from rpython.jit.tool.oparser import FORCE_SPILL + p0 = BoxPtr() + spill = FORCE_SPILL(None) + spill.initarglist([p0]) + operations = [ + ResOperation(rop.COND_CALL_STM_B, [p0], None, + descr=P2W), + spill, + ResOperation(rop.COND_CALL_STM_B, [p0], None, + descr=P2W), + ResOperation(rop.FINISH, [p0], None, + descr=BasicFinalDescr(0)), + ] + inputargs = [p0] + looptoken = JitCellToken() + print cpu.compile_loop(inputargs, operations, looptoken) + cpu.execute_token(looptoken, sgcref) + + # the second write-barrier must see the result of the + # first one + self.assert_in(called_on, [sgcref, other_sgcref]) + + # for other tests: + cpu.gc_ll_descr.P2Wdescr = old_p2w + From noreply at buildbot.pypy.org Mon Aug 26 18:37:12 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 18:37:12 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: small fix to allocate new cell Message-ID: <20130826163712.2657E1C13EE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66323:96f84f4695e0 Date: 2013-08-26 17:36 +0100 http://bitbucket.org/pypy/pypy/changeset/96f84f4695e0/ Log: small fix to allocate new cell diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -8,7 +8,7 @@ from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside,\ BaseJitCell -from rpython.rlib import jit +from rpython.rlib import jit, jit_hooks from rpython.rlib.jit import current_trace_length, unroll_parameters import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import OperationError, operationerrfmt @@ -176,5 +176,8 @@ at value given. 
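            # (Hedged explanatory aside, drawn from the commit message and the
            #  test_write_barrier_on_spilled test below rather than from the
            #  diff itself: the STM barrier may hand back a *different*
            #  pointer for its argument, so after the call the register in
            #  `argloc` holds the new object while a frame copy of the same
            #  box would still hold the old one.  Without the extra move
            #  below, a later reload from the frame -- e.g. the second
            #  barrier in the sequence
            #
            #      cond_call_stm_b(p0)
            #      force_spill(p0)
            #      cond_call_stm_b(p0)
            #
            #  -- would operate on the stale object.)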
""" ref = w_code.jit_cells[pos << 1] + if not ref: + ref = jit_hooks.new_jitcell() + w_code.jit_cells[pos << 1] = ref jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) jitcell.counter = value diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -135,6 +135,12 @@ from rpython.jit.metainterp.history import Const return isinstance(_cast_to_box(llbox), Const) + at register_helper(annmodel.SomePtr(llmemory.GCREF)) +def new_jitcell(): + from rpython.jit.metainterp.warmstate import JitCell + + return _cast_to_gcref(JitCell) + # ------------------------- stats interface --------------------------- @register_helper(annmodel.SomeBool()) From noreply at buildbot.pypy.org Mon Aug 26 18:49:31 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 18:49:31 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: maybe like that? Message-ID: <20130826164931.331ED1C0189@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66324:c0ebbcc09d60 Date: 2013-08-26 17:48 +0100 http://bitbucket.org/pypy/pypy/changeset/c0ebbcc09d60/ Log: maybe like that? diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -43,7 +43,7 @@ def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode): key = (next_instr << 1) | r_uint(intmask(is_being_profiled)) - bytecode.jit_cells[key] = newcell + bytecode.jit_cells[key] = cast_instance_to_base_ptr(newcell) def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): @@ -178,6 +178,6 @@ ref = w_code.jit_cells[pos << 1] if not ref: ref = jit_hooks.new_jitcell() - w_code.jit_cells[pos << 1] = ref + w_code.jit_cells[pos << 1] = cast_base_ptr_to_instance(BaseJitCell, ref) jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) jitcell.counter = value diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -4,6 +4,7 @@ cast_base_ptr_to_instance, llstr) from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import llmemory, lltype, rclass +from rpython.rtyper.lltypesystem.rclass import OBJECTPTR def register_helper(s_result): @@ -135,7 +136,7 @@ from rpython.jit.metainterp.history import Const return isinstance(_cast_to_box(llbox), Const) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(annmodel.SomePtr(OBJECTPTR)) def new_jitcell(): from rpython.jit.metainterp.warmstate import JitCell From noreply at buildbot.pypy.org Mon Aug 26 18:53:21 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 18:53:21 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: oops Message-ID: <20130826165321.156571C0189@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66325:3548c4e24114 Date: 2013-08-26 17:52 +0100 http://bitbucket.org/pypy/pypy/changeset/3548c4e24114/ Log: oops diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -43,7 +43,7 @@ def set_jitcell_at(newcell, next_instr, is_being_profiled, bytecode): key = (next_instr << 1) | r_uint(intmask(is_being_profiled)) - bytecode.jit_cells[key] = cast_instance_to_base_ptr(newcell) + bytecode.jit_cells[key] = newcell def should_unroll_one_iteration(next_instr, 
is_being_profiled, bytecode): From noreply at buildbot.pypy.org Mon Aug 26 18:57:47 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 18:57:47 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: I think it's ok to ignore mergecallfamilies if there are no others Message-ID: <20130826165747.5BB0E1C13EE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-str-types Changeset: r66326:60bfe1a4f6d5 Date: 2013-08-26 17:57 +0100 http://bitbucket.org/pypy/pypy/changeset/60bfe1a4f6d5/ Log: I think it's ok to ignore mergecallfamilies if there are no others diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -147,6 +147,8 @@ def mergecallfamilies(self, *others): """Merge the call families of the given Descs into one.""" + if not others: + return call_families = self.bookkeeper.pbc_maximal_call_families changed, rep, callfamily = call_families.find(self.rowkey()) for desc in others: From noreply at buildbot.pypy.org Mon Aug 26 18:58:37 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 18:58:37 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: another go Message-ID: <20130826165837.BAA361C13EE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66327:fe12d4657961 Date: 2013-08-26 17:58 +0100 http://bitbucket.org/pypy/pypy/changeset/fe12d4657961/ Log: another go diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -175,9 +175,9 @@ For testing. Set the threshold for this code object at position pos at value given. """ - ref = w_code.jit_cells[pos << 1] - if not ref: + jitcell = w_code.jit_cells[pos << 1] + if not jitcell: ref = jit_hooks.new_jitcell() - w_code.jit_cells[pos << 1] = cast_base_ptr_to_instance(BaseJitCell, ref) - jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) + jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) + w_code.jit_cells[pos << 1] = jitcell jitcell.counter = value From noreply at buildbot.pypy.org Mon Aug 26 19:09:58 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 19:09:58 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: change None to False Message-ID: <20130826170958.0643D1C019E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: refactor-str-types Changeset: r66328:95efef15ffbe Date: 2013-08-26 18:09 +0100 http://bitbucket.org/pypy/pypy/changeset/95efef15ffbe/ Log: change None to False diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -148,7 +148,7 @@ def mergecallfamilies(self, *others): """Merge the call families of the given Descs into one.""" if not others: - return + return False call_families = self.bookkeeper.pbc_maximal_call_families changed, rep, callfamily = call_families.find(self.rowkey()) for desc in others: From noreply at buildbot.pypy.org Mon Aug 26 19:21:41 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 19:21:41 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: (fijal, arigo) we can't call jit hooks from jitcodes Message-ID: <20130826172141.C76BC1C13FA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66329:dfba03975b59 Date: 2013-08-26 18:21 +0100 
http://bitbucket.org/pypy/pypy/changeset/dfba03975b59/ Log: (fijal, arigo) we can't call jit hooks from jitcodes diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -168,6 +168,7 @@ the JIT follow the call.''' return space.call_args(w_callable, __args__) + at jit.dont_look_inside @unwrap_spec(w_code=PyCode, pos=r_uint, value=int) def set_local_threshold(space, w_code, pos, value): """ set_local_threshold(code, pos, value) From noreply at buildbot.pypy.org Mon Aug 26 21:43:34 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 21:43:34 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: return the correct type Message-ID: <20130826194334.118D51C0189@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66330:33bbec99fa87 Date: 2013-08-26 20:42 +0100 http://bitbucket.org/pypy/pypy/changeset/33bbec99fa87/ Log: return the correct type diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -140,7 +140,7 @@ def new_jitcell(): from rpython.jit.metainterp.warmstate import JitCell - return _cast_to_gcref(JitCell) + return cast_instance_to_base_ptr(JitCell()) # ------------------------- stats interface --------------------------- From noreply at buildbot.pypy.org Mon Aug 26 21:45:37 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 26 Aug 2013 21:45:37 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: a test and a fix Message-ID: <20130826194537.B23DD1C019E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66331:46acbfad8195 Date: 2013-08-26 20:44 +0100 http://bitbucket.org/pypy/pypy/changeset/46acbfad8195/ Log: a test and a fix diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -176,8 +176,9 @@ For testing. Set the threshold for this code object at position pos at value given. """ - jitcell = w_code.jit_cells[pos << 1] - if not jitcell: + try: + jitcell = w_code.jit_cells[pos << 1] + except KeyError: ref = jit_hooks.new_jitcell() jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) w_code.jit_cells[pos << 1] = jitcell diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -272,3 +272,11 @@ assert isinstance(stats.w_counters, dict) assert sorted(stats.w_counters.keys()) == self.sorted_keys + def test_set_local_threshold(self): + import pypyjit + + def f(): + pass + + pypyjit.set_local_threshold(f.__code__, 0, 0) + # assert did not crash From noreply at buildbot.pypy.org Mon Aug 26 22:02:41 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:41 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - Add cpyext implementation of Numpy PyArray_* C-API Message-ID: <20130826200241.D9D6E1C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. 
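
For context on the __array__() hook that the last changesets in this series (r66341 and the r66347 "array() constructor" change) wire into micronumpy's convert_to_array(): once those patches are applied, an application-level object that exposes an __array__() method returning an ndarray is accepted wherever micronumpy expects an array, instead of being treated as a sequence or scalar. The snippet below is a minimal, hypothetical sketch of that behaviour only; the Wrapper class and the plain "import numpypy" are illustrative assumptions and are not part of the changesets themselves.

# Hypothetical usage sketch -- assumes a PyPy build with the numpypy module
# available and the convert_to_array()/array() changes from r66341/r66347
# applied; plain numpy exposes the same __array__() protocol.
import numpypy as np

class Wrapper(object):
    # Not a sequence and not an ndarray, but convertible via __array__().
    def __init__(self, data):
        self._data = data

    def __array__(self):
        # convert_to_array() raises ValueError if this does not return an
        # ndarray ("object __array__ method not producing an array").
        return np.array(self._data)

w = Wrapper([1.0, 2.0, 3.0])
a = np.array(w)        # now routed through Wrapper.__array__()
assert a.sum() == 6.0
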
Muller Branch: pypy-pyarray Changeset: r66332:01fc9f0596f2 Date: 2013-07-28 15:12 +0200 http://bitbucket.org/pypy/pypy/changeset/01fc9f0596f2/ Log: - Add cpyext implementation of Numpy PyArray_* C-API * pypy/module/cpyext/include/numpy/arrayobject.h * pypy/module/cpyext/ndarrayobject.py * pypy/module/cpyext/test/test_ndarrayobject.py - pypy/module/cpyext/api.py: copy_header_files() now copies the numpy subdirectory as well. - pypy/module/micronumpy/interp_dtype.py: DtypeCache.dtypes_by_num: * Keep in dictionary form, since otherwise not all dtypes can be reached. - lib_pypy/numpy.py, lib_pypy/numpypy/__init__.py: * "import numpy" now displays a warning but falls back to "import numpypy as numpy" *without* raising an ImportError. - pypy/module/cpyext/include/boolobject.h and complexobject.h: * Add #define's for PyIntObject and PyComplexObject. diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py --- a/lib_pypy/numpy.py +++ b/lib_pypy/numpy.py @@ -8,8 +8,6 @@ import os -__version__ = '1.7' - def get_include(): head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) return os.path.join(head, 'include') From noreply at buildbot.pypy.org Mon Aug 26 22:02:43 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:43 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/ndarrayobject.py: Rename PyArray_* routines to _PyArray_* to Message-ID: <20130826200243.1F3891C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66333:ada4abc58e15 Date: 2013-07-28 21:49 +0200 http://bitbucket.org/pypy/pypy/changeset/ada4abc58e15/ Log: - cpyext/ndarrayobject.py: Rename PyArray_* routines to _PyArray_* to avoid name clashes with other implementations of numpy in the future. diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -7,10 +7,6 @@ extern "C" { #endif -#include "old_defines.h" - -#define NPY_INLINE - /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -23,20 +19,14 @@ #ifndef PyArray_NDIM -#define PyArray_ISCONTIGUOUS(arr) (1) - #define PyArray_NDIM _PyArray_NDIM #define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE #define PyArray_SIZE _PyArray_SIZE #define PyArray_ITEMSIZE _PyArray_ITEMSIZE #define PyArray_NBYTES _PyArray_NBYTES #define PyArray_TYPE _PyArray_TYPE #define PyArray_DATA _PyArray_DATA - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject +#define PyArray_FromAny _PyArray_FromAny #define PyArray_SimpleNew _PyArray_SimpleNew #define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData @@ -73,19 +63,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - #ifdef __cplusplus } #endif From noreply at buildbot.pypy.org Mon Aug 26 22:02:44 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:44 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/ndarrayobject.py: Add support for PyArray_STRIDE() and 
Message-ID: <20130826200244.6A8651C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66334:3f9be1d43a61 Date: 2013-07-30 11:59 +0200 http://bitbucket.org/pypy/pypy/changeset/3f9be1d43a61/ Log: - cpyext/ndarrayobject.py: Add support for PyArray_STRIDE() and PyArray_FromObject(). - cpyext/include/numpy: Add constants needed by matplotlib. - cpyext/include/complexobject.h: Replace macro with function for const correctness. - lib_pypy/numpy.py: Add __version__. I felt like it's 1.6.2. diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py --- a/lib_pypy/numpy.py +++ b/lib_pypy/numpy.py @@ -8,6 +8,8 @@ import os +__version__ = '1.6.2' + def get_include(): head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) return os.path.join(head, 'include') diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -7,6 +7,10 @@ extern "C" { #endif +#include "old_defines.h" + +#define NPY_INLINE + /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -19,14 +23,20 @@ #ifndef PyArray_NDIM +#define PyArray_ISCONTIGUOUS(arr) (1) + #define PyArray_NDIM _PyArray_NDIM #define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE #define PyArray_SIZE _PyArray_SIZE #define PyArray_ITEMSIZE _PyArray_ITEMSIZE #define PyArray_NBYTES _PyArray_NBYTES #define PyArray_TYPE _PyArray_TYPE #define PyArray_DATA _PyArray_DATA -#define PyArray_FromAny _PyArray_FromAny + +#define PyArray_FromAny _PyArray_FromAny +#define PyArray_FromObject _PyArray_FromObject +#define PyArray_ContiguousFromObject PyArray_FromObject #define PyArray_SimpleNew _PyArray_SimpleNew #define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData @@ -63,6 +73,19 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + #ifdef __cplusplus } #endif From noreply at buildbot.pypy.org Mon Aug 26 22:02:45 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:45 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/include/complexobject.h: Add Py_LOCAL_INLINE() to translate. Message-ID: <20130826200245.A98BB1C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66335:92a2115f0785 Date: 2013-07-30 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/92a2115f0785/ Log: - cpyext/include/complexobject.h: Add Py_LOCAL_INLINE() to translate. - cpyext/stringobject.py: Replace 4x rffi.CCHARP -> CONST_STRING for const correctness. 
diff --git a/pypy/module/cpyext/include/complexobject.h b/pypy/module/cpyext/include/complexobject.h --- a/pypy/module/cpyext/include/complexobject.h +++ b/pypy/module/cpyext/include/complexobject.h @@ -28,7 +28,7 @@ // shmuller 2013/07/30: Make a function, since macro will fail in C++ due to // const correctness if called with "const Py_complex" //#define PyComplex_FromCComplex(c) _PyComplex_FromCComplex(&c) -PyObject *PyComplex_FromCComplex(Py_complex c) { +Py_LOCAL_INLINE(PyObject) *PyComplex_FromCComplex(Py_complex c) { return _PyComplex_FromCComplex(&c); } diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -275,7 +275,7 @@ Py_DecRef(space, string[0]) string[0] = make_ref(space, w_str) - at cpython_api([PyObject, rffi.CCHARP, rffi.CCHARP], PyObject) + at cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyString_AsEncodedObject(space, w_str, encoding, errors): """Encode a string object using the codec registered for encoding and return the result as Python object. encoding and errors have the same meaning as @@ -294,7 +294,7 @@ w_errors = space.wrap(rffi.charp2str(errors)) return space.call_method(w_str, 'encode', w_encoding, w_errors) - at cpython_api([PyObject, rffi.CCHARP, rffi.CCHARP], PyObject) + at cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyString_AsDecodedObject(space, w_str, encoding, errors): """Decode a string object by passing it to the codec registered for encoding and return the result as Python object. encoding and From noreply at buildbot.pypy.org Mon Aug 26 22:02:46 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:46 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/number.py, cpyext/test/test_number.py: Implement Message-ID: <20130826200246.EBDF91C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66336:eba88c40cdfa Date: 2013-07-30 20:24 +0200 http://bitbucket.org/pypy/pypy/changeset/eba88c40cdfa/ Log: - cpyext/number.py, cpyext/test/test_number.py: Implement PyNumber_CoerceEx() and PyNumber_Coerce(). 
diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, Py_ssize_t -from pypy.module.cpyext.pyobject import PyObject +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, from_ref, make_ref, Py_DecRef from rpython.rtyper.lltypesystem import rffi, lltype from rpython.tool.sourcetools import func_with_new_name @@ -56,6 +56,38 @@ """ return space.index(w_obj) + at cpython_api([PyObjectP, PyObjectP], rffi.INT_real, error=-1) +def PyNumber_CoerceEx(space, pp1, pp2): + """ + """ + w_obj1 = from_ref(space, pp1[0]) + w_obj2 = from_ref(space, pp2[0]) + w_res = space.try_coerce(w_obj1, w_obj2) + if w_res is None: + return 1 + else: + Py_DecRef(space, pp1[0]) + Py_DecRef(space, pp2[0]) + pp1[0] = make_ref(space, space.getitem(w_res, space.wrap(0))) + pp2[0] = make_ref(space, space.getitem(w_res, space.wrap(1))) + return 0 + + at cpython_api([PyObjectP, PyObjectP], rffi.INT_real, error=-1) +def PyNumber_Coerce(space, pp1, pp2): + """ + """ + w_obj1 = from_ref(space, pp1[0]) + w_obj2 = from_ref(space, pp2[0]) + w_res = space.coerce(w_obj1, w_obj2) + if w_res is None: + return 1 + else: + Py_DecRef(space, pp1[0]) + Py_DecRef(space, pp2[0]) + pp1[0] = make_ref(space, space.getitem(w_res, space.wrap(0))) + pp2[0] = make_ref(space, space.getitem(w_res, space.wrap(1))) + return 0 + def func_rename(newname): return lambda func: func_with_new_name(func, newname) diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import OperationError from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext import sequence +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, from_ref, make_ref, Py_DecRef class TestIterator(BaseApiTest): def test_check(self, space, api): @@ -35,6 +36,26 @@ assert w_l is None api.PyErr_Clear() + def test_number_coerce_ex(self, space, api): + pl = make_ref(space, space.wrap(123)) + pf = make_ref(space, space.wrap(42.)) + ppl = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + ppf = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + ppl[0] = pl + ppf[0] = pf + + ret = api.PyNumber_CoerceEx(ppl, ppf) + assert ret == 0 + + w_res = from_ref(space, ppl[0]) + + assert api.PyFloat_Check(w_res) + assert space.unwrap(w_res) == 123. + Py_DecRef(space, ppl[0]) + Py_DecRef(space, ppf[0]) + lltype.free(ppl, flavor='raw') + lltype.free(ppf, flavor='raw') + def test_numbermethods(self, space, api): assert "ab" == space.unwrap( api.PyNumber_Add(space.wrap("a"), space.wrap("b"))) From noreply at buildbot.pypy.org Mon Aug 26 22:02:48 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:48 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/include: Add a few definitions needed by PyCXX to headers (for Message-ID: <20130826200248.38A011C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66337:aabf1cc286a7 Date: 2013-07-31 00:42 +0200 http://bitbucket.org/pypy/pypy/changeset/aabf1cc286a7/ Log: - cpyext/include: Add a few definitions needed by PyCXX to headers (for matplotlib build). - NEW: cpyext/include/missing.h: Temporary place to put definitions from missing header files. 
diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -126,6 +126,9 @@ #include "pysignals.h" #include "pythread.h" +/* Missing definitions */ +#include "missing.h" + // XXX This shouldn't be included here #include "structmember.h" diff --git a/pypy/module/cpyext/include/funcobject.h b/pypy/module/cpyext/include/funcobject.h --- a/pypy/module/cpyext/include/funcobject.h +++ b/pypy/module/cpyext/include/funcobject.h @@ -12,6 +12,8 @@ PyObject *func_name; /* The __name__ attribute, a string object */ } PyFunctionObject; +PyAPI_DATA(PyTypeObject) PyFunction_Type; + #define PyFunction_GET_CODE(obj) PyFunction_GetCode((PyObject*)(obj)) #define PyMethod_GET_FUNCTION(obj) PyMethod_Function((PyObject*)(obj)) diff --git a/pypy/module/cpyext/include/missing.h b/pypy/module/cpyext/include/missing.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/missing.h @@ -0,0 +1,15 @@ + +/* Definitions from missing header files */ + +#ifndef Py_MISSING_H +#define Py_MISSING_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_DATA(PyTypeObject) PyMethod_Type, PyRange_Type, PyTraceBack_Type; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_MISSING_H */ diff --git a/pypy/module/cpyext/include/modsupport.h b/pypy/module/cpyext/include/modsupport.h --- a/pypy/module/cpyext/include/modsupport.h +++ b/pypy/module/cpyext/include/modsupport.h @@ -56,6 +56,7 @@ #define PyMODINIT_FUNC void #endif +PyAPI_DATA(char *) _Py_PackageContext; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -6,14 +6,41 @@ extern "C" { #endif - void Py_FatalError(const char *msg); +void Py_FatalError(const char *msg); /* the -3 option will probably not be implemented */ +/* #define Py_Py3kWarningFlag 0 #define Py_FrozenFlag 0 #define Py_VerboseFlag 0 #define Py_DebugFlag 1 +*/ + +/* taken from Python-2.7.3/Include/pydebug.h */ +PyAPI_DATA(int) Py_DebugFlag; +PyAPI_DATA(int) Py_VerboseFlag; +PyAPI_DATA(int) Py_InteractiveFlag; +PyAPI_DATA(int) Py_InspectFlag; +PyAPI_DATA(int) Py_OptimizeFlag; +PyAPI_DATA(int) Py_NoSiteFlag; +PyAPI_DATA(int) Py_BytesWarningFlag; +PyAPI_DATA(int) Py_UseClassExceptionsFlag; +PyAPI_DATA(int) Py_FrozenFlag; +PyAPI_DATA(int) Py_TabcheckFlag; +PyAPI_DATA(int) Py_UnicodeFlag; +PyAPI_DATA(int) Py_IgnoreEnvironmentFlag; +PyAPI_DATA(int) Py_DivisionWarningFlag; +PyAPI_DATA(int) Py_DontWriteBytecodeFlag; +PyAPI_DATA(int) Py_NoUserSiteDirectory; +/* _XXX Py_QnewFlag should go away in 3.0. It's true iff -Qnew is passed, + * on the command line, and is used in 2.2 by ceval.c to make all "/" divisions + * true divisions (which they will be in 3.0). */ +PyAPI_DATA(int) _Py_QnewFlag; +/* Warn about 3.x issues */ +PyAPI_DATA(int) Py_Py3kWarningFlag; +PyAPI_DATA(int) Py_HashRandomizationFlag; + typedef struct { int cf_flags; /* bitmask of CO_xxx flags relevant to future */ From noreply at buildbot.pypy.org Mon Aug 26 22:02:49 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:49 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/include/numpy/arrayobject.h: Many more definitions. Message-ID: <20130826200249.C46981C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. 
Muller Branch: pypy-pyarray Changeset: r66338:53d9c2233349 Date: 2013-07-31 02:59 +0200 http://bitbucket.org/pypy/pypy/changeset/53d9c2233349/ Log: - cpyext/include/numpy/arrayobject.h: Many more definitions. diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -7,6 +7,8 @@ extern "C" { #endif +#include /* memset */ + #include "old_defines.h" #define NPY_INLINE @@ -14,6 +16,9 @@ /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject +typedef unsigned char npy_bool; +typedef unsigned char npy_uint8; + #ifndef npy_intp #define npy_intp long #endif @@ -21,30 +26,8 @@ #define import_array() #endif -#ifndef PyArray_NDIM -#define PyArray_ISCONTIGUOUS(arr) (1) - -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject - -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData -#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning - -#endif - -/* copied from numpy/ndarraytypes.h +/* data types copied from numpy/ndarraytypes.h * keep numbers in sync with micronumpy.interp_dtype.DTypeCache */ enum NPY_TYPES { NPY_BOOL=0, @@ -86,6 +69,67 @@ #define NPY_COMPLEX32 NPY_CFLOAT #define NPY_COMPLEX64 NPY_CDOUBLE + +/* functions */ +#ifndef PyArray_NDIM + +#define PyArray_ISCONTIGUOUS(arr) (1) + +#define PyArray_NDIM _PyArray_NDIM +#define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE +#define PyArray_SIZE _PyArray_SIZE +#define PyArray_ITEMSIZE _PyArray_ITEMSIZE +#define PyArray_NBYTES _PyArray_NBYTES +#define PyArray_TYPE _PyArray_TYPE +#define PyArray_DATA _PyArray_DATA + +#define PyArray_BYTES(obj) ((char *)PyArray_DATA(obj)) + +#define PyArray_FromAny _PyArray_FromAny +#define PyArray_FromObject _PyArray_FromObject +#define PyArray_ContiguousFromObject PyArray_FromObject + +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData +#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning + +#define PyArray_EMPTY(nd, dims, type_num, fortran) \ + PyArray_SimpleNew(nd, dims, type_num) + +#define PyArray_ZEROS PyArray_EMPTY + +/* +PyObject* PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran) +{ + PyObject *arr = PyArray_EMPTY(nd, dims, type_num, fortran); + memset(PyArray_DATA(arr), 0, PyArray_NBYTES(arr)); + return arr; +} +*/ + +/* Don't use these in loops! 
*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0))) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1))) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2))) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2) + \ + (l)*PyArray_STRIDE(obj,3))) + +#endif + #ifdef __cplusplus } #endif From noreply at buildbot.pypy.org Mon Aug 26 22:02:50 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:50 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - cpyext/include/numpy/arrayobject.h: Many more definitions. Succeeded Message-ID: <20130826200250.E8AAF1C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66339:4a0adafeea01 Date: 2013-07-31 17:55 +0200 http://bitbucket.org/pypy/pypy/changeset/4a0adafeea01/ Log: - cpyext/include/numpy/arrayobject.h: Many more definitions. Succeeded to compile a patched version of matplotlib with this. diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -26,6 +26,12 @@ #define import_array() #endif +#define NPY_MAXDIMS 32 + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; /* data types copied from numpy/ndarraytypes.h * keep numbers in sync with micronumpy.interp_dtype.DTypeCache @@ -70,10 +76,18 @@ #define NPY_COMPLEX64 NPY_CDOUBLE +/* selection of flags */ +#define NPY_C_CONTIGUOUS 0x0001 +#define NPY_OWNDATA 0x0004 +#define NPY_ALIGNED 0x0100 +#define NPY_IN_ARRAY (NPY_C_CONTIGUOUS | NPY_ALIGNED) + + /* functions */ #ifndef PyArray_NDIM #define PyArray_ISCONTIGUOUS(arr) (1) +#define PyArray_Check(arr) (1) #define PyArray_NDIM _PyArray_NDIM #define PyArray_DIM _PyArray_DIM @@ -84,11 +98,16 @@ #define PyArray_TYPE _PyArray_TYPE #define PyArray_DATA _PyArray_DATA -#define PyArray_BYTES(obj) ((char *)PyArray_DATA(obj)) +#define PyArray_Size PyArray_SIZE +#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) #define PyArray_FromAny _PyArray_FromAny #define PyArray_FromObject _PyArray_FromObject #define PyArray_ContiguousFromObject PyArray_FromObject +#define PyArray_ContiguousFromAny PyArray_FromObject + +#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) +#define PyArray_FROM_OTF(obj, typenum, requirements) (obj) #define PyArray_SimpleNew _PyArray_SimpleNew #define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData @@ -105,9 +124,11 @@ PyObject *arr = PyArray_EMPTY(nd, dims, type_num, fortran); memset(PyArray_DATA(arr), 0, PyArray_NBYTES(arr)); return arr; -} +}; */ +#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) + /* Don't use these in loops! */ #define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ From noreply at buildbot.pypy.org Mon Aug 26 22:02:52 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:52 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - Add missing symbols for importing matplotlib. Message-ID: <20130826200252.30D281C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. 
Muller Branch: pypy-pyarray Changeset: r66340:ca566c12805d Date: 2013-08-01 08:54 +0200 http://bitbucket.org/pypy/pypy/changeset/ca566c12805d/ Log: - Add missing symbols for importing matplotlib. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -401,6 +401,16 @@ 'PyThread_ReInitTLS', 'PyStructSequence_InitType', 'PyStructSequence_New', + + 'PyFunction_Type', 'PyMethod_Type', 'PyRange_Type', 'PyTraceBack_Type', + + 'PyArray_ZEROS', + + 'Py_DebugFlag', 'Py_VerboseFlag', 'Py_InteractiveFlag', 'Py_InspectFlag', + 'Py_OptimizeFlag', 'Py_NoSiteFlag', 'Py_BytesWarningFlag', 'Py_UseClassExceptionsFlag', + 'Py_FrozenFlag', 'Py_TabcheckFlag', 'Py_UnicodeFlag', 'Py_IgnoreEnvironmentFlag', + 'Py_DivisionWarningFlag', 'Py_DontWriteBytecodeFlag', 'Py_NoUserSiteDirectory', + '_Py_QnewFlag', 'Py_Py3kWarningFlag', 'Py_HashRandomizationFlag', '_Py_PackageContext', ] TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur @@ -990,6 +1000,8 @@ source_dir / "capsule.c", source_dir / "pysignals.c", source_dir / "pythread.c", + source_dir / "ndarrayobject.c", + source_dir / "missing.c", ], separate_module_sources=separate_module_sources, export_symbols=export_symbols_eci, diff --git a/pypy/module/cpyext/include/missing.h b/pypy/module/cpyext/include/missing.h --- a/pypy/module/cpyext/include/missing.h +++ b/pypy/module/cpyext/include/missing.h @@ -7,7 +7,9 @@ extern "C" { #endif -PyAPI_DATA(PyTypeObject) PyMethod_Type, PyRange_Type, PyTraceBack_Type; +PyAPI_DATA(PyTypeObject) PyMethod_Type; +PyAPI_DATA(PyTypeObject) PyRange_Type; +PyAPI_DATA(PyTypeObject) PyTraceBack_Type; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -7,8 +7,6 @@ extern "C" { #endif -#include /* memset */ - #include "old_defines.h" #define NPY_INLINE @@ -116,16 +114,7 @@ #define PyArray_EMPTY(nd, dims, type_num, fortran) \ PyArray_SimpleNew(nd, dims, type_num) -#define PyArray_ZEROS PyArray_EMPTY - -/* -PyObject* PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran) -{ - PyObject *arr = PyArray_EMPTY(nd, dims, type_num, fortran); - memset(PyArray_DATA(arr), 0, PyArray_NBYTES(arr)); - return arr; -}; -*/ +PyObject* PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); #define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h --- a/pypy/module/cpyext/include/pythonrun.h +++ b/pypy/module/cpyext/include/pythonrun.h @@ -8,15 +8,6 @@ void Py_FatalError(const char *msg); -/* the -3 option will probably not be implemented */ -/* -#define Py_Py3kWarningFlag 0 - -#define Py_FrozenFlag 0 -#define Py_VerboseFlag 0 -#define Py_DebugFlag 1 -*/ - /* taken from Python-2.7.3/Include/pydebug.h */ PyAPI_DATA(int) Py_DebugFlag; PyAPI_DATA(int) Py_VerboseFlag; diff --git a/pypy/module/cpyext/src/missing.c b/pypy/module/cpyext/src/missing.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/src/missing.c @@ -0,0 +1,29 @@ +/* Definitions of missing symbols go here */ + +#include "Python.h" + +PyTypeObject PyFunction_Type; + +PyTypeObject PyMethod_Type; +PyTypeObject PyRange_Type; +PyTypeObject PyTraceBack_Type; + +int Py_DebugFlag = 1; +int Py_VerboseFlag = 0; +int Py_InteractiveFlag = 0; +int Py_InspectFlag = 0; +int 
Py_OptimizeFlag = 0; +int Py_NoSiteFlag = 0; +int Py_BytesWarningFlag = 0; +int Py_UseClassExceptionsFlag = 0; +int Py_FrozenFlag = 0; +int Py_TabcheckFlag = 0; +int Py_UnicodeFlag = 0; +int Py_IgnoreEnvironmentFlag = 0; +int Py_DivisionWarningFlag = 0; +int Py_DontWriteBytecodeFlag = 0; +int Py_NoUserSiteDirectory = 0; +int _Py_QnewFlag = 0; +int Py_Py3kWarningFlag = 0; +int Py_HashRandomizationFlag = 0; + diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -0,0 +1,13 @@ + +#include "Python.h" +#include "numpy/arrayobject.h" +#include /* memset */ + +PyObject* +PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran) +{ + PyObject *arr = PyArray_EMPTY(nd, dims, type_num, fortran); + memset(PyArray_DATA(arr), 0, PyArray_NBYTES(arr)); + return arr; +} + From noreply at buildbot.pypy.org Mon Aug 26 22:02:53 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:53 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: - lib_pypy/numpypy/__init__.py: __version__ and get_include() now Message-ID: <20130826200253.6CC541C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66341:3c29d99be539 Date: 2013-08-05 05:03 +0200 http://bitbucket.org/pypy/pypy/changeset/3c29d99be539/ Log: - lib_pypy/numpypy/__init__.py: __version__ and get_include() now defined here, so that site-packages/numpy can get it. - lib_pypy/numpy.py: Contains only the warning. This file should be shadowed if site-packages/numpy is installed. It presently isn't so it must be renamed. - cpyext/include/complexobject.h: Fix error in macro usage. - micronumpy/base.py: convert_to_array(): Add call to __array__() method, if it exists. 
diff --git a/lib_pypy/numpy.py b/lib_pypy/numpy.py --- a/lib_pypy/numpy.py +++ b/lib_pypy/numpy.py @@ -6,11 +6,4 @@ from numpypy import * -import os -__version__ = '1.6.2' - -def get_include(): - head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) - return os.path.join(head, 'include') - diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py --- a/lib_pypy/numpypy/__init__.py +++ b/lib_pypy/numpypy/__init__.py @@ -6,9 +6,19 @@ from __builtin__ import bool, int, long, float, complex, object, unicode, str from core import abs, max, min -__all__ = [] +__version__ = '1.7.0' + +import os +def get_include(): + head, tail = os.path.split(os.path.dirname(os.path.abspath(__file__))) + return os.path.join(head, '../include') + + +__all__ = ['__version__', 'get_include'] __all__ += core.__all__ __all__ += lib.__all__ #import sys #sys.modules.setdefault('numpy', sys.modules['numpypy']) + + diff --git a/pypy/module/cpyext/include/complexobject.h b/pypy/module/cpyext/include/complexobject.h --- a/pypy/module/cpyext/include/complexobject.h +++ b/pypy/module/cpyext/include/complexobject.h @@ -28,7 +28,7 @@ // shmuller 2013/07/30: Make a function, since macro will fail in C++ due to // const correctness if called with "const Py_complex" //#define PyComplex_FromCComplex(c) _PyComplex_FromCComplex(&c) -Py_LOCAL_INLINE(PyObject) *PyComplex_FromCComplex(Py_complex c) { +Py_LOCAL_INLINE(PyObject *) PyComplex_FromCComplex(Py_complex c) { return _PyComplex_FromCComplex(&c); } diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,4 +1,5 @@ +from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root from rpython.tool.pairtype import extendabletype from pypy.module.micronumpy.support import calc_strides @@ -91,10 +92,20 @@ if isinstance(w_obj, W_NDimArray): return w_obj - elif issequence_w(space, w_obj): - # Convert to array. - return array(space, w_obj, w_order=None) else: - # If it's a scalar - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) - return W_NDimArray.new_scalar(space, dtype, w_obj) + # Use __array__() method if it exists + w_array = space.lookup(w_obj, "__array__") + if w_array is not None: + w_result = space.get_and_call_function(w_array, w_obj) + if isinstance(w_result, W_NDimArray): + return w_result + else: + raise OperationError(space.w_ValueError, + space.wrap("object __array__ method not producing an array")) + elif issequence_w(space, w_obj): + # Convert to array. + return array(space, w_obj, w_order=None) + else: + # If it's a scalar + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) + return W_NDimArray.new_scalar(space, dtype, w_obj) From noreply at buildbot.pypy.org Mon Aug 26 22:02:54 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:54 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: Add some missing numpypy features Message-ID: <20130826200254.97C3E1C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66342:09db6e0e5c1d Date: 2013-08-06 00:12 +0200 http://bitbucket.org/pypy/pypy/changeset/09db6e0e5c1d/ Log: Add some missing numpypy features - ndarrayobject.py: PyArray_Check(), PyArray_CheckExact() - interp_numarray.py: Stub implementations for __array__(), __array_prepare__(), __array_wrap__(). 
diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -85,16 +85,17 @@ #ifndef PyArray_NDIM #define PyArray_ISCONTIGUOUS(arr) (1) -#define PyArray_Check(arr) (1) -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA +#define PyArray_Check _PyArray_Check +#define PyArray_CheckExact _PyArray_CheckExact +#define PyArray_NDIM _PyArray_NDIM +#define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE +#define PyArray_SIZE _PyArray_SIZE +#define PyArray_ITEMSIZE _PyArray_ITEMSIZE +#define PyArray_NBYTES _PyArray_NBYTES +#define PyArray_TYPE _PyArray_TYPE +#define PyArray_DATA _PyArray_DATA #define PyArray_Size PyArray_SIZE #define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -12,6 +12,20 @@ # the asserts are needed, otherwise the translation fails + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def _PyArray_Check(space, w_obj): + w_obj_type = space.type(w_obj) + w_type = space.gettypeobject(W_NDimArray.typedef) + return (space.is_w(w_obj_type, w_type) or + space.is_true(space.issubtype(w_obj_type, w_type))) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def _PyArray_CheckExact(space, w_obj): + w_obj_type = space.type(w_obj) + w_type = space.gettypeobject(W_NDimArray.typedef) + return space.is_w(w_obj_type, w_type) + + @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def _PyArray_NDIM(space, w_array): assert isinstance(w_array, W_NDimArray) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -24,6 +24,14 @@ class TestNDArrayObject(BaseApiTest): + def test_Check(self, space, api): + a = array(space, [10, 5, 3]) + x = space.wrap(10.) 
+ assert api._PyArray_Check(a) + assert api._PyArray_CheckExact(a) + assert not api._PyArray_Check(x) + assert not api._PyArray_CheckExact(x) + def test_NDIM(self, space, api): a = array(space, [10, 5, 3]) assert api._PyArray_NDIM(a) == 3 diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -418,6 +418,26 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr___array__(self, space): + # stub implementation of __array__() + return self + + def descr___array_prepare__(self, space, w_array): + # stub implementation of __array_prepare__() + if isinstance(w_array, W_NDimArray): + return w_array + else: + raise OperationError(space.w_TypeError, + space.wrap("can only be called with ndarray object")) + + def descr___array_wrap__(self, space, w_array): + # stub implementation of __array_wrap__() + if isinstance(w_array, W_NDimArray): + return w_array + else: + raise OperationError(space.w_TypeError, + space.wrap("can only be called with ndarray object")) + def descr_array_iface(self, space): addr = self.implementation.get_storage_as_int(space) # will explode if it can't @@ -1084,6 +1104,10 @@ __reduce__ = interp2app(W_NDimArray.descr_reduce), __setstate__ = interp2app(W_NDimArray.descr_setstate), __array_finalize__ = interp2app(W_NDimArray.descr___array_finalize__), + + __array__ = interp2app(W_NDimArray.descr___array__), + __array_prepare__ = interp2app(W_NDimArray.descr___array_prepare__), + __array_wrap__ = interp2app(W_NDimArray.descr___array_wrap__), ) @unwrap_spec(ndmin=int, copy=bool, subok=bool) From noreply at buildbot.pypy.org Mon Aug 26 22:02:55 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:55 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: Add PyArray_New() and many macros for f2py. Message-ID: <20130826200255.D53D21C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66343:bb2aafab5a92 Date: 2013-08-08 00:41 +0200 http://bitbucket.org/pypy/pypy/changeset/bb2aafab5a92/ Log: Add PyArray_New() and many macros for f2py. The modified version of f2py from the numpy pypy-hack branch generates code that compiles with this, but the generated modules don't work due to bypassing of Py_InitModule(). 
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -404,7 +404,7 @@ 'PyFunction_Type', 'PyMethod_Type', 'PyRange_Type', 'PyTraceBack_Type', - 'PyArray_ZEROS', + 'PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS', '_PyArray_CopyInto', 'Py_DebugFlag', 'Py_VerboseFlag', 'Py_InteractiveFlag', 'Py_InspectFlag', 'Py_OptimizeFlag', 'Py_NoSiteFlag', 'Py_BytesWarningFlag', 'Py_UseClassExceptionsFlag', diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -13,13 +13,20 @@ /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject +#define PyArray_Descr PyObject + +PyTypeObject PyArray_Type; typedef unsigned char npy_bool; typedef unsigned char npy_uint8; +typedef int npy_int; #ifndef npy_intp #define npy_intp long #endif +#ifndef NPY_INTP_FMT +#define NPY_INTP_FMT "ld" +#endif #ifndef import_array #define import_array() #endif @@ -73,21 +80,51 @@ #define NPY_COMPLEX32 NPY_CFLOAT #define NPY_COMPLEX64 NPY_CDOUBLE +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) +#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) +#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) +#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) + /* selection of flags */ -#define NPY_C_CONTIGUOUS 0x0001 +#define NPY_CONTIGUOUS 0x0001 +#define NPY_FORTRAN 0x0002 #define NPY_OWNDATA 0x0004 +#define NPY_FORCECAST 0x0010 #define NPY_ALIGNED 0x0100 -#define NPY_IN_ARRAY (NPY_C_CONTIGUOUS | NPY_ALIGNED) - +#define NPY_NOTSWAPPED 0x0200 +#define NPY_WRITEABLE 0x0400 +#define NPY_C_CONTIGUOUS NPY_CONTIGUOUS +#define NPY_F_CONTIGUOUS NPY_FORTRAN +#define NPY_IN_ARRAY (NPY_C_CONTIGUOUS | NPY_ALIGNED) +#define NPY_BEHAVED (NPY_ALIGNED | NPY_WRITEABLE) +#define NPY_CARRAY (NPY_CONTIGUOUS | NPY_BEHAVED) +#define NPY_FARRAY (NPY_FORTRAN | NPY_BEHAVED) +#define NPY_DEFAULT NPY_CARRAY /* functions */ #ifndef PyArray_NDIM -#define PyArray_ISCONTIGUOUS(arr) (1) - #define PyArray_Check _PyArray_Check #define PyArray_CheckExact _PyArray_CheckExact + +#define PyArray_ISONESEGMENT(arr) (1) +#define PyArray_FLAGS(arr) (0) + +#define PyArray_ISCONTIGUOUS _PyArray_ISCONTIGUOUS + +#define PyArray_ISCARRAY(arr) PyArray_ISCONTIGUOUS(arr) +#define PyArray_ISFARRAY(arr) (!PyArray_ISCONTIGUOUS(arr)) + #define PyArray_NDIM _PyArray_NDIM #define PyArray_DIM _PyArray_DIM #define PyArray_STRIDE _PyArray_STRIDE @@ -106,8 +143,10 @@ #define PyArray_ContiguousFromAny PyArray_FromObject #define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) -#define PyArray_FROM_OTF(obj, typenum, requirements) (obj) +#define PyArray_FROM_OTF(obj, typenum, requirements) \ + PyArray_FromObject(obj, typenum, 0, 0) +#define PyArray_New _PyArray_New #define PyArray_SimpleNew _PyArray_SimpleNew #define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData #define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning @@ -115,7 +154,13 @@ 
#define PyArray_EMPTY(nd, dims, type_num, fortran) \ PyArray_SimpleNew(nd, dims, type_num) -PyObject* PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); +void _PyArray_FILLWBYTE(PyObject* obj, int val); +PyObject* _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); +int _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); + +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto #define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -2,6 +2,7 @@ Numpy C-API for PyPy - S. H. Muller, 2013/07/26 """ +from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL from pypy.module.cpyext.pyobject import PyObject @@ -10,6 +11,9 @@ from pypy.module.micronumpy.arrayimpl.scalar import Scalar from rpython.rlib.rawstorage import RAW_STORAGE_PTR +NPY_FORTRAN = 0x0002 +NPY_OWNDATA = 0x0004 + # the asserts are needed, otherwise the translation fails @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) @@ -26,7 +30,13 @@ return space.is_w(w_obj_type, w_type) - at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def _PyArray_ISCONTIGUOUS(space, w_array): + assert isinstance(w_array, W_NDimArray) + return w_array.implementation.order == 'C' + + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_NDIM(space, w_array): assert isinstance(w_array, W_NDimArray) return len(w_array.get_shape()) @@ -46,7 +56,7 @@ assert isinstance(w_array, W_NDimArray) return w_array.get_size() - at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_ITEMSIZE(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_dtype().get_size() @@ -56,7 +66,7 @@ assert isinstance(w_array, W_NDimArray) return w_array.get_size() * w_array.get_dtype().get_size() - at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_TYPE(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_dtype().num @@ -99,30 +109,34 @@ return wrap_impl(space, space.type(w_array), w_array, new_impl) - at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) -def _PyArray_SimpleNew(space, nd, dims, typenum): - dtype = get_dtype_cache(space).dtypes_by_num[typenum] +def get_shape_and_dtype(space, nd, dims, typenum): shape = [] for i in range(nd): # back-and-forth wrapping needed to translate shape.append(space.int_w(space.wrap(dims[i]))) + dtype = get_dtype_cache(space).dtypes_by_num[typenum] + return shape, dtype +def simple_new(space, nd, dims, typenum, + order='C', owning=False, w_subtype=None): + shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) return W_NDimArray.from_shape(space, shape, dtype) - -def simple_new_from_data(space, nd, dims, typenum, data, owning): - dtype = get_dtype_cache(space).dtypes_by_num[typenum] +def simple_new_from_data(space, nd, dims, typenum, data, + order='C', owning=False, w_subtype=None): + shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) storage = rffi.cast(RAW_STORAGE_PTR, data) if nd == 0: w_val = 
dtype.itemtype.box_raw_data(storage) return W_NDimArray(Scalar(dtype, w_val)) - else: - shape = [] - for i in range(nd): - # back-and-forth wrapping needed to translate - shape.append(space.int_w(space.wrap(dims[i]))) - - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, owning=owning) + else: + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + order=order, owning=owning, w_subtype=w_subtype) + + + at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) +def _PyArray_SimpleNew(space, nd, dims, typenum): + return simple_new(space, nd, dims, typenum) @cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject) def _PyArray_SimpleNewFromData(space, nd, dims, typenum, data): @@ -135,3 +149,24 @@ # ((PyArrayObject*)arr)->flags |= NPY_OWNDATA; return simple_new_from_data(space, nd, dims, typenum, data, owning=True) + + at cpython_api([rffi.VOIDP, Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.LONGP, + rffi.VOIDP, Py_ssize_t, Py_ssize_t, PyObject], PyObject) +def _PyArray_New(space, subtype, nd, dims, typenum, strides, data, itemsize, flags, obj): + if strides: + raise OperationError(space.w_NotImplementedError, + space.wrap("strides must be NULL")) + + order = 'F' if flags & NPY_FORTRAN else 'C' + owning = True if flags & NPY_OWNDATA else False + w_subtype = None + + if data: + return simple_new_from_data(space, nd, dims, typenum, data, + order=order, owning=owning, w_subtype=w_subtype) + else: + return simple_new(space, nd, dims, typenum, + order=order, owning=owning, w_subtype=w_subtype) + + + diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -1,13 +1,27 @@ #include "Python.h" #include "numpy/arrayobject.h" -#include /* memset */ +#include /* memset, memcpy */ + +PyTypeObject PyArray_Type; + +void +_PyArray_FILLWBYTE(PyObject* obj, int val) { + memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); +} PyObject* -PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran) +_PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran) { PyObject *arr = PyArray_EMPTY(nd, dims, type_num, fortran); memset(PyArray_DATA(arr), 0, PyArray_NBYTES(arr)); return arr; } +int +_PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src) +{ + memcpy(PyArray_DATA(dest), PyArray_DATA(src), PyArray_NBYTES(dest)); + return 0; +} + diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -11,13 +11,13 @@ dtype = get_dtype_cache(space).w_float64dtype return W_NDimArray.new_scalar(space, dtype, space.wrap(10.)) -def array(space, shape): +def array(space, shape, order='C'): dtype = get_dtype_cache(space).w_float64dtype - return W_NDimArray.from_shape(space, shape, dtype, order='C') + return W_NDimArray.from_shape(space, shape, dtype, order=order) -def iarray(space, shape): +def iarray(space, shape, order='C'): dtype = get_dtype_cache(space).w_int64dtype - return W_NDimArray.from_shape(space, shape, dtype, order='C') + return W_NDimArray.from_shape(space, shape, dtype, order=order) NULL = lltype.nullptr(rffi.VOIDP.TO) @@ -32,6 +32,12 @@ assert not api._PyArray_Check(x) assert not api._PyArray_CheckExact(x) + def test_ISCONTIGUOUS(self, space, api): + a = array(space, [10, 5, 3], order='C') + f = array(space, [10, 5, 3], order='F') + assert 
api._PyArray_ISCONTIGUOUS(a) == 1 + assert api._PyArray_ISCONTIGUOUS(f) == 0 + def test_NDIM(self, space, api): a = array(space, [10, 5, 3]) assert api._PyArray_NDIM(a) == 3 From noreply at buildbot.pypy.org Mon Aug 26 22:02:57 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:57 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: Implement PyArray_FLAGS() Message-ID: <20130826200257.0DD461C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66344:232f215b317d Date: 2013-08-08 16:16 +0200 http://bitbucket.org/pypy/pypy/changeset/232f215b317d/ Log: Implement PyArray_FLAGS() - ndarrayobject.py: Implement PyArray_FLAGS() instead of PyArray_ISCONTIGUOUS(). This should yield correct results w.r.t. CPython. - arrayobject.h: Literally copy many more macros from numpy, which can be all traced back to PyArray_FLAGS(). diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -10,12 +10,15 @@ #include "old_defines.h" #define NPY_INLINE +#define NPY_UNUSED(x) x +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject #define PyArray_Descr PyObject -PyTypeObject PyArray_Type; +//PyTypeObject PyArray_Type; typedef unsigned char npy_bool; typedef unsigned char npy_uint8; @@ -95,35 +98,79 @@ #define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) -/* selection of flags */ -#define NPY_CONTIGUOUS 0x0001 -#define NPY_FORTRAN 0x0002 -#define NPY_OWNDATA 0x0004 -#define NPY_FORCECAST 0x0010 -#define NPY_ALIGNED 0x0100 -#define NPY_NOTSWAPPED 0x0200 -#define NPY_WRITEABLE 0x0400 -#define NPY_C_CONTIGUOUS NPY_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_FORTRAN -#define NPY_IN_ARRAY (NPY_C_CONTIGUOUS | NPY_ALIGNED) -#define NPY_BEHAVED (NPY_ALIGNED | NPY_WRITEABLE) -#define NPY_CARRAY (NPY_CONTIGUOUS | NPY_BEHAVED) -#define NPY_FARRAY (NPY_FORTRAN | NPY_BEHAVED) -#define NPY_DEFAULT NPY_CARRAY +/* flags */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 +#define NPY_ARRAY_OWNDATA 0x0004 +#define NPY_ARRAY_FORCECAST 0x0010 +#define NPY_ARRAY_ENSURECOPY 0x0020 +#define NPY_ARRAY_ENSUREARRAY 0x0040 +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 +#define NPY_ARRAY_ALIGNED 0x0100 +#define NPY_ARRAY_NOTSWAPPED 0x0200 +#define NPY_ARRAY_WRITEABLE 0x0400 +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL 
(NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_CARRAY NPY_ARRAY_CARRAY + +#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + +#define PyArray_ISONESEGMENT(arr) (1) +#define PyArray_ISNOTSWAPPED(arr) (1) +#define PyArray_ISBYTESWAPPED(arr) (0) + /* functions */ #ifndef PyArray_NDIM #define PyArray_Check _PyArray_Check #define PyArray_CheckExact _PyArray_CheckExact - -#define PyArray_ISONESEGMENT(arr) (1) -#define PyArray_FLAGS(arr) (0) - -#define PyArray_ISCONTIGUOUS _PyArray_ISCONTIGUOUS - -#define PyArray_ISCARRAY(arr) PyArray_ISCONTIGUOUS(arr) -#define PyArray_ISFARRAY(arr) (!PyArray_ISCONTIGUOUS(arr)) +#define PyArray_FLAGS _PyArray_FLAGS #define PyArray_NDIM _PyArray_NDIM #define PyArray_DIM _PyArray_DIM diff --git a/pypy/module/cpyext/include/numpy/old_defines.h b/pypy/module/cpyext/include/numpy/old_defines.h --- a/pypy/module/cpyext/include/numpy/old_defines.h +++ b/pypy/module/cpyext/include/numpy/old_defines.h @@ -2,9 +2,11 @@ #ifndef OLD_DEFINES_H #define OLD_DEFINES_H +/* #if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION #error The header "old_defines.h" is deprecated as of NumPy 1.7. 
#endif +*/ #define NDARRAY_VERSION NPY_VERSION diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -8,11 +8,38 @@ from pypy.module.cpyext.pyobject import PyObject from pypy.module.micronumpy.interp_numarray import W_NDimArray, convert_to_array, wrap_impl from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray from pypy.module.micronumpy.arrayimpl.scalar import Scalar from rpython.rlib.rawstorage import RAW_STORAGE_PTR -NPY_FORTRAN = 0x0002 -NPY_OWNDATA = 0x0004 +NPY_C_CONTIGUOUS = 0x0001 +NPY_F_CONTIGUOUS = 0x0002 +NPY_OWNDATA = 0x0004 +NPY_FORCECAST = 0x0010 +NPY_ENSURECOPY = 0x0020 +NPY_ENSUREARRAY = 0x0040 +NPY_ELEMENTSTRIDES = 0x0080 +NPY_ALIGNED = 0x0100 +NPY_NOTSWAPPED = 0x0200 +NPY_WRITEABLE = 0x0400 +NPY_UPDATEIFCOPY = 0x1000 + +NPY_BEHAVED = NPY_ALIGNED | NPY_WRITEABLE +NPY_BEHAVED_NS = NPY_ALIGNED | NPY_WRITEABLE | NPY_NOTSWAPPED +NPY_CARRAY = NPY_C_CONTIGUOUS | NPY_BEHAVED +NPY_CARRAY_RO = NPY_C_CONTIGUOUS | NPY_ALIGNED +NPY_FARRAY = NPY_F_CONTIGUOUS | NPY_BEHAVED +NPY_FARRAY_RO = NPY_F_CONTIGUOUS | NPY_ALIGNED +NPY_DEFAULT = NPY_CARRAY +NPY_IN = NPY_CARRAY_RO +NPY_OUT = NPY_CARRAY +NPY_INOUT = NPY_CARRAY | NPY_UPDATEIFCOPY +NPY_IN_FARRAY = NPY_FARRAY_RO +NPY_OUT_FARRAY = NPY_FARRAY +NPY_INOUT_FARRAY = NPY_FARRAY | NPY_UPDATEIFCOPY +NPY_CONTIGUOUS = NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS +NPY_UPDATE_ALL = NPY_CONTIGUOUS | NPY_ALIGNED + # the asserts are needed, otherwise the translation fails @@ -29,12 +56,19 @@ w_type = space.gettypeobject(W_NDimArray.typedef) return space.is_w(w_obj_type, w_type) - @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def _PyArray_ISCONTIGUOUS(space, w_array): +def _PyArray_FLAGS(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.implementation.order == 'C' - + flags = NPY_BEHAVED_NS + if isinstance(w_array.implementation, ConcreteArray): + flags |= NPY_OWNDATA + if len(w_array.get_shape()) < 2: + flags |= NPY_CONTIGUOUS + elif w_array.implementation.order == 'C': + flags |= NPY_C_CONTIGUOUS + else: + flags |= NPY_F_CONTIGUOUS + return flags @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_NDIM(space, w_array): @@ -157,7 +191,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap("strides must be NULL")) - order = 'F' if flags & NPY_FORTRAN else 'C' + order = 'C' if flags & NPY_C_CONTIGUOUS else 'F' owning = True if flags & NPY_OWNDATA else False w_subtype = None diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -32,11 +32,16 @@ assert not api._PyArray_Check(x) assert not api._PyArray_CheckExact(x) - def test_ISCONTIGUOUS(self, space, api): - a = array(space, [10, 5, 3], order='C') + def test_FLAGS(self, space, api): + s = array(space, [10]) + c = array(space, [10, 5, 3], order='C') f = array(space, [10, 5, 3], order='F') - assert api._PyArray_ISCONTIGUOUS(a) == 1 - assert api._PyArray_ISCONTIGUOUS(f) == 0 + assert api._PyArray_FLAGS(s) & 0x0001 + assert api._PyArray_FLAGS(s) & 0x0002 + assert api._PyArray_FLAGS(c) & 0x0001 + assert api._PyArray_FLAGS(f) & 0x0002 + assert not api._PyArray_FLAGS(c) & 0x0002 + assert not api._PyArray_FLAGS(f) & 0x0001 def test_NDIM(self, space, api): a = array(space, [10, 5, 3]) From noreply at 
buildbot.pypy.org Mon Aug 26 22:02:58 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:58 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: Declare PyArray_Type as extern. Message-ID: <20130826200258.376171C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66345:8a17098648b8 Date: 2013-08-08 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/8a17098648b8/ Log: Declare PyArray_Type as extern. diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -18,7 +18,7 @@ #define PyArrayObject PyObject #define PyArray_Descr PyObject -//PyTypeObject PyArray_Type; +extern PyTypeObject PyArray_Type; typedef unsigned char npy_bool; typedef unsigned char npy_uint8; From noreply at buildbot.pypy.org Mon Aug 26 22:02:59 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:02:59 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: Implement PyNumber_Float() as space.float(w_obj). Message-ID: <20130826200259.6510F1C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66346:56fa5d73e5ec Date: 2013-08-10 00:33 +0200 http://bitbucket.org/pypy/pypy/changeset/56fa5d73e5ec/ Log: Implement PyNumber_Float() as space.float(w_obj). diff --git a/pypy/module/cpyext/floatobject.py b/pypy/module/cpyext/floatobject.py --- a/pypy/module/cpyext/floatobject.py +++ b/pypy/module/cpyext/floatobject.py @@ -25,7 +25,7 @@ """ Returns the o converted to a float object on success, or NULL on failure. This is the equivalent of the Python expression float(o).""" - return space.call_function(space.w_float, w_obj) + return space.float(w_obj) @cpython_api([PyObject, rffi.CCHARPP], PyObject) def PyFloat_FromString(space, w_obj, _): From noreply at buildbot.pypy.org Mon Aug 26 22:03:00 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:03:00 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: array() constructor now uses for __array__() method. Message-ID: <20130826200300.946061C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66347:82c65a5a6f82 Date: 2013-08-10 23:20 +0200 http://bitbucket.org/pypy/pypy/changeset/82c65a5a6f82/ Log: array() constructor now uses for __array__() method. 
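For context, a minimal app-level sketch of the protocol this changeset wires up: array() now consults an object's __array__() method (passing the requested dtype through) before falling back to plain sequence conversion, and re-feeds the returned array into array() so the remaining properties (order, ndmin, ...) are still applied. The Wrapper class below is a made-up illustration, not code from the changeset, and it assumes the numpypy module of this branch:

    from numpypy import array

    class Wrapper(object):
        """Hypothetical object exposing the __array__ protocol."""
        def __init__(self, data):
            self._data = data

        def __array__(self, dtype=None):
            # must return a real array; array() feeds the result back into
            # itself, so dtype/order/ndmin handling still happens there
            return array(self._data)

    a = array(Wrapper([1, 2, 3]))     # now resolved through Wrapper.__array__()
    assert (a == array([1, 2, 3])).all()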
diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -87,6 +87,7 @@ def convert_to_array(space, w_obj): + #XXX: This whole routine should very likely simply be array() from pypy.module.micronumpy.interp_numarray import array from pypy.module.micronumpy import interp_ufuncs diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -418,7 +418,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) - def descr___array__(self, space): + def descr___array__(self, space, w_dtype=None): # stub implementation of __array__() return self @@ -1113,13 +1113,27 @@ @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, ndmin=0): + # for anything that isn't already an array, try __array__ method first + if not isinstance(w_object, W_NDimArray): + w___array__ = space.lookup(w_object, "__array__") + if w___array__ is not None: + w_array = space.get_and_call_function(w___array__, w_object, w_dtype) + if isinstance(w_array, W_NDimArray): + # feed w_array back into array() for other properties + return array(space, w_array, w_dtype, False, w_order, subok, ndmin) + else: + raise operationerrfmt(space.w_ValueError, + "object __array__ method not producing an array") + + # scalars and strings w/o __array__ method isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: if space.is_none(w_dtype) or isstr: w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) return W_NDimArray.new_scalar(space, dtype, w_object) + if space.is_none(w_order): order = 'C' else: @@ -1128,6 +1142,7 @@ raise operationerrfmt(space.w_ValueError, "Unknown order: %s", order) + # arrays with correct dtype dtype = interp_dtype.decode_w_dtype(space, w_dtype) if isinstance(w_object, W_NDimArray) and \ (space.is_none(w_dtype) or w_object.get_dtype() is dtype): @@ -1144,6 +1159,8 @@ w_ret.implementation = w_ret.implementation.set_shape(space, w_ret, shape) return w_ret + + # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None or ( dtype.is_str_or_unicode() and dtype.itemtype.get_size() < 1): From noreply at buildbot.pypy.org Mon Aug 26 22:03:01 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:03:01 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: Implement ndarray.nonzero() Message-ID: <20130826200301.D77441C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. 
Muller Branch: pypy-pyarray Changeset: r66348:f0b2849dfe98 Date: 2013-08-11 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/f0b2849dfe98/ Log: Implement ndarray.nonzero() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -5,7 +5,7 @@ from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ ArrayArgumentException, issequence_w, wrap_impl from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ - interp_arrayops + interp_arrayops, iter from pypy.module.micronumpy.strides import find_shape_and_elems,\ get_shape_from_iterable, to_coords, shape_agreement, \ shape_agreement_multiple @@ -351,6 +351,31 @@ "order not implemented")) return self.descr_reshape(space, [space.wrap(-1)]) + def descr_nonzero(self, space): + impl = self.implementation + arr_iter = iter.MultiDimViewIterator(impl, impl.dtype, 0, + impl.strides, impl.backstrides, impl.shape) + + index_type = interp_dtype.get_dtype_cache(space).w_int64dtype + box = index_type.itemtype.box + + nd = len(impl.shape) + s = loop.count_all_true(self) + w_res = W_NDimArray.from_shape(space, [s, nd], index_type) + res_iter = w_res.create_iter() + + dims = range(nd) + while not arr_iter.done(): + if arr_iter.getitem_bool(): + for d in dims: + res_iter.setitem(box(arr_iter.indexes[d])) + res_iter.next() + arr_iter.next() + + w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) + l_w = [w_res.descr_getitem(space, space.wrap(d)) for d in dims] + return space.newtuple(l_w) + def descr_take(self, space, w_obj, w_axis=None, w_out=None): # if w_axis is None and w_out is Nont this is an equivalent to # fancy indexing @@ -707,7 +732,7 @@ descr_conj = _unaryop_impl('conjugate') - def descr_nonzero(self, space): + def descr___nonzero__(self, space): if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()")) @@ -995,7 +1020,7 @@ __neg__ = interp2app(W_NDimArray.descr_neg), __abs__ = interp2app(W_NDimArray.descr_abs), __invert__ = interp2app(W_NDimArray.descr_invert), - __nonzero__ = interp2app(W_NDimArray.descr_nonzero), + __nonzero__ = interp2app(W_NDimArray.descr___nonzero__), __add__ = interp2app(W_NDimArray.descr_add), __sub__ = interp2app(W_NDimArray.descr_sub), @@ -1070,6 +1095,7 @@ tolist = interp2app(W_NDimArray.descr_tolist), flatten = interp2app(W_NDimArray.descr_flatten), ravel = interp2app(W_NDimArray.descr_ravel), + nonzero = interp2app(W_NDimArray.descr_nonzero), take = interp2app(W_NDimArray.descr_take), compress = interp2app(W_NDimArray.descr_compress), repeat = interp2app(W_NDimArray.descr_repeat), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2300,6 +2300,13 @@ assert (arange(6).reshape(2, 3).ravel() == arange(6)).all() assert (arange(6).reshape(2, 3).T.ravel() == [0, 3, 1, 4, 2, 5]).all() + def test_nonzero(self): + from numpypy import array + a = array([[1, 0, 3], [2, 0, 4]]) + nz = a.nonzero() + assert (nz[0] == array([0, 0, 1, 1])).all() + assert (nz[1] == array([0, 2, 0, 2])).all() + def test_take(self): from numpypy import arange try: From noreply at buildbot.pypy.org Mon Aug 26 22:03:03 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:03:03 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: ndarray.nonzero(): Deal with scalars, add test. Message-ID: <20130826200303.1CCE21C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66349:01ebe4a52002 Date: 2013-08-11 21:09 +0200 http://bitbucket.org/pypy/pypy/changeset/01ebe4a52002/ Log: ndarray.nonzero(): Deal with scalars, add test. - lib_pypy/numpypy/core/fromnumeric.py: Put original numpy implementation back for nonzero(). 
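As a usage-level sketch of the behaviour covered by these nonzero() changesets, restating the assertions from the tests above and below (assumes the numpypy module of this branch):

    from numpypy import array

    # 2-D case: nonzero() returns a tuple with one index array per dimension
    a = array([[1, 0, 3], [2, 0, 4]])
    rows, cols = a.nonzero()
    assert (rows == array([0, 0, 1, 1])).all()
    assert (cols == array([0, 2, 0, 2])).all()

    # 0-d (scalar) case handled by this changeset: still a 1-tuple,
    # holding an empty or one-element index array
    assert array(0).nonzero()[0].size == 0
    assert (array(2).nonzero()[0] == array([0])).all()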
diff --git a/lib_pypy/numpypy/core/fromnumeric.py b/lib_pypy/numpypy/core/fromnumeric.py --- a/lib_pypy/numpypy/core/fromnumeric.py +++ b/lib_pypy/numpypy/core/fromnumeric.py @@ -1133,7 +1133,13 @@ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) """ - raise NotImplementedError('Waiting on interp level method') + try: + nonzero = a.nonzero + except AttributeError: + res = _wrapit(a, 'nonzero') + else: + res = nonzero() + return res def shape(a): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -352,15 +352,21 @@ return self.descr_reshape(space, [space.wrap(-1)]) def descr_nonzero(self, space): + s = loop.count_all_true(self) + index_type = interp_dtype.get_dtype_cache(space).w_int64dtype + box = index_type.itemtype.box + + if self.is_scalar(): + w_res = W_NDimArray.from_shape(space, [s], index_type) + if s == 1: + w_res.implementation.setitem(0, box(0)) + return space.newtuple([w_res]) + impl = self.implementation arr_iter = iter.MultiDimViewIterator(impl, impl.dtype, 0, impl.strides, impl.backstrides, impl.shape) - index_type = interp_dtype.get_dtype_cache(space).w_int64dtype - box = index_type.itemtype.box - nd = len(impl.shape) - s = loop.count_all_true(self) w_res = W_NDimArray.from_shape(space, [s, nd], index_type) res_iter = w_res.create_iter() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2302,8 +2302,13 @@ def test_nonzero(self): from numpypy import array - a = array([[1, 0, 3], [2, 0, 4]]) - nz = a.nonzero() + nz = array(0).nonzero() + assert nz[0].size == 0 + + nz = array(2).nonzero() + assert (nz[0] == array([0])).all() + + nz = array([[1, 0, 3], [2, 0, 4]]).nonzero() assert (nz[0] == array([0, 0, 1, 1])).all() assert (nz[1] == array([0, 2, 0, 2])).all() From noreply at buildbot.pypy.org Mon Aug 26 22:03:04 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:03:04 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: Put split nonzero() between scalar.py, concrete.py and loops.py. Message-ID: <20130826200304.539161C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66350:b70301c90922 Date: 2013-08-12 23:49 +0200 http://bitbucket.org/pypy/pypy/changeset/b70301c90922/ Log: Put split nonzero() between scalar.py, concrete.py and loops.py. - Separate implementations for 1D and ND case. Try to reunify in next commit. 
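For readers following the refactoring, a plain-Python sketch of the algorithm now split across scalar.py, concrete.py and loops.py: count the true elements, fill an [s, nd] array of indices, then transpose it and split it into one index array per dimension. Ordinary lists and tuples stand in here for the RPython iterators and W_NDimArray; the names are illustrative only:

    import itertools

    def nonzero_sketch(shape, getitem_bool):
        # getitem_bool(index_tuple) -> bool, standing in for the array iterator
        nd = len(shape)
        hits = [idx for idx in itertools.product(*[range(n) for n in shape])
                if getitem_bool(idx)]            # ~ count_all_true + fill loop
        # `hits` has shape [s, nd]; transposing it and splitting per dimension
        # mirrors the swapaxes() + per-dimension getitem done in concrete.py
        return tuple([h[d] for h in hits] for d in range(nd))

    data = [[1, 0, 3], [2, 0, 4]]
    result = nonzero_sketch((2, 3), lambda idx: data[idx[0]][idx[1]] != 0)
    assert result == ([0, 0, 1, 1], [0, 2, 0, 2])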
diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -279,6 +279,22 @@ return W_NDimArray.new_slice(space, self.start, strides, backstrides, shape, self, orig_arr) + def nonzero(self, space, index_type): + s = loop.count_all_true_concrete(self) + box = index_type.itemtype.box + nd = len(self.shape) + + if nd == 1: + w_res = W_NDimArray.from_shape(space, [s], index_type) + loop.nonzero_onedim(w_res, self, box) + return space.newtuple([w_res]) + else: + w_res = W_NDimArray.from_shape(space, [s, nd], index_type) + loop.nonzero_multidim(w_res, self, box) + w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) + l_w = [w_res.descr_getitem(space, space.wrap(d)) for d in range(nd)] + return space.newtuple(l_w) + def get_storage_as_int(self, space): return rffi.cast(lltype.Signed, self.storage) + self.start diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -155,6 +155,13 @@ def swapaxes(self, space, orig_array, axis1, axis2): raise Exception("should not be called") + def nonzero(self, space, index_type): + s = self.dtype.itemtype.bool(self.value) + w_res = W_NDimArray.from_shape(space, [s], index_type) + if s == 1: + w_res.implementation.setitem(0, index_type.itemtype.box(0)) + return space.newtuple([w_res]) + def fill(self, w_value): self.value = w_value diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -332,6 +332,10 @@ return self return self.implementation.swapaxes(space, self, axis1, axis2) + def descr_nonzero(self, space): + index_type = interp_dtype.get_dtype_cache(space).w_int64dtype + return self.implementation.nonzero(space, index_type) + def descr_tolist(self, space): if len(self.get_shape()) == 0: return self.get_scalar_value().item(space) @@ -351,37 +355,6 @@ "order not implemented")) return self.descr_reshape(space, [space.wrap(-1)]) - def descr_nonzero(self, space): - s = loop.count_all_true(self) - index_type = interp_dtype.get_dtype_cache(space).w_int64dtype - box = index_type.itemtype.box - - if self.is_scalar(): - w_res = W_NDimArray.from_shape(space, [s], index_type) - if s == 1: - w_res.implementation.setitem(0, box(0)) - return space.newtuple([w_res]) - - impl = self.implementation - arr_iter = iter.MultiDimViewIterator(impl, impl.dtype, 0, - impl.strides, impl.backstrides, impl.shape) - - nd = len(impl.shape) - w_res = W_NDimArray.from_shape(space, [s, nd], index_type) - res_iter = w_res.create_iter() - - dims = range(nd) - while not arr_iter.done(): - if arr_iter.getitem_bool(): - for d in dims: - res_iter.setitem(box(arr_iter.indexes[d])) - res_iter.next() - arr_iter.next() - - w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) - l_w = [w_res.descr_getitem(space, space.wrap(d)) for d in dims] - return space.newtuple(l_w) - def descr_take(self, space, w_obj, w_axis=None, w_out=None): # if w_axis is None and w_out is Nont this is an equivalent to # fancy indexing @@ -1101,11 +1074,11 @@ tolist = interp2app(W_NDimArray.descr_tolist), flatten = interp2app(W_NDimArray.descr_flatten), ravel = interp2app(W_NDimArray.descr_ravel), - nonzero = interp2app(W_NDimArray.descr_nonzero), take = 
interp2app(W_NDimArray.descr_take), compress = interp2app(W_NDimArray.descr_compress), repeat = interp2app(W_NDimArray.descr_repeat), swapaxes = interp2app(W_NDimArray.descr_swapaxes), + nonzero = interp2app(W_NDimArray.descr_nonzero), flat = GetSetProperty(W_NDimArray.descr_get_flatiter), item = interp2app(W_NDimArray.descr_item), real = GetSetProperty(W_NDimArray.descr_get_real, diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -9,7 +9,8 @@ from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import PureShapeIterator +from pypy.module.micronumpy.iter import PureShapeIterator, OneDimViewIterator, \ + MultiDimViewIterator from pypy.module.micronumpy import constants from pypy.module.micronumpy.support import int_w @@ -323,19 +324,61 @@ greens = ['shapelen', 'dtype'], reds = 'auto') -def count_all_true(arr): +def count_all_true_concrete(impl): s = 0 - if arr.is_scalar(): - return arr.get_dtype().itemtype.bool(arr.get_scalar_value()) - iter = arr.create_iter() - shapelen = len(arr.get_shape()) - dtype = arr.get_dtype() + iter = impl.create_iter() + shapelen = len(impl.shape) + dtype = impl.dtype while not iter.done(): count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) s += iter.getitem_bool() iter.next() return s +def count_all_true(arr): + if arr.is_scalar(): + return arr.get_dtype().itemtype.bool(arr.get_scalar_value()) + else: + return count_all_true_concrete(arr.implementation) + +nonzero_driver_onedim = jit.JitDriver(name = 'numpy_nonzero_onedim', + greens = ['shapelen', 'dtype'], + reds = 'auto') + +def nonzero_onedim(res, arr, box): + res_iter = res.create_iter() + arr_iter = OneDimViewIterator(arr, arr.dtype, 0, + arr.strides, arr.shape) + shapelen = 1 + dtype = arr.dtype + while not arr_iter.done(): + nonzero_driver_onedim.jit_merge_point(shapelen=shapelen, dtype=dtype) + if arr_iter.getitem_bool(): + res_iter.setitem(box(arr_iter.index)) + res_iter.next() + arr_iter.next() + return res + +nonzero_driver_multidim = jit.JitDriver(name = 'numpy_nonzero_onedim', + greens = ['shapelen', 'dims', 'dtype'], + reds = 'auto') + +def nonzero_multidim(res, arr, box): + res_iter = res.create_iter() + arr_iter = MultiDimViewIterator(arr, arr.dtype, 0, + arr.strides, arr.backstrides, arr.shape) + shapelen = len(arr.shape) + dtype = arr.dtype + dims = range(shapelen) + while not arr_iter.done(): + nonzero_driver_multidim.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) + if arr_iter.getitem_bool(): + for d in dims: + res_iter.setitem(box(arr_iter.indexes[d])) + res_iter.next() + arr_iter.next() + return res + getitem_filter_driver = jit.JitDriver(name = 'numpy_getitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1348,7 +1348,7 @@ for i in xrange(5): assert c[i] == func(b[i], 3) - def test_nonzero(self): + def test___nonzero__(self): from numpypy import array a = array([1, 2]) raises(ValueError, bool, a) @@ -2306,11 +2306,14 @@ assert nz[0].size == 0 nz = array(2).nonzero() - assert (nz[0] == array([0])).all() + assert (nz[0] == [0]).all() + + nz = array([1, 0, 3]).nonzero() + assert (nz[0] == [0, 2]).all() nz = array([[1, 0, 3], [2, 
0, 4]]).nonzero() - assert (nz[0] == array([0, 0, 1, 1])).all() - assert (nz[1] == array([0, 2, 0, 2])).all() + assert (nz[0] == [0, 0, 1, 1]).all() + assert (nz[1] == [0, 2, 0, 2]).all() def test_take(self): from numpypy import arange From noreply at buildbot.pypy.org Mon Aug 26 22:03:05 2013 From: noreply at buildbot.pypy.org (shmuller) Date: Mon, 26 Aug 2013 22:03:05 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: Implement fijal's approach for nonzero() Message-ID: <20130826200305.8C0DA1C13EE@cobra.cs.uni-duesseldorf.de> Author: Stefan H. Muller Branch: pypy-pyarray Changeset: r66351:81c7341f996b Date: 2013-08-13 01:06 +0200 http://bitbucket.org/pypy/pypy/changeset/81c7341f996b/ Log: Implement fijal's approach for nonzero() - Iterators get get_index() method. - create_iter() get additional keyword argument 'require_index'. diff --git a/pypy/module/micronumpy/arrayimpl/base.py b/pypy/module/micronumpy/arrayimpl/base.py --- a/pypy/module/micronumpy/arrayimpl/base.py +++ b/pypy/module/micronumpy/arrayimpl/base.py @@ -6,7 +6,7 @@ def base(self): raise NotImplementedError - def create_iter(self, shape=None, backward_broadcast=False): + def create_iter(self, shape=None, backward_broadcast=False, require_index=False): raise NotImplementedError class BaseArrayIterator(object): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -282,18 +282,12 @@ def nonzero(self, space, index_type): s = loop.count_all_true_concrete(self) box = index_type.itemtype.box - nd = len(self.shape) - - if nd == 1: - w_res = W_NDimArray.from_shape(space, [s], index_type) - loop.nonzero_onedim(w_res, self, box) - return space.newtuple([w_res]) - else: - w_res = W_NDimArray.from_shape(space, [s, nd], index_type) - loop.nonzero_multidim(w_res, self, box) - w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) - l_w = [w_res.descr_getitem(space, space.wrap(d)) for d in range(nd)] - return space.newtuple(l_w) + nd = len(self.get_shape()) + w_res = W_NDimArray.from_shape(space, [s, nd], index_type) + loop.nonzero(w_res, self, box) + w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) + l_w = [w_res.descr_getitem(space, space.wrap(d)) for d in range(nd)] + return space.newtuple(l_w) def get_storage_as_int(self, space): return rffi.cast(lltype.Signed, self.storage) + self.start @@ -331,13 +325,22 @@ self.backstrides = backstrides self.storage = storage - def create_iter(self, shape=None, backward_broadcast=False): - if shape is None or shape == self.get_shape(): + def create_iter(self, shape=None, backward_broadcast=False, require_index=False): + if shape is not None and shape != self.get_shape(): + r = calculate_broadcast_strides(self.get_strides(), + self.get_backstrides(), + self.get_shape(), shape, backward_broadcast) + return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) + + if not require_index: return iter.ConcreteArrayIterator(self) - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, backward_broadcast) - return iter.MultiDimViewIterator(self, self.dtype, 0, r[0], r[1], shape) + else: + if len(self.get_shape()) == 1: + return iter.OneDimViewIterator(self, self.dtype, self.start, + self.get_strides(), self.get_shape()) + else: + return iter.MultiDimViewIterator(self, self.dtype, self.start, + self.get_strides(), self.get_backstrides(), self.get_shape()) def 
fill(self, box): self.dtype.fill(self.storage, box, 0, self.size) @@ -400,7 +403,7 @@ def fill(self, box): loop.fill(self, box.convert_to(self.dtype)) - def create_iter(self, shape=None, backward_broadcast=False): + def create_iter(self, shape=None, backward_broadcast=False, require_index=False): if shape is not None and shape != self.get_shape(): r = calculate_broadcast_strides(self.get_strides(), self.get_backstrides(), diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -45,7 +45,7 @@ def get_backstrides(self): return [] - def create_iter(self, shape=None, backward_broadcast=False): + def create_iter(self, shape=None, backward_broadcast=False, require_index=False): return ScalarIterator(self) def get_scalar_value(self): diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -19,7 +19,7 @@ def get_shape(self): return self.shape - def create_iter(self, shape=None, backward_broadcast=False): + def create_iter(self, shape=None, backward_broadcast=False, require_index=False): assert isinstance(self.base(), W_NDimArray) return self.base().create_iter() diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -230,10 +230,11 @@ s.append('])') return s.build() - def create_iter(self, shape=None, backward_broadcast=False): + def create_iter(self, shape=None, backward_broadcast=False, require_index=False): assert isinstance(self.implementation, BaseArrayImplementation) return self.implementation.create_iter(shape=shape, - backward_broadcast=backward_broadcast) + backward_broadcast=backward_broadcast, + require_index=require_index) def create_axis_iter(self, shape, dim, cum): return self.implementation.create_axis_iter(shape, dim, cum) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -215,6 +215,9 @@ def reset(self): self.offset %= self.size + def get_index(self, d): + return self.index + class MultiDimViewIterator(ConcreteArrayIterator): ''' The view iterator dtype can be different from the array.dtype, this is what makes it a View @@ -268,6 +271,9 @@ def reset(self): self.offset %= self.size + def get_index(self, d): + return self.indexes[d] + class AxisIterator(base.BaseArrayIterator): def __init__(self, array, shape, dim, cumultative): self.shape = shape diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -9,8 +9,7 @@ from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import PureShapeIterator, OneDimViewIterator, \ - MultiDimViewIterator +from pypy.module.micronumpy.iter import PureShapeIterator from pypy.module.micronumpy import constants from pypy.module.micronumpy.support import int_w @@ -341,40 +340,21 @@ else: return count_all_true_concrete(arr.implementation) -nonzero_driver_onedim = jit.JitDriver(name = 'numpy_nonzero_onedim', - greens = ['shapelen', 'dtype'], - reds = 'auto') +nonzero_driver = jit.JitDriver(name = 'numpy_nonzero', + greens = 
['shapelen', 'dims', 'dtype'], + reds = 'auto') -def nonzero_onedim(res, arr, box): +def nonzero(res, arr, box): res_iter = res.create_iter() - arr_iter = OneDimViewIterator(arr, arr.dtype, 0, - arr.strides, arr.shape) - shapelen = 1 - dtype = arr.dtype - while not arr_iter.done(): - nonzero_driver_onedim.jit_merge_point(shapelen=shapelen, dtype=dtype) - if arr_iter.getitem_bool(): - res_iter.setitem(box(arr_iter.index)) - res_iter.next() - arr_iter.next() - return res - -nonzero_driver_multidim = jit.JitDriver(name = 'numpy_nonzero_onedim', - greens = ['shapelen', 'dims', 'dtype'], - reds = 'auto') - -def nonzero_multidim(res, arr, box): - res_iter = res.create_iter() - arr_iter = MultiDimViewIterator(arr, arr.dtype, 0, - arr.strides, arr.backstrides, arr.shape) + arr_iter = arr.create_iter(require_index=True) shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) while not arr_iter.done(): - nonzero_driver_multidim.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) + nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) if arr_iter.getitem_bool(): for d in dims: - res_iter.setitem(box(arr_iter.indexes[d])) + res_iter.setitem(box(arr_iter.get_index(d))) res_iter.next() arr_iter.next() return res From noreply at buildbot.pypy.org Mon Aug 26 22:03:06 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 26 Aug 2013 22:03:06 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: add TODO tasks for merging this branch Message-ID: <20130826200306.BC6201C13EE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: pypy-pyarray Changeset: r66352:d42d210cabf3 Date: 2013-08-26 23:00 +0300 http://bitbucket.org/pypy/pypy/changeset/d42d210cabf3/ Log: add TODO tasks for merging this branch diff --git a/TODO.txt b/TODO.txt new file mode 100644 --- /dev/null +++ b/TODO.txt @@ -0,0 +1,22 @@ +- test array.nonzero() +- test "from numpypy import *" esp. get_include() +- test "import numpy" emitting warning not error +- test all *.h files under pypy/module/cpyext/include/numpy +- make sure all cpyext changes are tested: + PyBoolObject (new) + PyComplexFromCComplex() (changed, problematic) + PyFunctionType (new) + PyMethodType (new) + PyRangeType (new) + PyTracebackType (new) + _PyPackageContext (new) + Py*Flag (most new, some changed) in pythonrun.h + all ndarrayobject.c + copy_header_files() in api.py (changed) + all ndarrayobject.py (new) + PyNumberCoerceEx() (new) + PyNumberCoerce() (new) +- test require_index in create_iter, get_index in iter +- test use of __array__() and friends +- test complex data types in dtypes_by_num +- From noreply at buildbot.pypy.org Mon Aug 26 22:30:45 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 26 Aug 2013 22:30:45 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130826203045.A37101C0189@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r66353:45b18e9c3c6b Date: 2013-08-26 13:14 -0700 http://bitbucket.org/pypy/pypy/changeset/45b18e9c3c6b/ Log: merge default diff too long, truncating to 2000 out of 4091 lines diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -339,9 +339,10 @@ + methods and other class attributes do not change after startup + single inheritance is fully supported -+ simple mixins somewhat work too, but the mixed in class needs a - ``_mixin_ = True`` class attribute. isinstance checks against the - mixin type will fail when translated. 
++ use `rpython.rlib.objectmodel.import_from_mixin(M)` in a class + body to copy the whole content of a class `M`. This can be used + to implement mixins: functions and staticmethods are duplicated + (the other class attributes are just copied unmodified). + classes are first-class objects too diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -57,6 +57,12 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + On SLES11: + + $ sudo zypper install gcc make python-devel pkg-config \ + zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ + libexpat-devel libffi-devel python-curses + The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. * ``pkg-config`` (to help us locate libffi files) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,4 @@ .. branch: dotviewer-linewidth .. branch: reflex-support .. branch: numpypy-inplace-op +.. branch: rewritten-loop-logging diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -6,6 +6,10 @@ The following text gives some hints about how to translate the PyPy interpreter. +PyPy supports only being translated as a 32bit program, even on +64bit Windows. See at the end of this page for what is missing +for a full 64bit translation. + To build pypy-c you need a C compiler. Microsoft Visual Studio is preferred, but can also use the mingw32 port of gcc. @@ -63,7 +67,7 @@ INCLUDE, LIB and PATH (for DLLs) environment variables appropriately. Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local.zip @@ -112,13 +116,14 @@ nmake -f makefile.msc The sqlite3 database library -~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.sqlite.org/2013/sqlite-amalgamation-3071601.zip and extract it into a directory under the base directory. Also get http://www.sqlite.org/2013/sqlite-dll-win32-x86-3071601.zip and extract the dll into the bin directory, and the sqlite3.def into the sources directory. Now build the import library so cffi can use the header and dll:: + lib /DEF:sqlite3.def" /OUT:sqlite3.lib" copy sqlite3.lib path\to\libs @@ -206,8 +211,86 @@ March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. -.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds .. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds .. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. 
_`RPython translation toolchain`: translation.html + + +What is missing for a full 64-bit translation +--------------------------------------------- + +The main blocker is that we assume that the integer type of RPython is +large enough to (occasionally) contain a pointer value cast to an +integer. The simplest fix is to make sure that it is so, but it will +give the following incompatibility between CPython and PyPy on Win64: + +CPython: ``sys.maxint == 2**32-1, sys.maxsize == 2**64-1`` + +PyPy: ``sys.maxint == sys.maxsize == 2**64-1`` + +...and, correspondingly, PyPy supports ints up to the larger value of +sys.maxint before they are converted to ``long``. The first decision +that someone needs to make is if this incompatibility is reasonable. + +Assuming that it is, the first thing to do is probably to hack *CPython* +until it fits this model: replace the field in PyIntObject with a ``long +long`` field, and change the value of ``sys.maxint``. This might just +work, even if half-brokenly: I'm sure you can crash it because of the +precision loss that undoubtedly occurs everywhere, but try not to. :-) + +Such a hacked CPython is what you'll use in the next steps. We'll call +it CPython64/64. + +It is probably not too much work if the goal is only to get a translated +PyPy executable, and to run all tests before transaction. But you need +to start somewhere, and you should start with some tests in +rpython/translator/c/test/, like ``test_standalone.py`` and +``test_newgc.py``: try to have them pass on top of CPython64/64. + +Keep in mind that this runs small translations, and some details may go +wrong. The most obvious one is to check that it produces C files that +use the integer type ``Signed`` --- but what is ``Signed`` defined to? +It should be equal to ``long`` on every other platforms, but on Win64 it +should be something like ``long long``. + +What is more generally needed is to review all the C files in +rpython/translator/c/src for the word ``long``, because this means a +32-bit integer even on Win64. Replace it with ``Signed`` most of the +times. You can replace one with the other without breaking anything on +any other platform, so feel free to. + +Then, these two C types have corresponding RPython types: ``rffi.LONG`` +and ``lltype.Signed`` respectively. The first should really correspond +to the C ``long``. Add tests that check that integers casted to one +type or the other really have 32 and 64 bits respectively, on Win64. + +Once these basic tests work, you need to review ``rpython/rlib/`` for +usages of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to +fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as +always run on top of CPython64/64. Note that there was some early work +done in ``rpython/rlib/rarithmetic`` with the goal of running all the +tests on Win64 on the regular CPython, but I think by now that it's a +bad idea. Look only at CPython64/64. + +The major intermediate goal is to get a translation of PyPy with ``-O2`` +with a minimal set of modules, starting with ``--no-allworkingmodules``; +you need to use CPython64/64 to run this translation too. Check +carefully the warnings of the C compiler at the end. I think that MSVC +is "nice" in the sense that by default a lot of mismatches of integer +sizes are reported as warnings. + +Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` +issues. At some time during this review, we get a working translated +PyPy on Windows 64 that includes all ``--translationmodules``, i.e. 
+everything needed to run translations. When we are there, the hacked +CPython64/64 becomes much less important, because we can run future +translations on top of this translated PyPy. As soon as we get there, +please *distribute* the translated PyPy. It's an essential component +for anyone else that wants to work on Win64! We end up with a strange +kind of dependency --- we need a translated PyPy in order to translate a +PyPy ---, but I believe it's ok here, as Windows executables are +supposed to never be broken by newer versions of Windows. + +Happy hacking :-) diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -19,7 +19,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.objectmodel import compute_hash, import_from_mixin from rpython.rlib.rstring import StringBuilder @@ -278,8 +278,6 @@ # ____________________________________________________________ class SubBufferMixin(object): - _mixin_ = True - def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -303,10 +301,11 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) -class SubBuffer(SubBufferMixin, Buffer): - pass +class SubBuffer(Buffer): + import_from_mixin(SubBufferMixin) -class RWSubBuffer(SubBufferMixin, RWBuffer): +class RWSubBuffer(RWBuffer): + import_from_mixin(SubBufferMixin) def setitem(self, index, char): self.buffer.setitem(self.offset + index, char) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -34,7 +34,7 @@ _thread.interrupt_main() for i in range(10): print('x') - time.sleep(0.1) + time.sleep(0.25) except BaseException as e: interrupted.append(e) finally: @@ -59,7 +59,7 @@ for j in range(10): if len(done): break print('.') - time.sleep(0.1) + time.sleep(0.25) print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 @@ -117,7 +117,7 @@ def subthread(): try: - time.sleep(0.25) + time.sleep(0.5) with __pypy__.thread.signals_enabled: _thread.interrupt_main() except BaseException as e: diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -10,7 +10,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi - +from pypy.objspace.std.floatobject import W_FloatObject @unwrap_spec(typecode=str) def w_array(space, w_cls, typecode, __args__): @@ -571,7 +571,7 @@ class TypeCode(object): - def __init__(self, itemtype, unwrap, canoverflow=False, signed=False): + def __init__(self, itemtype, unwrap, canoverflow=False, signed=False, method='__int__'): self.itemtype = itemtype self.bytes = rffi.sizeof(itemtype) self.arraytype = lltype.Array(itemtype, hints={'nolength': True}) @@ -579,6 +579,7 @@ self.signed = signed self.canoverflow = canoverflow self.w_class = None + self.method = method if self.canoverflow: assert self.bytes <= rffi.sizeof(rffi.ULONG) @@ -597,7 +598,7 @@ types = { - 'u': TypeCode(lltype.UniChar, 'unicode_w'), + 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), 'b': 
TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True), 'B': TypeCode(rffi.UCHAR, 'int_w', True), 'h': TypeCode(rffi.SHORT, 'int_w', True, True), @@ -609,8 +610,8 @@ # rbigint.touint() which # corresponds to the # C-type unsigned long - 'f': TypeCode(lltype.SingleFloat, 'float_w'), - 'd': TypeCode(lltype.Float, 'float_w'), + 'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'), + 'd': TypeCode(lltype.Float, 'float_w', method='__float__'), } for k, v in types.items(): v.typecode = k @@ -674,7 +675,19 @@ def item_w(self, w_item): space = self.space unwrap = getattr(space, mytype.unwrap) - item = unwrap(w_item) + try: + item = unwrap(w_item) + except OperationError, e: + if isinstance(w_item, W_FloatObject): # Odd special case from cpython + raise + if mytype.method != '' and e.match(space, space.w_TypeError): + try: + item = unwrap(space.call_method(w_item, mytype.method)) + except OperationError: + msg = 'array item must be ' + mytype.unwrap[:-2] + raise OperationError(space.w_TypeError, space.wrap(msg)) + else: + raise if mytype.unwrap == 'bigint_w': try: item = item.touint() diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -919,6 +919,14 @@ raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) + a = array('c', 'hi') + a[0] = 'b' + assert a[0] == 'b' + + a = array('u', u'hi') + a[0] = u'b' + assert a[0] == u'b' + def test_bytearray(self): a = self.array('u', 'hi') b = self.array('u') diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -685,7 +685,7 @@ name='string', char='S', w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str], + alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -777,6 +777,11 @@ from numpypy import unicode_ assert isinstance(unicode_(3), str) + def test_character_dtype(self): + from numpypy import array, character + x = array([["A", "B"], ["C", "D"]], character) + assert (x == [["A", "B"], ["C", "D"]]).all() + class AppTestRecordDtypes(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def test_create(self): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1922,6 +1922,12 @@ a = numpy.arange(10.).reshape((5, 2))[::2] assert (loads(dumps(a)) == a).all() + def test_string_filling(self): + import numpypy as numpy + a = numpy.empty((10,10), dtype='c1') + a.fill(12) + assert (a == '1').all() + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -257,6 +257,7 @@ def test_rint(self): from numpypy import array, complex, rint, isnan + import sys nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') @@ -271,6 +272,8 @@ 
assert rint(complex(inf, 1.5)) == complex(inf, 2.) assert rint(complex(0.5, inf)) == complex(0., inf) + assert rint(sys.maxint) > 0.0 + def test_sign(self): from numpypy import array, sign, dtype diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -308,13 +308,6 @@ return min(v1, v2) @simple_unary_op - def rint(self, v): - if isfinite(v): - return rfloat.round_double(v, 0, half_even=True) - else: - return v - - @simple_unary_op def ones_like(self, v): return 1 @@ -322,6 +315,10 @@ def zeros_like(self, v): return 0 + @raw_unary_op + def rint(self, v): + float64 = Float64() + return float64.rint(float64.box(v)) class NonNativePrimitive(Primitive): _mixin_ = True @@ -1036,6 +1033,25 @@ else: return v1 + v2 + @simple_unary_op + def rint(self, v): + x = float(v) + if isfinite(x): + import math + y = math.floor(x) + r = x - y + + if r > 0.5: + y += 1.0 + + if r == 0.5: + r = y - 2.0 * math.floor(0.5 * y) + if r == 1.0: + y += 1.0 + return y + else: + return x + class NonNativeFloat(NonNativePrimitive, Float): _mixin_ = True @@ -1748,12 +1764,16 @@ arr.storage[i] = arg[i] return interp_boxes.W_StringBox(arr, 0, arr.dtype) - @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) # XXX simplify to range(box.dtype.get_size()) ? + return self._store(arr.storage, i, offset, box) + + @jit.unroll_safe + def _store(self, storage, i, offset, box): + assert isinstance(box, interp_boxes.W_StringBox) for k in range(min(self.size, box.arr.size-offset)): - arr.storage[k + i] = box.arr.storage[k + offset] + storage[k + i] = box.arr.storage[k + offset] def read(self, arr, i, offset, dtype=None): if dtype is None: @@ -1843,6 +1863,11 @@ arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) + def fill(self, storage, width, box, start, stop, offset): + from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArrayNotOwning + for i in xrange(start, stop, width): + self._store(storage, i, offset, box) + NonNativeStringType = StringType class UnicodeType(BaseType, BaseStringType): diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -122,7 +122,7 @@ greens=['w_type'], reds='auto') class DescrOperation(object): - _mixin_ = True + # This is meant to be a *mixin*. 
def is_data_descr(space, w_obj): return space.lookup(w_obj, '__set__') is not None @@ -697,12 +697,12 @@ elif _arity == 2 and len(_specialnames) == 2: #print "binop", _specialnames _impl_maker = _make_binop_impl - elif _arity == 1 and len(_specialnames) == 1: + elif _arity == 1 and len(_specialnames) == 1 and _name != 'int': #print "unaryop", _specialnames _impl_maker = _make_unaryop_impl if _impl_maker: setattr(DescrOperation,_name,_impl_maker(_symbol,_specialnames)) - elif _name not in ['is_', 'id','type','issubtype', + elif _name not in ['is_', 'id','type','issubtype', 'int', # not really to be defined in DescrOperation 'ord', 'unichr', 'unicode']: raise Exception, "missing def for operation %s" % _name diff --git a/pypy/objspace/std/builtinshortcut.py b/pypy/objspace/std/builtinshortcut.py --- a/pypy/objspace/std/builtinshortcut.py +++ b/pypy/objspace/std/builtinshortcut.py @@ -126,6 +126,7 @@ w_obj = w_res # general case fallback - return DescrOperation.is_true(space, w_obj) + return _DescrOperation_is_true(space, w_obj) + _DescrOperation_is_true = DescrOperation.is_true.im_func space.is_true = is_true diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -4,7 +4,6 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std import stringobject from pypy.objspace.std.bytearraytype import new_bytearray -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement @@ -31,7 +30,7 @@ def len__Bytearray(space, w_bytearray): result = len(w_bytearray.data) - return wrapint(space, result) + return space.newint(result) def ord__Bytearray(space, w_bytearray): if len(w_bytearray.data) != 1: diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1184,10 +1184,7 @@ return _all_contained_in(space, self, w_other) return space.w_False - def descr_ne(self, space, w_other): - if not _is_set_like(w_other): - return space.w_NotImplemented - return space.not_(space.eq(self, w_other)) + descr_ne = negate(descr_eq) def descr_lt(self, space, w_other): if not _is_set_like(w_other): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,20 +1,22 @@ +"""The builtin int implementation + +In order to have the same behavior running on CPython, and after RPython +translation this module uses rarithmetic.ovfcheck to explicitly check +for overflows, something CPython does not do anymore. 
+""" + +from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int, ovfcheck, r_uint +from rpython.rlib.rbigint import rbigint + from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat -from pypy.objspace.std.inttype import wrapint, W_AbstractIntObject -from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.inttype import W_AbstractIntObject +from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from rpython.rlib import jit -from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, r_uint, is_valid_int -from rpython.rlib.rbigint import rbigint -""" -In order to have the same behavior running -on CPython, and after RPython translation we use ovfcheck -from rarithmetic to explicitly check for overflows, -something CPython does not do anymore. -""" class W_IntObject(W_AbstractIntObject): __slots__ = 'intval' @@ -22,28 +24,29 @@ # from pypy.objspace.std.inttype import int_typedef as typedef - def __init__(w_self, intval): + def __init__(self, intval): assert is_valid_int(intval) - w_self.intval = intval + self.intval = intval - def __repr__(w_self): - """ representation for debugging purposes """ - return "%s(%d)" % (w_self.__class__.__name__, w_self.intval) + def __repr__(self): + """representation for debugging purposes""" + return "%s(%d)" % (self.__class__.__name__, self.intval) - def unwrap(w_self, space): - return int(w_self.intval) + def unwrap(self, space): + return int(self.intval) int_w = unwrap - def uint_w(w_self, space): - intval = w_self.intval + def uint_w(self, space): + intval = self.intval if intval < 0: - raise OperationError(space.w_ValueError, - space.wrap("cannot convert negative integer to unsigned")) + raise OperationError( + space.w_ValueError, + space.wrap("cannot convert negative integer to unsigned")) else: return r_uint(intval) - def bigint_w(w_self, space): - return rbigint.fromint(w_self.intval) + def bigint_w(self, space): + return rbigint.fromint(self.intval) def float_w(self, space): return float(self.intval) @@ -55,7 +58,7 @@ if space.is_w(space.type(self), space.w_int): return self a = self.intval - return wrapint(space, a) + return space.newint(a) #registerimplementation(W_IntObject) @@ -100,7 +103,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer addition")) - return wrapint(space, z) + return space.newint(z) def sub__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -110,7 +113,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer substraction")) - return wrapint(space, z) + return space.newint(z) def mul__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -120,7 +123,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer multiplication")) - return wrapint(space, z) + return space.newint(z) def floordiv__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -133,14 +136,15 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer division")) - return wrapint(space, z) + return space.newint(z) div__Int_Int = floordiv__Int_Int def truediv__Int_Int(space, w_int1, w_int2): x = float(w_int1.intval) y = float(w_int2.intval) if y == 0.0: - raise FailedToImplementArgs(space.w_ZeroDivisionError, 
space.wrap("float division")) + raise FailedToImplementArgs(space.w_ZeroDivisionError, + space.wrap("float division")) return space.wrap(x / y) def mod__Int_Int(space, w_int1, w_int2): @@ -154,7 +158,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer modulo")) - return wrapint(space, z) + return space.newint(z) def divmod__Int_Int(space, w_int1, w_int2): x = w_int1.intval @@ -227,7 +231,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer negation")) - return wrapint(space, x) + return space.newint(x) get_negint = neg__Int @@ -243,7 +247,7 @@ def invert__Int(space, w_int1): x = w_int1.intval a = ~x - return wrapint(space, a) + return space.newint(a) def lshift__Int_Int(space, w_int1, w_int2): a = w_int1.intval @@ -254,7 +258,7 @@ except OverflowError: raise FailedToImplementArgs(space.w_OverflowError, space.wrap("integer left shift")) - return wrapint(space, c) + return space.newint(c) if b < 0: raise OperationError(space.w_ValueError, space.wrap("negative shift count")) @@ -280,25 +284,25 @@ a = 0 else: a = a >> b - return wrapint(space, a) + return space.newint(a) def and__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a & b - return wrapint(space, res) + return space.newint(res) def xor__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a ^ b - return wrapint(space, res) + return space.newint(res) def or__Int_Int(space, w_int1, w_int2): a = w_int1.intval b = w_int2.intval res = a | b - return wrapint(space, res) + return space.newint(res) def pos__Int(self, space): return self.int(space) @@ -313,7 +317,7 @@ return space.newfloat(x) def getnewargs__Int(space, w_int1): - return space.newtuple([wrapint(space, w_int1.intval)]) + return space.newtuple([space.newint(w_int1.intval)]) register_all(vars()) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -19,7 +19,6 @@ from pypy.objspace.std import slicetype from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.iterobject import (W_FastListIterObject, W_ReverseSeqIterObject) from pypy.objspace.std.sliceobject import W_SliceObject @@ -431,7 +430,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): return W_FastListIterObject(self) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -1,26 +1,31 @@ +"""The builtin long implementation""" + import sys + +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rbigint import SHIFT, _widen_digit, rbigint + from pypy.interpreter.error import OperationError from pypy.objspace.std import model, newformat -from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.longtype import W_AbstractLongObject, long_typedef +from pypy.objspace.std.model import W_Object, registerimplementation +from pypy.objspace.std.multimethod import FailedToImplementArgs +from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.multimethod import FailedToImplementArgs -from 
pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.noneobject import W_NoneObject -from rpython.rlib.rarithmetic import intmask -from rpython.rlib.rbigint import SHIFT, _widen_digit, rbigint -from pypy.objspace.std.longtype import long_typedef, W_AbstractLongObject HASH_BITS = 61 if sys.maxsize > 2 ** 31 - 1 else 31 HASH_MODULUS = 2 ** HASH_BITS - 1 + class W_LongObject(W_AbstractLongObject): """This is a wrapper of rbigint.""" _immutable_fields_ = ['num'] typedef = long_typedef - def __init__(w_self, l): - w_self.num = l # instance of rbigint + def __init__(self, l): + self.num = l # instance of rbigint def fromint(space, intval): return W_LongObject(rbigint.fromint(intval)) @@ -55,16 +60,16 @@ fromrarith_int._annspecialcase_ = "specialize:argtype(0)" fromrarith_int = staticmethod(fromrarith_int) - def int_w(w_self, space): + def int_w(self, space): try: - return w_self.num.toint() + return self.num.toint() except OverflowError: raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to int")) - def uint_w(w_self, space): + def uint_w(self, space): try: - return w_self.num.touint() + return self.num.touint() except ValueError: raise OperationError(space.w_ValueError, space.wrap( "cannot convert negative integer to unsigned int")) @@ -72,8 +77,8 @@ raise OperationError(space.w_OverflowError, space.wrap( "long int too large to convert to unsigned int")) - def bigint_w(w_self, space): - return w_self.num + def bigint_w(self, space): + return self.num def float_w(self, space): return self.tofloat(space) @@ -346,7 +351,8 @@ sys.maxint == 2147483647) # binary ops -for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', 'divmod', 'lshift']: +for opname in ['add', 'sub', 'mul', 'div', 'floordiv', 'truediv', 'mod', + 'divmod', 'lshift']: exec compile(""" def %(opname)s_ovr__Int_Int(space, w_int1, w_int2): if recover_with_smalllong(space) and %(opname)r != 'truediv': diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -11,7 +11,7 @@ from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rarithmetic import base_int, widen, is_valid_int -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, import_from_mixin from rpython.rlib import jit # Object imports @@ -39,9 +39,10 @@ from pypy.objspace.std.stringtype import wrapstr from pypy.objspace.std.unicodetype import wrapunicode -class StdObjSpace(ObjSpace, DescrOperation): +class StdObjSpace(ObjSpace): """The standard object space, implementing a general-purpose object library in Restricted Python.""" + import_from_mixin(DescrOperation) def initialize(self): "NOT_RPYTHON: only for initializing the space." @@ -524,16 +525,19 @@ self.wrap("Expected tuple of length 3")) return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) + _DescrOperation_is_true = is_true + _DescrOperation_getattr = getattr + def is_true(self, w_obj): # a shortcut for performance # NOTE! this method is typically overridden by builtinshortcut.py. 
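        # (Background for the _DescrOperation_* aliases above: DescrOperation
        # is now pulled in with objectmodel.import_from_mixin(), which copies
        # the mixin's methods into StdObjSpace itself instead of making it a
        # base class -- see test_import_from_mixin further down -- so the
        # copied originals are kept reachable under those aliases and the
        # shortcuts here can still delegate to them explicitly.)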
if type(w_obj) is W_BoolObject: return w_obj.boolval - return DescrOperation.is_true(self, w_obj) + return self._DescrOperation_is_true(w_obj) def getattr(self, w_obj, w_name): if not self.config.objspace.std.getattributeshortcut: - return DescrOperation.getattr(self, w_obj, w_name) + return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance w_type = self.type(w_obj) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -3,7 +3,6 @@ from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std import slicetype -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement @@ -601,7 +600,7 @@ def str_count__String_String_ANY_ANY(space, w_self, w_arg, w_start, w_end): u_self, u_start, u_end = _convert_idx_params(space, w_self, w_start, w_end) - return wrapint(space, u_self.count(w_arg._value, u_start, u_end)) + return space.newint(u_self.count(w_arg._value, u_start, u_end)) def _suffix_to_str(space, w_suffix, funcname): try: @@ -735,7 +734,7 @@ def hash__String(space, w_str): s = w_str._value x = compute_hash(s) - return wrapint(space, x) + return space.newint(x) def lt__String_String(space, w_str1, w_str2): s1 = w_str1._value diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -7,7 +7,6 @@ from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import slicetype -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate @@ -56,7 +55,7 @@ def descr_len(self, space): result = self.length() - return wrapint(space, result) + return space.newint(result) def descr_iter(self, space): from pypy.objspace.std import iterobject diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -9,10 +9,7 @@ tmp = f(self, space, w_other) if tmp is space.w_NotImplemented: return space.w_NotImplemented - elif tmp is space.w_False: - return space.w_True - else: - return space.w_False + return space.newbool(tmp is space.w_False) _negator.func_name = 'negate-%s' % f.func_name return _negator diff --git a/pypy/tool/gcdump.py b/pypy/tool/gcdump.py --- a/pypy/tool/gcdump.py +++ b/pypy/tool/gcdump.py @@ -20,11 +20,17 @@ for obj in self.walk(a): self.add_object_summary(obj[2], obj[3]) - def load_typeids(self, filename): + def load_typeids(self, filename_or_iter): self.typeids = Stat.typeids.copy() - for num, line in enumerate(open(filename)): + if isinstance(filename_or_iter, str): + iter = open(filename_or_iter) + else: + iter = filename_or_iter + for num, line in enumerate(iter): if num == 0: continue + if not line: + continue words = line.split() if words[0].startswith('member'): del words[0] @@ -92,5 +98,8 @@ typeid_name = os.path.join(os.path.dirname(sys.argv[1]), 'typeids.txt') if os.path.isfile(typeid_name): stat.load_typeids(typeid_name) + else: + import zlib, gc + 
stat.load_typeids(zlib.decompress(gc.get_typeids_z()).split("\n")) # stat.print_summary() diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -390,7 +390,12 @@ data = r.data.encode('hex') # backward compatibility dumps[name] = (world.backend_name, r.addr, data) loops = [] - for entry in extract_category(log, 'jit-log-opt'): + cat = extract_category(log, 'jit-log-opt') + if not cat: + extract_category(log, 'jit-log-rewritten') + if not cat: + extract_category(log, 'jit-log-noopt') + for entry in cat: parser = ParserCls(entry, None, {}, 'lltype', None, nonstrict=True) loop = parser.parse() diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2539,6 +2539,27 @@ s = a.build_types(f, []) assert s.const == 2 + def test_import_from_mixin(self): + class M(object): + def f(self): + return self.a + class I(object): + objectmodel.import_from_mixin(M) + def __init__(self, i): + self.a = i + class S(object): + objectmodel.import_from_mixin(M) + def __init__(self, s): + self.a = s + def f(n): + return (I(n).f(), S("a" * n).f()) + + assert f(3) == (3, "aaa") + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s.items[0], annmodel.SomeInteger) + assert isinstance(s.items[1], annmodel.SomeString) + def test___class___attribute(self): class Base(object): pass class A(Base): pass diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -571,7 +571,8 @@ self.mc.BL(self.stack_check_slowpath, c=c.HI) # call if ip > lr # cpu interface - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, + log): clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt clt._debug_nbargs = len(inputargs) @@ -620,6 +621,9 @@ 'loop.asm') ops_offset = self.mc.ops_offset + if logger is not None: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) self.teardown() debug_start("jit-backend-addr") @@ -644,8 +648,8 @@ frame_depth = max(frame_depth, target_frame_depth) return frame_depth - def assemble_bridge(self, faildescr, inputargs, operations, - original_loop_token, log): + def assemble_bridge(self, logger, faildescr, inputargs, operations, + original_loop_token, log): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) @@ -694,6 +698,9 @@ frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) + if logger: + logger.log_bridge(inputargs, operations, "rewritten", + ops_offset=ops_offset) self.teardown() debug_bridge(descr_number, rawstart, codeendpos) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -452,7 +452,7 @@ # Write code equivalent to write_barrier() in the GC: it checks # a flag in the object at arglocs[0], and if set, it calls a # helper piece of assembler. The latter saves registers as needed - # and call the function jit_remember_young_pointer() from the GC. 
+ # and call the function remember_young_pointer() from the GC. if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -56,17 +56,18 @@ def finish_once(self): self.assembler.finish_once() - def compile_loop(self, inputargs, operations, looptoken, + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, name=''): - return self.assembler.assemble_loop(name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(logger, name, inputargs, operations, + looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(faildescr, inputargs, operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(logger, faildescr, inputargs, + operations, + original_loop_token, log=log) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem diff --git a/rpython/jit/backend/arm/test/test_generated.py b/rpython/jit/backend/arm/test/test_generated.py --- a/rpython/jit/backend/arm/test/test_generated.py +++ b/rpython/jit/backend/arm/test/test_generated.py @@ -40,7 +40,7 @@ looptoken = JitCellToken() operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) operations[3].setfailargs([v9, v6, v10, v2, v8, v5, v1, v4]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-12 , -26 , -19 , 7 , -5 , -24 , -37 , 62 , 9 , 12] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -92,7 +92,7 @@ operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) operations[-1].setfailargs([v7, v1, v2]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [16 , 5 , 5 , 16 , 46 , 6 , 63 , 39 , 78 , 0] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 105 @@ -136,7 +136,7 @@ operations[-1].setfailargs([v5, v2, v1, v10, v3, v8, v4, v6]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-5 , 24 , 46 , -15 , 13 , -8 , 0 , -6 , 6 , 6] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -179,7 +179,7 @@ operations[5].setfailargs([]) operations[-1].setfailargs([v8, v2, v6, v5, v7, v1, v10]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [19 , -3 , -58 , -7 , 12 , 22 , -54 , -29 , -19 , -64] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == -29 @@ -223,7 +223,7 @@ looptoken = JitCellToken() operations[5].setfailargs([]) operations[-1].setfailargs([v1, v4, v10, v8, v7, v3]) - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [1073741824 , 95 , -16 , 5 , 92 , 12 , 32 , 17 , 37 , -63] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 1073741824 @@ -280,7 +280,7 @@ 
operations[9].setfailargs([v10, v13]) operations[-1].setfailargs([v8, v10, v6, v3, v2, v9]) args = [32 , 41 , -9 , 12 , -18 , 46 , 15 , 17 , 10 , 12] - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 assert cpu.get_int_value(deadframe, 0) == 12 @@ -328,7 +328,7 @@ operations[8].setfailargs([v5, v9]) operations[-1].setfailargs([v4, v10, v6, v5, v9, v7]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-8 , 0 , 62 , 35 , 16 , 9 , 30 , 581610154 , -1 , 738197503] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 2 @@ -378,7 +378,7 @@ operations[-2].setfailargs([v9, v4, v10, v11, v14]) operations[-1].setfailargs([v10, v8, v1, v6, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-39 , -18 , 1588243114 , -9 , -4 , 1252698794 , 0 , 715827882 , -15 , 536870912] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -433,7 +433,7 @@ operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) operations[-1].setfailargs([v1, v2, v9]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [0 , -2 , 24 , 1 , -4 , 13 , -95 , 33 , 2 , -44] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 3 @@ -475,7 +475,7 @@ operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) operations[-1].setfailargs([v8, v2, v10, v6, v7, v9, v5, v4]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [3 , -5 , 1431655765 , 47 , 12 , 1789569706 , 15 , 939524096 , 16 , -43] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 @@ -524,7 +524,7 @@ operations[-1].setfailargs([v2, v3, v5, v7, v10, v8, v9]) operations[4].setfailargs([v14]) looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [14 , -20 , 18 , -2058005163 , 6 , 1 , -16 , 11 , 0 , 19] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_latest_descr(deadframe).identifier == 1 diff --git a/rpython/jit/backend/arm/test/test_regalloc2.py b/rpython/jit/backend/arm/test/test_regalloc2.py --- a/rpython/jit/backend/arm/test/test_regalloc2.py +++ b/rpython/jit/backend/arm/test/test_regalloc2.py @@ -24,7 +24,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, 9) assert cpu.get_int_value(deadframe, 0) == (9 >> 3) assert cpu.get_int_value(deadframe, 1) == (~18) @@ -48,7 +48,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = cpu.execute_token(looptoken, -10) assert cpu.get_int_value(deadframe, 0) == 0 assert cpu.get_int_value(deadframe, 1) == -1000 @@ -145,7 +145,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - 
cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [-13 , 10 , 10 , 8 , -8 , -16 , -18 , 46 , -12 , 26] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 @@ -252,7 +252,7 @@ cpu = CPU(None, None) cpu.setup_once() looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) + cpu.compile_loop(None, inputargs, operations, looptoken) args = [17 , -20 , -6 , 6 , 1 , 13 , 13 , 9 , 49 , 8] deadframe = cpu.execute_token(looptoken, *args) assert cpu.get_int_value(deadframe, 0) == 0 diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -75,7 +75,7 @@ ResOperation(rop.FINISH, [inp[1]], None, descr=BasicFinalDescr(1)), ] operations[-2].setfailargs(out) - cpu.compile_loop(inp, operations, looptoken) + cpu.compile_loop(None, inp, operations, looptoken) args = [i for i in range(1, 15)] deadframe = self.cpu.execute_token(looptoken, *args) output = [self.cpu.get_int_value(deadframe, i - 1) for i in range(1, 15)] @@ -117,9 +117,9 @@ i1 = int_sub(i0, 1) finish(i1) ''') - self.cpu.compile_loop(loop2.inputargs, loop2.operations, lt2) - self.cpu.compile_loop(loop3.inputargs, loop3.operations, lt3) - self.cpu.compile_loop(loop1.inputargs, loop1.operations, lt1) + self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, lt2) + self.cpu.compile_loop(None, loop3.inputargs, loop3.operations, lt3) + self.cpu.compile_loop(None, loop1.inputargs, loop1.operations, lt1) df = self.cpu.execute_token(lt1, 10) assert self.cpu.get_int_value(df, 0) == 7 @@ -214,7 +214,7 @@ ops = "".join(ops) loop = parse(ops) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ARGS = [lltype.Signed] * numargs RES = lltype.Signed args = [i+1 for i in range(numargs)] @@ -246,7 +246,7 @@ try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() - self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) + self.cpu.compile_loop(None, ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] @@ -280,7 +280,7 @@ faildescr = BasicFailDescr(2) loop = parse(ops, self.cpu, namespace=locals()) looptoken = JitCellToken() - info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) ops2 = """ [i0, f1] i1 = same_as(i0) @@ -293,7 +293,7 @@ """ loop2 = parse(ops2, self.cpu, namespace=locals()) looptoken2 = JitCellToken() - info = self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + info = self.cpu.compile_loop(None, loop2.inputargs, loop2.operations, looptoken2) deadframe = self.cpu.execute_token(looptoken, -9, longlong.getfloatstorage(-13.5)) res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -183,7 +183,8 @@ self.stats = stats or MiniStats() self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, log=True, 
+ name=''): clt = model.CompiledLoopToken(self, looptoken.number) looptoken.compiled_loop_token = clt lltrace = LLTrace(inputargs, operations) @@ -191,7 +192,7 @@ clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() @@ -960,10 +961,10 @@ def execute_force_token(self, _): return self - def execute_cond_call_gc_wb(self, descr, a, b): + def execute_cond_call_gc_wb(self, descr, a): py.test.skip("cond_call_gc_wb not supported") - def execute_cond_call_gc_wb_array(self, descr, a, b, c): + def execute_cond_call_gc_wb_array(self, descr, a, b): py.test.skip("cond_call_gc_wb_array not supported") def execute_keepalive(self, descr, x): diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -136,7 +136,7 @@ """ Allocate a new frame, overwritten by tests """ frame = jitframe.JITFRAME.allocate(frame_info) - llop.gc_assume_young_pointers(lltype.Void, frame) + llop.gc_writebarrier(lltype.Void, frame) return frame class JitFrameDescrs: @@ -360,8 +360,7 @@ def _check_valid_gc(self): # we need the hybrid or minimark GC for rgc._make_sure_does_not_move() - # to work. Additionally, 'hybrid' is missing some stuff like - # jit_remember_young_pointer() for now. + # to work. 'hybrid' could work but isn't tested with the JIT. if self.gcdescr.config.translation.gc not in ('minimark',): raise NotImplementedError("--gc=%s not implemented with the JIT" % (self.gcdescr.config.translation.gc,)) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -97,7 +97,7 @@ new_frame.jf_savedata = frame.jf_savedata new_frame.jf_guard_exc = frame.jf_guard_exc # all other fields are empty - llop.gc_assume_young_pointers(lltype.Void, new_frame) + llop.gc_writebarrier(lltype.Void, new_frame) return lltype.cast_opaque_ptr(llmemory.GCREF, new_frame) except Exception, e: print "Unhandled exception", e, "in realloc_frame" diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -26,10 +26,11 @@ - Add COND_CALLs to the write barrier before SETFIELD_GC and SETARRAYITEM_GC operations. - recent_mallocs contains a dictionary of variable -> None. If a variable - is in the dictionary, next setfields can be called without a write barrier, - because the variable got allocated after the last potentially collecting - resop + 'write_barrier_applied' contains a dictionary of variable -> None. + If a variable is in the dictionary, next setfields can be called without + a write barrier. The idea is that an object that was freshly allocated + or already write_barrier'd don't need another write_barrier if there + was no potentially collecting resop inbetween. 
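    A rough sketch of the intended effect, in the notation used by
    test_rewrite.py further down (wbdescr/tzdescr as defined there):

        setfield_gc(p0, p1, descr=tzdescr)    # p0 not write_barrier_applied
           -->   cond_call_gc_wb(p0, descr=wbdescr)
                 setfield_gc(p0, p1, descr=tzdescr)
        setfield_gc(p0, p2, descr=tzdescr)    # p0 already write_barrier_applied
           -->   setfield_gc(p0, p2, descr=tzdescr)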
""" _previous_size = -1 @@ -42,7 +43,7 @@ self.cpu = cpu self.newops = [] self.known_lengths = {} - self.recent_mallocs = {} + self.write_barrier_applied = {} def rewrite(self, operations): # we can only remember one malloc since the next malloc can possibly @@ -221,18 +222,18 @@ def emitting_an_operation_that_can_collect(self): # must be called whenever we emit an operation that can collect: # forgets the previous MALLOC_NURSERY, if any; and empty the - # set 'recent_mallocs', so that future SETFIELDs will generate + # set 'write_barrier_applied', so that future SETFIELDs will generate # a write barrier as usual. self._op_malloc_nursery = None - self.recent_mallocs.clear() + self.write_barrier_applied.clear() def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr) self.newops.append(op) - # mark 'v_result' as freshly malloced - self.recent_mallocs[v_result] = None + # mark 'v_result' as freshly malloced, so not needing a write barrier + self.write_barrier_applied[v_result] = None def gen_malloc_fixedsize(self, size, typeid, v_result): """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...). @@ -315,7 +316,7 @@ [ConstInt(kind), ConstInt(itemsize), v_length], v_result, descr=arraydescr) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_malloc_nursery_varsize_frame(self, sizebox, v_result): @@ -327,7 +328,7 @@ v_result) self.newops.append(op) - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. 
@@ -360,7 +361,7 @@ self.newops.append(op) self._previous_size = size self._v_last_malloced_nursery = v_result - self.recent_mallocs[v_result] = None + self.write_barrier_applied[v_result] = None return True def gen_initialize_tid(self, v_newgcobj, tid): @@ -382,45 +383,42 @@ def handle_write_barrier_setfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier(op.getarg(0), v) - op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) self.newops.append(op) def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) - # no need for a write barrier in the case of previous malloc - if val not in self.recent_mallocs: + if val not in self.write_barrier_applied: v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - self.gen_write_barrier_array(op.getarg(0), - op.getarg(1), v) - op = op.copy_and_change(rop.SETARRAYITEM_RAW) + self.gen_write_barrier_array(val, op.getarg(1)) + #op = op.copy_and_change(rop.SETARRAYITEM_RAW) self.newops.append(op) - def gen_write_barrier(self, v_base, v_value): + def gen_write_barrier(self, v_base): write_barrier_descr = self.gc_ll_descr.write_barrier_descr - args = [v_base, v_value] + args = [v_base] self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, descr=write_barrier_descr)) + self.write_barrier_applied[v_base] = None - def gen_write_barrier_array(self, v_base, v_index, v_value): + def gen_write_barrier_array(self, v_base, v_index): write_barrier_descr = self.gc_ll_descr.write_barrier_descr if write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too @@ -430,13 +428,15 @@ length = self.known_lengths.get(v_base, LARGE) if length >= LARGE: # unknown or too big: produce a write_barrier_from_array - args = [v_base, v_index, v_value] + args = [v_base, v_index] self.newops.append( ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, descr=write_barrier_descr)) + # a WB_ARRAY is not enough to prevent any future write + # barriers, so don't add to 'write_barrier_applied'! 
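            # (presumably because COND_CALL_GC_WB_ARRAY may only mark the card
            #  covering this particular index, so other indices or fields of
            #  the same object would still need a barrier of their own)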
return # fall-back case: produce a write_barrier - self.gen_write_barrier(v_base, v_value) + self.gen_write_barrier(v_base) def round_up_for_allocation(self, size): if not self.gc_ll_descr.round_up: diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -202,13 +202,11 @@ rewriter = gc.GcRewriterAssembler(gc_ll_descr, None) newops = rewriter.newops v_base = BoxPtr() - v_value = BoxPtr() - rewriter.gen_write_barrier(v_base, v_value) + rewriter.gen_write_barrier(v_base) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB assert newops[0].getarg(0) == v_base - assert newops[0].getarg(1) == v_value assert newops[0].result is None wbdescr = newops[0].getdescr() assert is_valid_int(wbdescr.jit_wb_if_flag) diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -676,7 +676,7 @@ 'checkdescr': checkdescr, 'fielddescr': cpu.fielddescrof(S, 'x')}) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) p0 = lltype.malloc(S, zero=True) p1 = lltype.malloc(S) p2 = lltype.malloc(S) @@ -715,7 +715,7 @@ 'calldescr': checkdescr, }) token = JitCellToken() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) S = self.S s = lltype.malloc(S) cpu.execute_token(token, 1, s) @@ -743,7 +743,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(20) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) arg = longlong.getfloatstorage(2.3) frame = cpu.execute_token(token, arg) ofs = cpu.get_baseofs_of_frame_field() @@ -770,7 +770,7 @@ cpu.gc_ll_descr.collections = [[0, sizeof.size]] cpu.gc_ll_descr.init_nursery(2 * sizeof.size) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = cpu.execute_token(token) # now we should be able to track everything from the frame frame = lltype.cast_opaque_ptr(JITFRAMEPTR, frame) @@ -821,7 +821,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) args = [lltype.nullptr(llmemory.GCREF.TO) for i in range(7)] frame = cpu.execute_token(token, 1, *args) frame = rffi.cast(JITFRAMEPTR, frame) @@ -867,7 +867,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) @@ -911,7 +911,7 @@ token = JitCellToken() cpu.gc_ll_descr.init_nursery(100) cpu.setup_once() - cpu.compile_loop(loop.inputargs, loop.operations, token) + cpu.compile_loop(None, loop.inputargs, loop.operations, token) frame = lltype.cast_opaque_ptr(JITFRAMEPTR, cpu.execute_token(token, 1, a)) assert getmap(frame).count('1') == 4 diff --git a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py 
b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py --- a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py @@ -97,7 +97,7 @@ loop = self.parse(ops, namespace=namespace) self.loop = loop looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) arguments = [] for arg in args: if isinstance(arg, int): @@ -147,7 +147,8 @@ assert ([box.type for box in bridge.inputargs] == [box.type for box in guard_op.getfailargs()]) faildescr = guard_op.getdescr() - self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, + bridge.operations, loop._jitcelltoken) return bridge @@ -335,7 +336,7 @@ ''' self.interpret(ops, [0, 0, 3, 0]) assert self.getints(3) == [1, -3, 10] - + def test_compare_memory_result_survives(self): ops = ''' [i0, i1, i2, i3] @@ -409,7 +410,7 @@ class TestRegallocCompOps(BaseTestRegalloc): - + def test_cmp_op_0(self): ops = ''' [i0, i3] @@ -575,7 +576,7 @@ class TestRegAllocCallAndStackDepth(BaseTestRegalloc): def setup_class(cls): py.test.skip("skip for now, not sure what do we do") - + def expected_frame_depth(self, num_call_args, num_pushed_input_args=0): # Assumes the arguments are all non-float if not self.cpu.IS_64_BIT: @@ -612,7 +613,7 @@ ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr) - i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) + i11 = call(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) guard_false(i5) [i11, i1, i2, i3, i4, i5, i6, i7, i8, i9] ''' loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9]) @@ -649,7 +650,7 @@ ops = ''' [i2, i0, i1] - i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) + i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr) guard_false(i0, descr=fdescr2) [i3, i0] ''' bridge = self.attach_bridge(ops, loop, -2) @@ -676,7 +677,7 @@ ops = ''' [i2] - i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) + i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr) guard_false(i3, descr=fdescr2) [i3] ''' bridge = self.attach_bridge(ops, loop, -2) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -561,8 +561,8 @@ jump() """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setfield_raw(p1, p2, descr=tzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, p2, descr=tzdescr) jump() """) @@ -575,8 +575,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -595,8 +595,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 129, descr=clendescr) call(123456) - cond_call_gc_wb(p1, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -616,8 +616,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 130, descr=clendescr) call(123456) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -628,8 
+628,8 @@ jump() """, """ [p1, i2, p3] - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -647,8 +647,8 @@ setfield_gc(p1, 8111, descr=tiddescr) setfield_gc(p1, 5, descr=clendescr) label(p1, i2, p3) - cond_call_gc_wb_array(p1, i2, p3, descr=wbdescr) - setarrayitem_raw(p1, i2, p3, descr=cdescr) + cond_call_gc_wb_array(p1, i2, descr=wbdescr) + setarrayitem_gc(p1, i2, p3, descr=cdescr) jump() """) @@ -666,8 +666,8 @@ jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb(p1, p2, descr=wbdescr) - setinteriorfield_raw(p1, 0, p2, descr=interiorzdescr) + cond_call_gc_wb(p1, descr=wbdescr) + setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) jump(p1, p2) """, interiorzdescr=interiorzdescr) @@ -733,8 +733,8 @@ p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) setfield_gc(p1, i0, descr=strlendescr) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) jump() """) @@ -750,11 +750,25 @@ p0 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p0, 5678, descr=tiddescr) label(p0, p1) - cond_call_gc_wb(p0, p1, descr=wbdescr) - setfield_raw(p0, p1, descr=tzdescr) + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) jump() """) + def test_multiple_writes(self): + self.check_rewrite(""" + [p0, p1, p2] + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """, """ + [p0, p1, p2] + cond_call_gc_wb(p0, descr=wbdescr) + setfield_gc(p0, p1, descr=tzdescr) + setfield_gc(p0, p2, descr=tzdescr) + jump(p1, p2, p0) + """) + def test_rewrite_call_assembler(self): self.check_rewrite(""" [i0, f0] diff --git a/rpython/jit/backend/llsupport/test/test_runner.py b/rpython/jit/backend/llsupport/test/test_runner.py --- a/rpython/jit/backend/llsupport/test/test_runner.py +++ b/rpython/jit/backend/llsupport/test/test_runner.py @@ -14,7 +14,7 @@ def set_debug(flag): pass - def compile_loop(self, inputargs, operations, looptoken): + def compile_loop(self, logger, inputargs, operations, looptoken): py.test.skip("llsupport test: cannot compile operations") diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -51,7 +51,8 @@ """ return False - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, logger, inputargs, operations, looptoken, + log=True, name=''): """Assemble the given loop. Should create and attach a fresh CompiledLoopToken to looptoken.compiled_loop_token and stick extra attributes @@ -67,7 +68,7 @@ """ raise NotImplementedError - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputargs, operations, original_loop_token, log=True): """Assemble the bridge. The FailDescr is the descr of the original guard that failed. 
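In short, the backend entry points now take the logger as their first
argument.  A minimal sketch of the resulting calling convention (names as in
the tests that follow; the logger is simply None when no logging is wanted):

    looptoken = JitCellToken()
    cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken)
    cpu.compile_bridge(None, faildescr, bridge.inputargs, bridge.operations,
                       looptoken)

When a real logger is passed instead of None, the backend itself can log the
rewritten trace, as the ARM assembler above does via
logger.log_loop(..., "rewritten", ...) and logger.log_bridge(..., "rewritten", ...).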
diff --git a/rpython/jit/backend/test/calling_convention_test.py b/rpython/jit/backend/test/calling_convention_test.py --- a/rpython/jit/backend/test/calling_convention_test.py +++ b/rpython/jit/backend/test/calling_convention_test.py @@ -105,7 +105,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) @@ -249,7 +249,7 @@ called_looptoken = JitCellToken() called_looptoken.outermost_jitdriver_sd = FakeJitDriverSD() done_descr = called_loop.operations[-1].getdescr() - self.cpu.compile_loop(called_loop.inputargs, called_loop.operations, called_looptoken) + self.cpu.compile_loop(None, called_loop.inputargs, called_loop.operations, called_looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = cpu.execute_token(called_looptoken, *argvals) @@ -278,7 +278,7 @@ self.cpu.done_with_this_frame_descr_float = done_descr try: othertoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, othertoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) # prepare call to called_loop argvals, _ = self._prepare_args(args, floats, ints) @@ -424,7 +424,7 @@ loop = parse(ops, namespace=locals()) looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) argvals, expected_result = self._prepare_args(args, floats, ints) deadframe = self.cpu.execute_token(looptoken, *argvals) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -49,7 +49,7 @@ valueboxes, descr) looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) args = [] for box in inputargs: if isinstance(box, BoxInt): @@ -127,7 +127,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) res = self.cpu.get_int_value(deadframe, 0) @@ -145,7 +145,7 @@ ] inputargs = [i0] looptoken = JitCellToken() - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, longlong.getfloatstorage(2.8)) fail = self.cpu.get_latest_descr(deadframe) @@ -170,7 +170,7 @@ inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -195,7 +195,7 @@ inputargs = [i3] operations[4].setfailargs([None, None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 44) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 @@ -221,7 +221,7 @@ operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = 
weakref.ref(operations[2]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) if hasattr(looptoken, '_x86_ops_offset'): del looptoken._x86_ops_offset # else it's kept alive del i0, i1, i2 @@ -249,7 +249,7 @@ ] inputargs = [i0] operations[3].setfailargs([i1]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -260,7 +260,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -291,7 +291,7 @@ ] inputargs = [i3] operations[4].setfailargs([None, i1, None]) - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() i3 = BoxInt() @@ -302,7 +302,7 @@ ] bridge[1].setfailargs([i1b]) - self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) From noreply at buildbot.pypy.org Mon Aug 26 22:30:47 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 26 Aug 2013 22:30:47 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20130826203047.55EA11C0189@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r66354:a3fe46f5747d Date: 2013-08-26 13:29 -0700 http://bitbucket.org/pypy/pypy/changeset/a3fe46f5747d/ Log: adapt to py3 diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -919,13 +919,9 @@ raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) - a = array('c', 'hi') + a = array('u', 'hi') a[0] = 'b' assert a[0] == 'b' - - a = array('u', u'hi') - a[0] = u'b' - assert a[0] == u'b' def test_bytearray(self): a = self.array('u', 'hi') From noreply at buildbot.pypy.org Mon Aug 26 23:29:12 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 26 Aug 2013 23:29:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue1591: sqlite should swallow exceptions raised in adapter. Message-ID: <20130826212912.5720C1C13EE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r66355:6181c1116a92 Date: 2013-08-26 23:27 +0200 http://bitbucket.org/pypy/pypy/changeset/6181c1116a92/ Log: Issue1591: sqlite should swallow exceptions raised in adapter. diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1229,7 +1229,10 @@ if cvt is not None: param = cvt(param) - param = adapt(param) + try: + param = adapt(param) + except: + pass # And use previous value if param is None: rc = _lib.sqlite3_bind_null(self._statement, idx) diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -228,3 +228,18 @@ cur = con.cursor() cur.execute(u'SELECT 1 as méil') assert cur.description[0][0] == u"méil".encode('utf-8') + +def test_adapter_exception(con): + def cast(obj): + raise ZeroDivisionError + + _sqlite3.register_adapter(int, cast) + try: + cur = con.cursor() + cur.execute("select ?", (4,)) + val = cur.fetchone()[0] + # Adapter error is ignored, and parameter is passed as is. 
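        # (In other words: any exception raised by the registered adapter is
        #  swallowed by the new try/except around adapt() in _sqlite3.py and
        #  the unadapted value is bound instead -- roughly,
        #  register_adapter(int, lambda obj: 1 / 0) followed by
        #  cur.execute("select ?", (4,)).fetchone() still yields (4,).)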
+ assert val == 4 + assert type(val) is int + finally: + del _sqlite3.adapters[(int, _sqlite3.PrepareProtocol)] From noreply at buildbot.pypy.org Tue Aug 27 11:06:44 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 11:06:44 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: another hint to not trace inside something Message-ID: <20130827090644.2A5401C01F5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66356:055e0f71be5c Date: 2013-08-27 10:06 +0100 http://bitbucket.org/pypy/pypy/changeset/055e0f71be5c/ Log: another hint to not trace inside something diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -19,6 +19,7 @@ 'Box': 'interp_resop.WrappedBox', 'PARAMETER_DOCS': 'space.wrap(rpython.rlib.jit.PARAMETER_DOCS)', 'set_local_threshold': 'interp_jit.set_local_threshold', + 'dont_trace_inside': 'interp_jit.dont_trace_inside', } def setup_after_space_initialization(self): diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -168,6 +168,15 @@ the JIT follow the call.''' return space.call_args(w_callable, __args__) +def _jitcell_at(w_code, pos): + try: + jitcell = w_code.jit_cells[pos << 1] + except KeyError: + ref = jit_hooks.new_jitcell() + jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) + w_code.jit_cells[pos << 1] = jitcell + return jitcell + @jit.dont_look_inside @unwrap_spec(w_code=PyCode, pos=r_uint, value=int) def set_local_threshold(space, w_code, pos, value): @@ -176,10 +185,19 @@ For testing. Set the threshold for this code object at position pos at value given. 
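    A sketch of the intended app-level use, mirroring test_jit_hook.py below
    ('f' being any plain Python function):

        import pypyjit
        pypyjit.set_local_threshold(f.__code__, 0, 0)
        pypyjit.dont_trace_inside(f.__code__)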
""" - try: - jitcell = w_code.jit_cells[pos << 1] - except KeyError: - ref = jit_hooks.new_jitcell() - jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) - w_code.jit_cells[pos << 1] = jitcell + jitcell = _jitcell_at(w_code, pos) jitcell.counter = value + + at jit.dont_look_inside + at unwrap_spec(w_code=PyCode) +def dont_trace_inside(space, w_code): + """ dont trace inside this function + """ + from rpython.rlib.nonconst import NonConstant + + flag = True + if NonConstant(0): + flag = False # annotation hack to annotate it as real bool + jitcell = _jitcell_at(w_code, 0) + jitcell.dont_trace_here = flag + diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -279,4 +279,5 @@ pass pypyjit.set_local_threshold(f.__code__, 0, 0) + pypyjit.dont_trace_inside(f.__code__) # assert did not crash diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -682,7 +682,7 @@ # Annotation and rtyping of some of the JitDriver methods class BaseJitCell(object): - __slots__ = ('counter') + __slots__ = ('counter', 'dont_trace_here') class ExtEnterLeaveMarker(ExtRegistryEntry): From noreply at buildbot.pypy.org Tue Aug 27 11:18:03 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 11:18:03 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: fix types Message-ID: <20130827091803.EC6321C029A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r66357:3dcc97a308e2 Date: 2013-08-27 10:17 +0100 http://bitbucket.org/pypy/pypy/changeset/3dcc97a308e2/ Log: fix types diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -198,6 +198,6 @@ flag = True if NonConstant(0): flag = False # annotation hack to annotate it as real bool - jitcell = _jitcell_at(w_code, 0) + jitcell = _jitcell_at(w_code, r_uint(0)) jitcell.dont_trace_here = flag From noreply at buildbot.pypy.org Tue Aug 27 11:37:20 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 27 Aug 2013 11:37:20 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: (all) planning for today Message-ID: <20130827093720.076BA1C01A6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5029:2aff8cf3bde9 Date: 2013-08-27 10:37 +0100 http://bitbucket.org/pypy/extradoc/changeset/2aff8cf3bde9/ Log: (all) planning for today diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -2,28 +2,25 @@ --------------- Carl Friedrich -Laurie -Remy +Anto +Remi Marko -Olmo Romain -Manuel -Rami Lukas Richard -Ronan +Richard2 Armin -Tom Edd Maciej People not present ------------------- -Anto - - - +Manuel +Rami +Olmo +Tom +Laurie Tasks ----- @@ -41,39 +38,50 @@ threads or greenlets, probably by adding a thread-or-greenlet number prefix (see branch stmgc-c4 where we already add a thread num prefix) -* general STM things (Remy, Armin) +* general STM things (Remy, Armin) GENERAL PROGRESS -* general Numpy things (Marko, Romain) +* general Numpy things (Romain) GENERAL PROGRESS -* fix some of the RPython nits that Edd found (Ronan, Edd) +* PyOpenCL (Marko) -* continue less-stringly-ops +* fix some of the RPython nits that Edd found SOME PROGRESS -* better error messages 
(Ronan, Edd) +* continue less-stringly-ops and other RPython cleanups (Ronan, Romain) + +* better error messages for union errors (Edd, Ronan) TO BE PUSHED + +* better error messages for moving attributes * programming -* JIT for dont-know-yet (Richard, Lukas) +* JIT for xlispx (Richard, Lukas, Carl Friedrich around) IN PROGRESS -* explore Laurie's crazy scheme of persisting loop entry counts (Maciej, Laurie) +* explore Laurie's crazy scheme of persisting loop entry counts (Maciej, Anto) IN PROGRESS * progress on the documentation branch (Olmo, Manuel) +* Python 3 benchmarks (Richard2) SOME PROGRESS + +* meditate on benchmarking infrastructure (Richard2, Edd, Maciej around) + * shave all the yaks -* find the slow generator task (Marko, Romain) +* find the slow generator task (Marko, Romain) INVALID * general wizardry (Carl Friedrich; Armin) -* general getting started (Rami, Carl Friedrich) +* general getting started (Rami, Carl Friedrich) GOT STARTED Discussions planned -------------------- * demo session Tuesday +* scientific computing roadmap TODAY, Maciek leads discussion * STM dissemination round * JIT optimizer mess * roadmap planning -* scientific computing roadmap -* LuaJIT discussion TODAY (Tom, Armin, Maciek, Carl Friedrich, Laurie) +* do we want pip installed on downloadable pypys? +* generalize jitviewer to other languages + +* LuaJIT discussion DONE (Tom, Armin, Maciek, Carl Friedrich, Laurie) From noreply at buildbot.pypy.org Tue Aug 27 12:50:31 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 12:50:31 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: blog post about numpy Message-ID: <20130827105031.DB7EE1C1447@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5030:cc9ac5553e9e Date: 2013-08-27 11:18 +0100 http://bitbucket.org/pypy/extradoc/changeset/cc9ac5553e9e/ Log: blog post about numpy diff --git a/blog/draft/numpy-road-forward.rst b/blog/draft/numpy-road-forward.rst new file mode 100644 --- /dev/null +++ b/blog/draft/numpy-road-forward.rst @@ -0,0 +1,40 @@ + +Hello everyone. + +This is the roadmap for numpy effort in PyPy as discussed on the London sprint. +First, the highest on our priority list is to finish the low-level part +of the numpy module. What +we'll do is to finish the RPython part of numpy and provide a pip installable +numpypy repository that includes the pure python part of Numpy. This would +contain the original Numpy with a few minor changes. + +Second, we need to work on the JIT support that will make NumPy on PyPy +faster. In detail: + +* reenable the lazy loop evaluation + +* optimize bridges, which is depending on optimizer refactorings + +* SSE support + +On the compatibility front, there were some independent attempts into +making the following stuff working: + +* f2py + +* C API (in fact, PyArray\_* API is partly present in the nightly builds of + PyPy) + +* matplotlib (both using PyArray\_* API and embedding CPython runtime in PyPy) + +* scipy + +In order to make all of the above happen faster, it would be helpful to raise +more funds. You can donate to `PyPy's NumPy project`_ on our website. Note +that PyPy is a member of SFC which is a 501(c)(3) US non-profit, so donations +from US companies can be tax-deducted. + +Cheers, +fijal, arigo, ronan, rguillebert, anto and others + +.. 
_`PyPy's NumPy project`: http://pypy.org/numpydonate.html From noreply at buildbot.pypy.org Tue Aug 27 12:50:33 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 12:50:33 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20130827105033.27D2D1C1449@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5031:8d4b1136211a Date: 2013-08-27 11:50 +0100 http://bitbucket.org/pypy/extradoc/changeset/8d4b1136211a/ Log: merge diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -2,28 +2,25 @@ --------------- Carl Friedrich -Laurie -Remy +Anto +Remi Marko -Olmo Romain -Manuel -Rami Lukas Richard -Ronan +Richard2 Armin -Tom Edd Maciej People not present ------------------- -Anto - - - +Manuel +Rami +Olmo +Tom +Laurie Tasks ----- @@ -41,39 +38,50 @@ threads or greenlets, probably by adding a thread-or-greenlet number prefix (see branch stmgc-c4 where we already add a thread num prefix) -* general STM things (Remy, Armin) +* general STM things (Remy, Armin) GENERAL PROGRESS -* general Numpy things (Marko, Romain) +* general Numpy things (Romain) GENERAL PROGRESS -* fix some of the RPython nits that Edd found (Ronan, Edd) +* PyOpenCL (Marko) -* continue less-stringly-ops +* fix some of the RPython nits that Edd found SOME PROGRESS -* better error messages (Ronan, Edd) +* continue less-stringly-ops and other RPython cleanups (Ronan, Romain) + +* better error messages for union errors (Edd, Ronan) TO BE PUSHED + +* better error messages for moving attributes * programming -* JIT for dont-know-yet (Richard, Lukas) +* JIT for xlispx (Richard, Lukas, Carl Friedrich around) IN PROGRESS -* explore Laurie's crazy scheme of persisting loop entry counts (Maciej, Laurie) +* explore Laurie's crazy scheme of persisting loop entry counts (Maciej, Anto) IN PROGRESS * progress on the documentation branch (Olmo, Manuel) +* Python 3 benchmarks (Richard2) SOME PROGRESS + +* meditate on benchmarking infrastructure (Richard2, Edd, Maciej around) + * shave all the yaks -* find the slow generator task (Marko, Romain) +* find the slow generator task (Marko, Romain) INVALID * general wizardry (Carl Friedrich; Armin) -* general getting started (Rami, Carl Friedrich) +* general getting started (Rami, Carl Friedrich) GOT STARTED Discussions planned -------------------- * demo session Tuesday +* scientific computing roadmap TODAY, Maciek leads discussion * STM dissemination round * JIT optimizer mess * roadmap planning -* scientific computing roadmap -* LuaJIT discussion TODAY (Tom, Armin, Maciek, Carl Friedrich, Laurie) +* do we want pip installed on downloadable pypys? 
+* generalize jitviewer to other languages + +* LuaJIT discussion DONE (Tom, Armin, Maciek, Carl Friedrich, Laurie) From noreply at buildbot.pypy.org Tue Aug 27 13:06:28 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 27 Aug 2013 13:06:28 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: hg merge py3k Message-ID: <20130827110628.D54901C01A6@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r66358:ef1b47716e8c Date: 2013-08-27 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/ef1b47716e8c/ Log: hg merge py3k diff too long, truncating to 2000 out of 13346 lines diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -152,7 +152,8 @@ try: plaincontent = dot2plain_graphviz(content, contenttype) except PlainParseError, e: - print e - # failed, retry via codespeak - plaincontent = dot2plain_codespeak(content, contenttype) + raise + ##print e + ### failed, retry via codespeak + ##plaincontent = dot2plain_codespeak(content, contenttype) return list(parse_plain(graph_id, plaincontent, links, fixedfont)) diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -165,6 
+165,8 @@ # All _delegate_methods must also be initialized here. send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy __getattr__ = _dummy + def _drop(self): + pass # Wrapper around platform socket objects. This implements # a platform-independent dup() functionality. The @@ -179,12 +181,21 @@ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) - elif _type(_sock) is _realsocket: + else: + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change. + + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _socketobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. _sock._reuse() - # PyPy note about refcounting: implemented with _reuse()/_drop() - # on the class '_socket.socket'. Python 3 did it differently - # with a reference counter on this class 'socket._socketobject' - # instead, but it is a less compatible change (breaks eventlet). + self._sock = _sock def send(self, data, flags=0): @@ -216,9 +227,8 @@ def close(self): s = self._sock - if type(s) is _realsocket: - s._drop() self._sock = _closedsocket() + s._drop() close.__doc__ = _realsocket.close.__doc__ def accept(self): @@ -280,8 +290,14 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): - if type(sock) is _realsocket: - sock._reuse() + # Note that a few libraries (like eventlet) poke at the + # private implementation of socket.py, passing custom + # objects to _fileobject(). These libraries need the + # following fix for use on PyPy: the custom objects need + # methods _reuse() and _drop() that maintains an explicit + # reference counter, starting at 0. When it drops back to + # zero, close() must be called. + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -317,11 +333,11 @@ self.flush() finally: s = self._sock - if type(s) is _realsocket: + self._sock = None + if s is not None: s._drop() - if self._close: - self._sock.close() - self._sock = None + if self._close: + s.close() def __del__(self): try: diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -110,6 +110,12 @@ suppress_ragged_eofs=True, ciphers=None): socket.__init__(self, _sock=sock._sock) + # "close" the original socket: it is not usable any more. + # this only calls _drop(), which should not actually call + # the operating system's close() because the reference + # counter is greater than 1 (we hold one too). + sock.close() + if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS @@ -352,11 +358,19 @@ works with the SSL connection. Just use the code from the socket module.""" - self._makefile_refs += 1 # close=True so as to decrement the reference count when done with # the file-like object. 
return _fileobject(self, mode, bufsize, close=True) + def _reuse(self): + self._makefile_refs += 1 + + def _drop(self): + if self._makefile_refs < 1: + self.close() + else: + self._makefile_refs -= 1 + def wrap_socket(sock, keyfile=None, certfile=None, diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py --- a/lib-python/2.7/test/test_socket.py +++ b/lib-python/2.7/test/test_socket.py @@ -1066,6 +1066,9 @@ def recv(self, size): return self._recv_step.next()() + def _reuse(self): pass + def _drop(self): pass + @staticmethod def _raise_eintr(): raise socket.error(errno.EINTR) @@ -1321,7 +1324,8 @@ closed = False def flush(self): pass def close(self): self.closed = True - def _decref_socketios(self): pass + def _reuse(self): pass + def _drop(self): pass # must not close unless we request it: the original use of _fileobject # by module socket requires that the underlying socket not be closed until diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py --- a/lib-python/2.7/test/test_urllib2.py +++ b/lib-python/2.7/test/test_urllib2.py @@ -270,6 +270,8 @@ self.reason = reason def read(self): return '' + def _reuse(self): pass + def _drop(self): pass class MockHTTPClass: def __init__(self): diff --git a/lib-python/2.7/urllib2.py b/lib-python/2.7/urllib2.py --- a/lib-python/2.7/urllib2.py +++ b/lib-python/2.7/urllib2.py @@ -1193,6 +1193,8 @@ # out of socket._fileobject() and into a base class. r.recv = r.read + r._reuse = lambda: None + r._drop = lambda: None fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) diff --git a/lib-python/3/site.py b/lib-python/3/site.py --- a/lib-python/3/site.py +++ b/lib-python/3/site.py @@ -57,6 +57,8 @@ import builtins import traceback +is_pypy = '__pypy__' in sys.builtin_module_names + # Prefixes for site-packages; add additional prefixes like /usr/local here PREFIXES = [sys.prefix, sys.exec_prefix] # Enable per user site-packages directory @@ -284,6 +286,10 @@ if sys.platform in ('os2emx', 'riscos'): sitepackages.append(os.path.join(prefix, "Lib", "site-packages")) + elif is_pypy: + from distutils.sysconfig import get_python_lib + sitepackages.append(get_python_lib(standard_lib=False, + prefix=prefix)) elif os.sep == '/': sitepackages.append(os.path.join(prefix, "lib", "python" + sys.version[:3], @@ -427,20 +433,27 @@ def setcopyright(): """Set 'copyright' and 'credits' in builtins""" + licenseargs = None + if is_pypy: + credits = "PyPy is maintained by the PyPy developers: http://pypy.org/" + license = "See https://bitbucket.org/pypy/pypy/src/default/LICENSE" + licenseargs = (license,) + elif sys.platform[:4] == 'java': + credits = ("Jython is maintained by the Jython developers " + "(www.jython.org).") + else: + credits = """\ + Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands + for supporting Python development. See www.python.org for more information.""" + builtins.copyright = _Printer("copyright", sys.copyright) - if sys.platform[:4] == 'java': - builtins.credits = _Printer( - "credits", - "Jython is maintained by the Jython developers (www.jython.org).") - else: - builtins.credits = _Printer("credits", """\ - Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands - for supporting Python development. 
See www.python.org for more information.""") - here = os.path.dirname(os.__file__) - builtins.license = _Printer( - "license", "See http://www.python.org/%.3s/license.html" % sys.version, - ["LICENSE.txt", "LICENSE"], - [os.path.join(here, os.pardir), here, os.curdir]) + builtins.credits = _Printer("credits", credits) + if licenseargs is None: + here = os.path.dirname(os.__file__) + license = "See http://www.python.org/%.3s/license.html" % sys.version + licenseargs = (license, ["LICENSE.txt", "LICENSE"], + [os.path.join(here, os.pardir), here, os.curdir]) + builtins.license = _Printer("license", *licenseargs) class _Helper(object): diff --git a/lib-python/3/test/test_site.py b/lib-python/3/test/test_site.py --- a/lib-python/3/test/test_site.py +++ b/lib-python/3/test/test_site.py @@ -223,6 +223,10 @@ self.assertEqual(len(dirs), 1) wanted = os.path.join('xoxo', 'Lib', 'site-packages') self.assertEqual(dirs[0], wanted) + elif '__pypy__' in sys.builtin_module_names: + self.assertEquals(len(dirs), 1) + wanted = os.path.join('xoxo', 'site-packages') + self.assertEquals(dirs[0], wanted) elif (sys.platform == "darwin" and sysconfig.get_config_var("PYTHONFRAMEWORK")): # OS X framework builds diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -966,7 +966,7 @@ r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") if lib.color_content(color, r, g, b) == lib.ERR: raise error("Argument 1 was out of range. Check value of COLORS.") - return (r, g, b) + return (r[0], g[0], b[0]) def color_pair(n): @@ -1121,6 +1121,7 @@ term = ffi.NULL err = ffi.new("int *") if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] if err == 0: raise error("setupterm: could not find terminal") elif err == -1: diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.6 +Version: 0.7 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/readline.egg-info b/lib_pypy/readline.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/readline.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: readline +Version: 6.2.4.1 +Summary: Hack to make "pip install readline" happy and do nothing +Home-page: UNKNOWN +Author: UNKNOWN +Author-email: UNKNOWN +License: UNKNOWN +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -98,8 +98,6 @@ .. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py .. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py .. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py -.. _`rpython/rtyper/ootypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ -.. _`rpython/rtyper/ootypesystem/ootype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ootypesystem/ootype.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. 
_`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -339,9 +339,10 @@ + methods and other class attributes do not change after startup + single inheritance is fully supported -+ simple mixins somewhat work too, but the mixed in class needs a - ``_mixin_ = True`` class attribute. isinstance checks against the - mixin type will fail when translated. ++ use `rpython.rlib.objectmodel.import_from_mixin(M)` in a class + body to copy the whole content of a class `M`. This can be used + to implement mixins: functions and staticmethods are duplicated + (the other class attributes are just copied unmodified). + classes are first-class objects too diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.0' +version = '2.1' # The full version, including alpha/beta/rc tags. -release = '2.0.2' +release = '2.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -83,7 +83,7 @@ the selection of scientific software) will also work for a build with the builtin backend. -.. _`download`: http://cern.ch/wlav/reflex-2013-04-23.tar.bz2 +.. _`download`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 .. _`ROOT`: http://root.cern.ch/ Besides Reflex, you probably need a version of `gccxml`_ installed, which is @@ -98,8 +98,8 @@ To install the standalone version of Reflex, after download:: - $ tar jxf reflex-2013-04-23.tar.bz2 - $ cd reflex-2013-04-23 + $ tar jxf reflex-2013-08-14.tar.bz2 + $ cd reflex-2013-08-14 $ ./build/autogen $ ./configure $ make && make install diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -57,6 +57,12 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + On SLES11: + + $ sudo zypper install gcc make python-devel pkg-config \ + zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ + libexpat-devel libffi-devel python-curses + The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. * ``pkg-config`` (to help us locate libffi files) @@ -104,8 +110,8 @@ executable. The executable behaves mostly like a normal Python interpreter:: $ ./pypy-c - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.7.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``RPython magically makes you rich and famous (says so on the tin)'' @@ -171,7 +177,7 @@ the ``bin/pypy`` executable. To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. in ``/opt/pypy2.0``) and put a symlink to the +whole hierarchy alone (e.g. 
in ``/opt/pypy2.1``) and put a symlink to the ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` If the executable fails to find suitable libraries, it will report diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,10 +53,10 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-2.0.tar.bz2 - $ ./pypy-2.0/bin/pypy - Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0 with GCC 4.7.1] on linux2 + $ tar xf pypy-2.1.tar.bz2 + $ ./pypy-2.1/bin/pypy + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.4.3] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``PyPy is an exciting technology that lets you to write fast, portable, multi-platform interpreters with less @@ -75,14 +75,14 @@ $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.0/bin/pypy distribute_setup.py + $ ./pypy-2.1/bin/pypy distribute_setup.py - $ ./pypy-2.0/bin/pypy get-pip.py + $ ./pypy-2.1/bin/pypy get-pip.py - $ ./pypy-2.0/bin/pip install pygments # for example + $ ./pypy-2.1/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-2.0/site-packages``, and -the scripts in ``pypy-2.0/bin``. +3rd party libraries will be installed in ``pypy-2.1/site-packages``, and +the scripts in ``pypy-2.1/bin``. Installing using virtualenv --------------------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0.2`_: the latest official release +* `Release 2.1.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0.2`: http://pypy.org/download.html +.. _`Release 2.1.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/jit/index.rst b/pypy/doc/jit/index.rst --- a/pypy/doc/jit/index.rst +++ b/pypy/doc/jit/index.rst @@ -23,7 +23,10 @@ - Hooks_ debugging facilities available to a python programmer +- Virtualizable_ how virtualizables work and what they are (in other words how + to make frames more efficient). .. _Overview: overview.html .. _Notes: pyjitpl5.html .. _Hooks: ../jit-hooks.html +.. _Virtualizable: virtualizable.html diff --git a/pypy/doc/jit/virtualizable.rst b/pypy/doc/jit/virtualizable.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/jit/virtualizable.rst @@ -0,0 +1,60 @@ + +Virtualizables +============== + +**Note:** this document does not have a proper introduction as to how +to understand the basics. We should write some. If you happen to be here +and you're missing context, feel free to pester us on IRC. + +Problem description +------------------- + +The JIT is very good at making sure some objects are never allocated if they +don't escape from the trace. Such objects are called ``virtuals``. However, +if we're dealing with frames, virtuals are often not good enough. Frames +can escape and they can also be allocated already at the moment we enter the +JIT. 
In such cases we need some extra object that can still be optimized away, +despite existing on the heap. + +Solution +-------- + +We introduce virtualizables. They're objects that exist on the heap, but their +fields are not always in sync with whatever happens in the assembler. One +example is that virtualizable fields can store virtual objects without +forcing them. This is very useful for frames. Declaring an object to be +virtualizable works like this: + + class Frame(object): + _virtualizable_ = ['locals[*]', 'stackdepth'] + +And we use them in ``JitDriver`` like this:: + + jitdriver = JitDriver(greens=[], reds=['frame'], virtualizables=['frame']) + +This declaration means that ``stackdepth`` is a virtualizable **field**, while +``locals`` is a virtualizable **array** (a list stored on a virtualizable). +There are various rules about using virtualizables, especially using +virtualizable arrays that can be very confusing. Those will usually end +up with a compile-time error (as opposed to strange behavior). The rules are: + +* Each array access must be with a known positive index that cannot raise + an ``IndexError``. Using ``no = jit.hint(no, promote=True)`` might be useful + to get a constant-number access. This is only safe if the index is actually + constant or changing rarely within the context of the user's code. + +* If you initialize a new virtualizable in the JIT, it has to be done like this + (for example if we're in ``Frame.__init__``):: + + self = hint(self, access_directly=True, fresh_virtualizable=True) + + that way you can populate the fields directly. + +* If you use virtualizable outside of the JIT – it's very expensive and + sometimes aborts tracing. Consider it carefully as to how do it only for + debugging purposes and not every time (e.g. ``sys._getframe`` call). + +* If you have something equivalent of a Python generator, where the + virtualizable survives for longer, you want to force it before returning. + It's better to do it that way than by an external call some time later. + It's done using ``jit.hint(frame, force_virtualizable=True)`` diff --git a/pypy/doc/release-2.1.0.rst b/pypy/doc/release-2.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0.rst @@ -0,0 +1,89 @@ +============================ +PyPy 2.1 - Considered ARMful +============================ + +We're pleased to announce PyPy 2.1, which targets version 2.7.3 of the Python +language. This is the first release with official support for ARM processors in the JIT. +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.1 release here: + + http://pypy.org/download.html + +We would like to thank the `Raspberry Pi Foundation`_ for supporting the work +to finish PyPy's ARM support. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +The first beta of PyPy3 2.1, targeting version 3 of the Python language, was +just released, more details can be found `here`_. + +.. _`here`: http://morepypy.blogspot.com/2013/07/pypy3-21-beta-1.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.1 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. This release also supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like the Beagleboard, +Chromebook, Cubieboard, etc.) 
that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.1 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* JIT support for ARM, architecture versions 6 and 7, hard- and soft-float ABI + +* Stacklet support for ARM + +* Support for os.statvfs and os.fstatvfs on unix systems + +* Improved logging performance + +* Faster sets for objects + +* Interpreter improvements + +* During packaging, compile the CFFI based TK extension + +* Pickling of numpy arrays and dtypes + +* Subarrays for numpy + +* Bugfixes to numpy + +* Bugfixes to cffi and ctypes + +* Bugfixes to the x86 stacklet support + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in PyPy sometimes failed with a "bad write retry" message. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 + +Cheers, + +David Schneider for the PyPy team. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,26 @@ .. branch: fast-slowpath Added an abstraction for functions with a fast and slow path in the JIT. This speeds up list.append() and list.pop(). + +.. branch: curses_fixes + +.. branch: foldable-getarrayitem-indexerror +Constant-fold reading out of constant tuples in PyPy. + +.. branch: mro-reorder-numpypy-str +No longer delegate numpy string_ methods to space.StringObject, in numpy +this works by kind of by accident. Support for merging the refactor-str-types +branch + +.. branch: kill-typesystem +Remove the "type system" abstraction, now that there is only ever one kind of +type system used. + +.. branch: kill-gen-store-back-in +Kills gen_store_back_in_virtualizable - should improve non-inlined calls by +a bit + +.. branch: dotviewer-linewidth +.. branch: reflex-support +.. branch: numpypy-inplace-op +.. branch: rewritten-loop-logging diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -6,6 +6,10 @@ The following text gives some hints about how to translate the PyPy interpreter. +PyPy supports only being translated as a 32bit program, even on +64bit Windows. See at the end of this page for what is missing +for a full 64bit translation. + To build pypy-c you need a C compiler. Microsoft Visual Studio is preferred, but can also use the mingw32 port of gcc. @@ -63,7 +67,7 @@ INCLUDE, LIB and PATH (for DLLs) environment variables appropriately. 
Abridged method (for -Ojit builds using Visual Studio 2008) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download the versions of all the external packages from https://bitbucket.org/pypy/pypy/downloads/local.zip @@ -112,13 +116,14 @@ nmake -f makefile.msc The sqlite3 database library -~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.sqlite.org/2013/sqlite-amalgamation-3071601.zip and extract it into a directory under the base directory. Also get http://www.sqlite.org/2013/sqlite-dll-win32-x86-3071601.zip and extract the dll into the bin directory, and the sqlite3.def into the sources directory. Now build the import library so cffi can use the header and dll:: + lib /DEF:sqlite3.def" /OUT:sqlite3.lib" copy sqlite3.lib path\to\libs @@ -206,8 +211,86 @@ March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. -.. _'mingw32 build': http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds +.. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds .. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds .. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29 .. _`libffi source files`: http://sourceware.org/libffi/ .. _`RPython translation toolchain`: translation.html + + +What is missing for a full 64-bit translation +--------------------------------------------- + +The main blocker is that we assume that the integer type of RPython is +large enough to (occasionally) contain a pointer value cast to an +integer. The simplest fix is to make sure that it is so, but it will +give the following incompatibility between CPython and PyPy on Win64: + +CPython: ``sys.maxint == 2**32-1, sys.maxsize == 2**64-1`` + +PyPy: ``sys.maxint == sys.maxsize == 2**64-1`` + +...and, correspondingly, PyPy supports ints up to the larger value of +sys.maxint before they are converted to ``long``. The first decision +that someone needs to make is if this incompatibility is reasonable. + +Assuming that it is, the first thing to do is probably to hack *CPython* +until it fits this model: replace the field in PyIntObject with a ``long +long`` field, and change the value of ``sys.maxint``. This might just +work, even if half-brokenly: I'm sure you can crash it because of the +precision loss that undoubtedly occurs everywhere, but try not to. :-) + +Such a hacked CPython is what you'll use in the next steps. We'll call +it CPython64/64. + +It is probably not too much work if the goal is only to get a translated +PyPy executable, and to run all tests before transaction. But you need +to start somewhere, and you should start with some tests in +rpython/translator/c/test/, like ``test_standalone.py`` and +``test_newgc.py``: try to have them pass on top of CPython64/64. + +Keep in mind that this runs small translations, and some details may go +wrong. The most obvious one is to check that it produces C files that +use the integer type ``Signed`` --- but what is ``Signed`` defined to? +It should be equal to ``long`` on every other platforms, but on Win64 it +should be something like ``long long``. 
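A minimal sketch of such a ``long``-versus-``Signed`` size check (assuming only the usual ``rffi``/``lltype`` imports; the test name and the exact platform guard are illustrative, not part of the repository)::

    import sys
    from rpython.rtyper.lltypesystem import rffi, lltype

    def test_long_versus_signed_sizes():
        # lltype.Signed is the machine-word type used by the generated C
        # code; rffi.LONG corresponds to the C ``long``.
        if sys.platform == 'win32' and sys.maxsize > 2**32:
            # the Win64 case discussed above: the C long stays 32-bit,
            # while Signed must be pointer-sized
            assert rffi.sizeof(rffi.LONG) == 4
            assert rffi.sizeof(lltype.Signed) == 8
        else:
            # on every other platform the two sizes agree
            assert rffi.sizeof(rffi.LONG) == rffi.sizeof(lltype.Signed)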
+ +What is more generally needed is to review all the C files in +rpython/translator/c/src for the word ``long``, because this means a +32-bit integer even on Win64. Replace it with ``Signed`` most of the +times. You can replace one with the other without breaking anything on +any other platform, so feel free to. + +Then, these two C types have corresponding RPython types: ``rffi.LONG`` +and ``lltype.Signed`` respectively. The first should really correspond +to the C ``long``. Add tests that check that integers casted to one +type or the other really have 32 and 64 bits respectively, on Win64. + +Once these basic tests work, you need to review ``rpython/rlib/`` for +usages of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to +fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as +always run on top of CPython64/64. Note that there was some early work +done in ``rpython/rlib/rarithmetic`` with the goal of running all the +tests on Win64 on the regular CPython, but I think by now that it's a +bad idea. Look only at CPython64/64. + +The major intermediate goal is to get a translation of PyPy with ``-O2`` +with a minimal set of modules, starting with ``--no-allworkingmodules``; +you need to use CPython64/64 to run this translation too. Check +carefully the warnings of the C compiler at the end. I think that MSVC +is "nice" in the sense that by default a lot of mismatches of integer +sizes are reported as warnings. + +Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` +issues. At some time during this review, we get a working translated +PyPy on Windows 64 that includes all ``--translationmodules``, i.e. +everything needed to run translations. When we are there, the hacked +CPython64/64 becomes much less important, because we can run future +translations on top of this translated PyPy. As soon as we get there, +please *distribute* the translated PyPy. It's an essential component +for anyone else that wants to work on Win64! We end up with a strange +kind of dependency --- we need a translated PyPy in order to translate a +PyPy ---, but I believe it's ok here, as Windows executables are +supposed to never be broken by newer versions of Windows. 
+ +Happy hacking :-) diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -114,13 +114,12 @@ except BaseException as e: try: stderr = sys.stderr - except AttributeError: - pass # too bad - else: print('Error calling sys.excepthook:', file=stderr) originalexcepthook(type(e), e, e.__traceback__) print(file=stderr) print('Original exception was:', file=stderr) + except: + pass # too bad # we only get here if sys.excepthook didn't do its job originalexcepthook(etype, evalue, etraceback) diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -4,7 +4,6 @@ from pypy.interpreter.pyparser.pygram import syms, tokens from pypy.interpreter.pyparser.error import SyntaxError from pypy.interpreter.pyparser import parsestring -from rpython.rlib.objectmodel import specialize def ast_from_node(space, node, compile_info): @@ -1198,7 +1197,7 @@ return self.handle_genexp(gexp_node) return self.handle_testlist(gexp_node) - def count_comp_fors(self, comp_node, for_type, if_type): + def count_comp_fors(self, comp_node): count = 0 current_for = comp_node while True: @@ -1209,10 +1208,10 @@ return count while True: first_child = current_iter.children[0] - if first_child.type == for_type: + if first_child.type == syms.comp_for: current_for = current_iter.children[0] break - elif first_child.type == if_type: + elif first_child.type == syms.comp_if: if len(first_child.children) == 3: current_iter = first_child.children[2] else: @@ -1220,48 +1219,40 @@ else: raise AssertionError("should not reach here") - def count_comp_ifs(self, iter_node, for_type): + def count_comp_ifs(self, iter_node): count = 0 while True: first_child = iter_node.children[0] - if first_child.type == for_type: + if first_child.type == syms.comp_for: return count count += 1 if len(first_child.children) == 2: return count iter_node = first_child.children[2] - @specialize.arg(2) - def comprehension_helper(self, comp_node, - handle_source_expr_meth="handle_expr", - for_type=syms.comp_for, if_type=syms.comp_if, - iter_type=syms.comp_iter, - comp_fix_unamed_tuple_location=False): - handle_source_expression = getattr(self, handle_source_expr_meth) - fors_count = self.count_comp_fors(comp_node, for_type, if_type) + def comprehension_helper(self, comp_node): + fors_count = self.count_comp_fors(comp_node) comps = [] for i in range(fors_count): for_node = comp_node.children[1] for_targets = self.handle_exprlist(for_node, ast.Store) - expr = handle_source_expression(comp_node.children[3]) + expr = self.handle_expr(comp_node.children[3]) assert isinstance(expr, ast.expr) if len(for_node.children) == 1: comp = ast.comprehension(for_targets[0], expr, None) else: - col = comp_node.column - line = comp_node.lineno # Modified in python2.7, see http://bugs.python.org/issue6704 - if comp_fix_unamed_tuple_location: - expr_node = for_targets[0] - assert isinstance(expr_node, ast.expr) - col = expr_node.col_offset - line = expr_node.lineno + # Fixing unamed tuple location + expr_node = for_targets[0] + assert isinstance(expr_node, ast.expr) + col = expr_node.col_offset + line = expr_node.lineno target = ast.Tuple(for_targets, ast.Store, line, col) comp = ast.comprehension(target, expr, None) if len(comp_node.children) == 5: comp_node = comp_iter = comp_node.children[4] - assert comp_iter.type == iter_type - ifs_count = 
self.count_comp_ifs(comp_iter, for_type) + assert comp_iter.type == syms.comp_iter + ifs_count = self.count_comp_ifs(comp_iter) if ifs_count: ifs = [] for j in range(ifs_count): @@ -1270,7 +1261,7 @@ if len(comp_if.children) == 3: comp_node = comp_iter = comp_if.children[2] comp.ifs = ifs - if comp_node.type == iter_type: + if comp_node.type == syms.comp_iter: comp_node = comp_node.children[0] assert isinstance(comp, ast.comprehension) comps.append(comp) @@ -1278,32 +1269,26 @@ def handle_genexp(self, genexp_node): elt = self.handle_expr(genexp_node.children[0]) - comps = self.comprehension_helper(genexp_node.children[1], - comp_fix_unamed_tuple_location=True) + comps = self.comprehension_helper(genexp_node.children[1]) return ast.GeneratorExp(elt, comps, genexp_node.lineno, genexp_node.column) def handle_listcomp(self, listcomp_node): elt = self.handle_expr(listcomp_node.children[0]) - comps = self.comprehension_helper(listcomp_node.children[1], - "handle_testlist", - syms.comp_for, syms.comp_if, - syms.comp_iter, - comp_fix_unamed_tuple_location=True) + comps = self.comprehension_helper(listcomp_node.children[1]) return ast.ListComp(elt, comps, listcomp_node.lineno, listcomp_node.column) def handle_setcomp(self, set_maker): elt = self.handle_expr(set_maker.children[0]) - comps = self.comprehension_helper(set_maker.children[1], - comp_fix_unamed_tuple_location=True) - return ast.SetComp(elt, comps, set_maker.lineno, set_maker.column) + comps = self.comprehension_helper(set_maker.children[1]) + return ast.SetComp(elt, comps, set_maker.lineno, + set_maker.column) def handle_dictcomp(self, dict_maker): key = self.handle_expr(dict_maker.children[0]) value = self.handle_expr(dict_maker.children[2]) - comps = self.comprehension_helper(dict_maker.children[3], - comp_fix_unamed_tuple_location=True) + comps = self.comprehension_helper(dict_maker.children[3]) return ast.DictComp(key, value, comps, dict_maker.lineno, dict_maker.column) diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -1253,6 +1253,12 @@ if1, if2 = comps[0].ifs assert isinstance(if1, ast.Name) assert isinstance(if2, ast.Name) + gen = self.get_first_expr(brack("x for x in y or z")) + comp = gen.generators[0] + assert isinstance(comp.iter, ast.BoolOp) + assert len(comp.iter.values) == 2 + assert isinstance(comp.iter.values[0], ast.Name) + assert isinstance(comp.iter.values[1], ast.Name) def test_genexp(self): self.check_comprehension("(%s)", ast.GeneratorExp) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -961,6 +961,10 @@ None).value assert exc.msg == "too many expressions in star-unpacking assignment" + def test_list_compr_or(self): + yield self.st, 'x = list(d for d in [1] or [])', 'x', [1] + yield self.st, 'y = [d for d in [1] or []]', 'y', [1] + class AppTestCompiler: diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -19,7 +19,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.objectmodel import 
compute_hash, import_from_mixin from rpython.rlib.rstring import StringBuilder @@ -283,8 +283,6 @@ # ____________________________________________________________ class SubBufferMixin(object): - _mixin_ = True - def __init__(self, buffer, offset, size, format, itemsize): self.buffer = buffer self.offset = offset @@ -310,10 +308,11 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) -class SubBuffer(SubBufferMixin, Buffer): - pass +class SubBuffer(Buffer): + import_from_mixin(SubBufferMixin) -class RWSubBuffer(SubBufferMixin, RWBuffer): +class RWSubBuffer(RWBuffer): + import_from_mixin(SubBufferMixin) def setitem(self, index, char): self.buffer.setitem(self.offset + index, char) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,8 +123,8 @@ # CO_OPTIMIZED: no locals dict needed at all # NB: this method is overridden in nestedscope.py flags = code.co_flags - if flags & pycode.CO_OPTIMIZED: - return + if flags & pycode.CO_OPTIMIZED: + return if flags & pycode.CO_NEWLOCALS: self.w_locals = self.space.newdict(module=True) else: @@ -179,11 +179,10 @@ executioncontext.return_trace(self, self.space.w_None) raise executioncontext.return_trace(self, w_exitvalue) - # clean up the exception, might be useful for not - # allocating exception objects in some cases - # if it's a generator, we have to preserve the exception state - if not self.is_generator(): - self.last_exception = None + # it used to say self.last_exception = None + # this is now done by the code in pypyjit module + # since we don't want to invalidate the virtualizable + # for no good reason got_exception = False finally: executioncontext.leave(self, w_exitvalue, got_exception) @@ -265,7 +264,7 @@ break w_value = self.peekvalue(delta) self.pushvalue(w_value) - + def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). @@ -335,7 +334,7 @@ nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w) - + w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()]) w_fastlocals = maker.slp_into_tuple_with_nulls( space, self.locals_stack_w[:nlocals]) @@ -345,7 +344,7 @@ else: w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) - + tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -360,7 +359,7 @@ w(f_lineno), w_fastlocals, space.w_None, #XXX placeholder for f_locals - + #f_restricted requires no additional data! space.w_None, ## self.w_f_trace, ignore for now @@ -394,7 +393,7 @@ ncellvars = len(pycode.co_cellvars) cellvars = cells[:ncellvars] closure = cells[ncellvars:] - + # do not use the instance's __init__ but the base's, because we set # everything like cells from here # XXX hack @@ -481,7 +480,7 @@ ### line numbers ### - def fget_f_lineno(self, space): + def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." 
if self.w_f_trace is None: return space.wrap(self.get_last_lineno()) @@ -495,7 +494,7 @@ except OperationError, e: raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - + if self.w_f_trace is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -527,7 +526,7 @@ if ord(code[new_lasti]) in (DUP_TOP, POP_TOP): raise OperationError(space.w_ValueError, space.wrap("can't jump to 'except' line as there's no exception")) - + # Don't jump into or out of a finally block. f_lasti_setup_addr = -1 new_lasti_setup_addr = -1 @@ -558,12 +557,12 @@ if addr == self.last_instr: f_lasti_setup_addr = setup_addr break - + if op >= HAVE_ARGUMENT: addr += 3 else: addr += 1 - + assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: @@ -614,10 +613,10 @@ block = self.pop_block() block.cleanup(self) f_iblock -= 1 - + self.f_lineno = new_lineno self.last_instr = new_lasti - + def get_last_lineno(self): "Returns the line number of the instruction currently being executed." return pytraceback.offset2lineno(self.pycode, self.last_instr) @@ -643,8 +642,8 @@ self.f_lineno = self.get_last_lineno() space.frame_trace_action.fire() - def fdel_f_trace(self, space): - self.w_f_trace = None + def fdel_f_trace(self, space): + self.w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: @@ -654,7 +653,7 @@ if f is not None: return f.last_exception.w_type return space.w_None - + def fget_f_exc_value(self, space): if self.last_exception is not None: f = self.f_backref() @@ -672,7 +671,7 @@ if f is not None: return space.wrap(f.last_exception.get_traceback()) return space.w_None - + def fget_f_restricted(self, space): if space.config.objspace.honor__builtins__: return space.wrap(self.builtin is not space.builtin) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -329,10 +329,6 @@ instance=True) base_user_setup(self, space, w_subtype) - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - add(Proto) subcls = type(name, (supercls,), body) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -28,15 +28,17 @@ import __pypy__, _thread, signal, time, sys def subthread(): + print('subthread started') try: with __pypy__.thread.signals_enabled: _thread.interrupt_main() for i in range(10): print('x') - time.sleep(0.1) + time.sleep(0.25) except BaseException as e: interrupted.append(e) finally: + print('subthread stops, interrupted=%r' % (interrupted,)) done.append(None) # This is normally called by app_main.py @@ -52,11 +54,13 @@ try: done = [] interrupted = [] + print('--- start ---') _thread.start_new_thread(subthread, ()) for j in range(10): if len(done): break print('.') - time.sleep(0.1) + time.sleep(0.25) + print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 assert 'KeyboardInterrupt' in interrupted[0].__class__.__name__ @@ -113,7 +117,7 @@ def subthread(): try: - time.sleep(0.25) + time.sleep(0.5) with __pypy__.thread.signals_enabled: _thread.interrupt_main() except BaseException as e: diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -10,7 +10,7 @@ from rpython.rlib.unroll 
import unrolling_iterable from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi - +from pypy.objspace.std.floatobject import W_FloatObject @unwrap_spec(typecode=str) def w_array(space, w_cls, typecode, __args__): @@ -59,7 +59,7 @@ def descr_typecode(space, self): return space.wrap(self.typecode) -arr_eq_driver = jit.JitDriver(greens = ['comp_func'], reds = 'auto') +arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens = ['comp_func'], reds = 'auto') EQ, NE, LT, LE, GT, GE = range(6) def compare_arrays(space, arr1, arr2, comp_op): @@ -571,7 +571,7 @@ class TypeCode(object): - def __init__(self, itemtype, unwrap, canoverflow=False, signed=False): + def __init__(self, itemtype, unwrap, canoverflow=False, signed=False, method='__int__'): self.itemtype = itemtype self.bytes = rffi.sizeof(itemtype) self.arraytype = lltype.Array(itemtype, hints={'nolength': True}) @@ -579,6 +579,7 @@ self.signed = signed self.canoverflow = canoverflow self.w_class = None + self.method = method if self.canoverflow: assert self.bytes <= rffi.sizeof(rffi.ULONG) @@ -597,7 +598,7 @@ types = { - 'u': TypeCode(lltype.UniChar, 'unicode_w'), + 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), 'b': TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True), 'B': TypeCode(rffi.UCHAR, 'int_w', True), 'h': TypeCode(rffi.SHORT, 'int_w', True, True), @@ -609,8 +610,8 @@ # rbigint.touint() which # corresponds to the # C-type unsigned long - 'f': TypeCode(lltype.SingleFloat, 'float_w'), - 'd': TypeCode(lltype.Float, 'float_w'), + 'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'), + 'd': TypeCode(lltype.Float, 'float_w', method='__float__'), } for k, v in types.items(): v.typecode = k @@ -678,7 +679,19 @@ def item_w(self, w_item): space = self.space unwrap = getattr(space, mytype.unwrap) - item = unwrap(w_item) + try: + item = unwrap(w_item) + except OperationError, e: + if isinstance(w_item, W_FloatObject): # Odd special case from cpython + raise + if mytype.method != '' and e.match(space, space.w_TypeError): + try: + item = unwrap(space.call_method(w_item, mytype.method)) + except OperationError: + msg = 'array item must be ' + mytype.unwrap[:-2] + raise OperationError(space.w_TypeError, space.wrap(msg)) + else: + raise if mytype.unwrap == 'bigint_w': try: item = item.touint() diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -865,6 +865,73 @@ assert l assert l[0] is None or len(l[0]) == 0 + def test_assign_object_with_special_methods(self): + from array import array + + class Num(object): + def __float__(self): + return 5.25 + + def __int__(self): + return 7 + + class NotNum(object): + pass + + class Silly(object): + def __float__(self): + return None + + def __int__(self): + return None + + class OldNum: + def __float__(self): + return 6.25 + + def __int__(self): + return 8 + + class OldNotNum: + pass + + class OldSilly: + def __float__(self): + return None + + def __int__(self): + return None + + for tc in 'bBhHiIlL': + a = array(tc, [0]) + raises(TypeError, a.__setitem__, 0, 1.0) + a[0] = 1 + a[0] = Num() + assert a[0] == 7 + raises(TypeError, a.__setitem__, NotNum()) + a[0] = OldNum() + assert a[0] == 8 + raises(TypeError, a.__setitem__, OldNotNum()) + raises(TypeError, a.__setitem__, Silly()) + raises(TypeError, a.__setitem__, OldSilly()) + + for tc in 'fd': + a = array(tc, [0]) + a[0] = 1.0 + a[0] = 1 + a[0] = Num() + assert 
a[0] == 5.25 + raises(TypeError, a.__setitem__, NotNum()) + a[0] = OldNum() + assert a[0] == 6.25 + raises(TypeError, a.__setitem__, OldNotNum()) + raises(TypeError, a.__setitem__, Silly()) + raises(TypeError, a.__setitem__, OldSilly()) + + a = array('u', 'hi') + a[0] = 'b' + assert a[0] == 'b' + def test_bytearray(self): a = self.array('u', 'hi') b = self.array('u') diff --git a/pypy/module/binascii/interp_uu.py b/pypy/module/binascii/interp_uu.py --- a/pypy/module/binascii/interp_uu.py +++ b/pypy/module/binascii/interp_uu.py @@ -61,7 +61,7 @@ return ord(bin[i]) except IndexError: return 0 -_a2b_read._always_inline_ = True +_b2a_read._always_inline_ = True @unwrap_spec(bin='bufferstr') def b2a_uu(space, bin): diff --git a/pypy/module/cppyy/genreflex-methptrgetter.patch b/pypy/module/cppyy/genreflex-methptrgetter.patch --- a/pypy/module/cppyy/genreflex-methptrgetter.patch +++ b/pypy/module/cppyy/genreflex-methptrgetter.patch @@ -10,7 +10,7 @@ # The next is to avoid a known problem with gccxml that it generates a # references to id equal '_0' which is not defined anywhere self.xref['_0'] = {'elem':'Unknown', 'attrs':{'id':'_0','name':''}, 'subelems':[]} -@@ -1306,6 +1307,8 @@ +@@ -1328,6 +1329,8 @@ bases = self.getBases( attrs['id'] ) if inner and attrs.has_key('demangled') and self.isUnnamedType(attrs['demangled']) : cls = attrs['demangled'] @@ -19,7 +19,7 @@ clt = '' else: cls = self.genTypeName(attrs['id'],const=True,colon=True) -@@ -1343,7 +1346,7 @@ +@@ -1365,7 +1368,7 @@ # Inner class/struct/union/enum. for m in memList : member = self.xref[m] @@ -28,7 +28,7 @@ and member['attrs'].get('access') in ('private','protected') \ and not self.isUnnamedType(member['attrs'].get('demangled')): cmem = self.genTypeName(member['attrs']['id'],const=True,colon=True) -@@ -1981,8 +1984,15 @@ +@@ -2003,8 +2006,15 @@ else : params = '0' s = ' .AddFunctionMember(%s, Reflex::Literal("%s"), %s%s, 0, %s, %s)' % (self.genTypeID(id), name, type, id, params, mod) s += self.genCommentProperty(attrs) @@ -44,7 +44,7 @@ def genMCODef(self, type, name, attrs, args): id = attrs['id'] cl = self.genTypeName(attrs['context'],colon=True) -@@ -2049,8 +2059,44 @@ +@@ -2071,8 +2081,44 @@ if returns == 'void' : body += ' }\n' else : body += ' }\n' body += '}\n' @@ -105,17 +105,16 @@ -h, --help Print this help\n """ -@@ -127,7 +131,8 @@ - opts, args = getopt.getopt(options, 'ho:s:c:I:U:D:PC', \ +@@ -128,7 +132,7 @@ ['help','debug=', 'output=','selection_file=','pool','dataonly','interpreteronly','deep','gccxmlpath=', 'capabilities=','rootmap=','rootmap-lib=','comments','iocomments','no_membertypedefs', -- 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=']) -+ 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', -+ 'with-methptrgetter']) + 'fail_on_warnings', 'quiet', 'gccxmlopt=', 'reflex', 'split=','no_templatetypedefs','gccxmlpost=', +- 'library=']) ++ 'library=', 'with-methptrgetter']) except getopt.GetoptError, e: print "--->> genreflex: ERROR:",e self.usage(2) -@@ -186,6 +191,8 @@ +@@ -187,6 +191,8 @@ self.rootmap = a if o in ('--rootmap-lib',): self.rootmaplib = a diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -592,11 +592,15 @@ def get(self, w_cppinstance, w_pycppclass): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise 
OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset) def set(self, w_cppinstance, w_value): cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if not cppinstance: + raise OperationError(self.space.w_ReferenceError, self.space.wrap("attribute access requires an instance")) offset = self._get_offset(cppinstance) self.converter.to_memory(self.space, w_cppinstance, w_value, offset) return self.space.w_None diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -426,6 +426,11 @@ # mostly for the benefit of the CINT backend, which treats std as special gbl.std = make_cppnamespace(None, "std", None, False) + # install a type for enums to refer to + # TODO: this is correct for C++98, not for C++11 and in general there will + # be the same issue for all typedef'd builtin types + setattr(gbl, 'unsigned int', int) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -105,6 +105,13 @@ raises(IndexError, c.m_float_array.__getitem__, self.N) raises(IndexError, c.m_double_array.__getitem__, self.N) + # can not access an instance member on the class + raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, getattr, cppyy_test_data, 'm_int') + + assert not hasattr(cppyy_test_data, 'm_bool') + assert not hasattr(cppyy_test_data, 'm_int') + c.destruct() def test03_instance_data_write_access(self): @@ -428,12 +435,17 @@ c = cppyy_test_data() assert isinstance(c, cppyy_test_data) - # TODO: test that the enum is accessible as a type + # test that the enum is accessible as a type + assert cppyy_test_data.what assert cppyy_test_data.kNothing == 6 assert cppyy_test_data.kSomething == 111 assert cppyy_test_data.kLots == 42 + assert cppyy_test_data.what(cppyy_test_data.kNothing) == cppyy_test_data.kNothing + assert cppyy_test_data.what(6) == cppyy_test_data.kNothing + # TODO: only allow instantiations with correct values (C++11) + assert c.get_enum() == cppyy_test_data.kNothing assert c.m_enum == cppyy_test_data.kNothing @@ -455,6 +467,7 @@ assert cppyy_test_data.s_enum == cppyy_test_data.kSomething # global enums + assert gbl.fruit # test type accessible assert gbl.kApple == 78 assert gbl.kBanana == 29 assert gbl.kCitrus == 34 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "3.2.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-alpha0" +#define PYPY_VERSION "2.2.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -41,13 +41,13 @@ def PyNumber_Int(space, w_obj): """Returns the o converted to an integer object on success, or NULL on failure. 
This is the equivalent of the Python expression int(o).""" - return space.int(w_obj) + return space.call_function(space.w_int, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Long(space, w_obj): """Returns the o converted to a long integer object on success, or NULL on failure. This is the equivalent of the Python expression long(o).""" - return space.int(w_obj) + return space.call_function(space.w_int, w_obj) @cpython_api([PyObject], PyObject) def PyNumber_Index(space, w_obj): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,7 +3,6 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread -from pypy.module.thread import os_thread PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -17,6 +16,9 @@ ('dict', PyObject), ])) +class NoThreads(Exception): + pass + @cpython_api([], PyThreadState, error=CANNOT_FAIL) def PyEval_SaveThread(space): """Release the global interpreter lock (if it has been created and thread @@ -45,10 +47,15 @@ @cpython_api([], lltype.Void) def PyEval_InitThreads(space): + if not space.config.translation.thread: + raise NoThreads + from pypy.module.thread import os_thread os_thread.setup_threads(space) @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def PyEval_ThreadsInitialized(space): + if not space.config.translation.thread: + return 0 return 1 # XXX: might be generally useful @@ -236,6 +243,8 @@ """Create a new thread state object belonging to the given interpreter object. The global interpreter lock need not be held, but may be held if it is necessary to serialize calls to this function.""" + if not space.config.translation.thread: + raise NoThreads rthread.gc_thread_prepare() # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. @@ -250,6 +259,8 @@ def PyThreadState_Clear(space, tstate): """Reset all information in a thread state object. The global interpreter lock must be held.""" + if not space.config.translation.thread: + raise NoThreads Py_DecRef(space, tstate.c_dict) tstate.c_dict = lltype.nullptr(PyObject.TO) space.threadlocals.leave_thread(space) diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -19,6 +19,8 @@ def test_number_long(self, space, api): w_l = api.PyNumber_Long(space.wrap(123)) assert api.PyLong_CheckExact(w_l) + w_l = api.PyNumber_Long(space.wrap("123")) + assert api.PyLong_CheckExact(w_l) def test_number_int(self, space, api): w_l = api.PyNumber_Int(space.wraplong(123L)) @@ -27,6 +29,8 @@ assert api.PyLong_CheckExact(w_l) w_l = api.PyNumber_Int(space.wrap(42.3)) assert api.PyLong_CheckExact(w_l) + w_l = api.PyNumber_Int(space.wrap("42")) + assert api.PyLong_CheckExact(w_l) def test_number_index(self, space, api): w_l = api.PyNumber_Index(space.wraplong(123L)) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -852,15 +852,10 @@ # Depending on which opcodes are enabled, eg. 
CALL_METHOD we bump the version # number by some constant # -# * CALL_METHOD +2 -# -# In other words: -# -# default_magic -- used by CPython without the -U option -# default_magic + 1 -- used by CPython with the -U option -# default_magic + 2 -- used by PyPy without any extra opcode -# ... -# default_magic + 5 -- used by PyPy with both extra opcodes +# default_magic - 6 -- used by CPython without the -U option +# default_magic - 5 -- used by CPython with the -U option +# default_magic -- used by PyPy without the CALL_METHOD opcode +# default_magic + 2 -- used by PyPy with the CALL_METHOD opcode # from pypy.interpreter.pycode import default_magic MARSHAL_VERSION_FOR_PYC = 2 diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -140,6 +140,7 @@ ("deg2rad", "radians"), ("rad2deg", "degrees"), ("reciprocal", "reciprocal"), + ("rint", "rint"), ("sign", "sign"), ("signbit", "signbit"), ("sin", "sin"), @@ -175,6 +176,8 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), + ('ones_like', 'ones_like'), + ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -37,7 +37,8 @@ from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: - impl = scalar.Scalar(dtype.base) + w_val = dtype.base.coerce(space, space.wrap(0)) + impl = scalar.Scalar(dtype.base, w_val) else: strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, @@ -79,6 +80,8 @@ if w_val is not None: w_val = dtype.coerce(space, w_val) + else: + w_val = dtype.coerce(space, space.wrap(0)) return W_NDimArray(scalar.Scalar(dtype, w_val)) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -205,6 +205,7 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") descr_invert = _unaryop_impl("invert") + descr_conjugate = _unaryop_impl("conjugate") def descr_divmod(self, space, w_other): w_quotient = self.descr_div(space, w_other) @@ -378,12 +379,14 @@ return self class W_CharacterBox(W_FlexibleBox): - pass + def convert_to(self, dtype): + # XXX assert dtype is str type + return self + class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.interp_dtype import new_string_dtype - arg = space.str_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -517,6 +520,7 @@ all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), round = interp2app(W_GenericBox.descr_round), + conjugate = interp2app(W_GenericBox.descr_conjugate), view = interp2app(W_GenericBox.descr_view), ) @@ -682,12 +686,12 @@ __module__ = "numpypy", ) -W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), +W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), __module__ 
= "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -234,6 +234,9 @@ def is_record_type(self): return self.fields is not None + def is_str_type(self): + return self.num == 18 + def is_str_or_unicode(self): return (self.num == 18 or self.num == 19) @@ -682,7 +685,7 @@ name='string', char='S', w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str], + alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -737,6 +737,27 @@ descr_gt = _binop_comp_impl(_binop_impl("greater")) descr_ge = _binop_comp_impl(_binop_impl("greater_equal")) + def _binop_inplace_impl(ufunc_name): + def impl(self, space, w_other): + w_out = self + ufunc = getattr(interp_ufuncs.get(space), ufunc_name) + return ufunc.call(space, [self, w_other, w_out]) + return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) + + descr_iadd = _binop_inplace_impl("add") + descr_isub = _binop_inplace_impl("subtract") + descr_imul = _binop_inplace_impl("multiply") + descr_idiv = _binop_inplace_impl("divide") + descr_itruediv = _binop_inplace_impl("true_divide") + descr_ifloordiv = _binop_inplace_impl("floor_divide") + descr_imod = _binop_inplace_impl("mod") + descr_ipow = _binop_inplace_impl("power") + descr_ilshift = _binop_inplace_impl("left_shift") + descr_irshift = _binop_inplace_impl("right_shift") + descr_iand = _binop_inplace_impl("bitwise_and") + descr_ior = _binop_inplace_impl("bitwise_or") + descr_ixor = _binop_inplace_impl("bitwise_xor") + def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) @@ -1007,6 +1028,20 @@ __ror__ = interp2app(W_NDimArray.descr_ror), __rxor__ = interp2app(W_NDimArray.descr_rxor), + __iadd__ = interp2app(W_NDimArray.descr_iadd), + __isub__ = interp2app(W_NDimArray.descr_isub), + __imul__ = interp2app(W_NDimArray.descr_imul), + __idiv__ = interp2app(W_NDimArray.descr_idiv), + __itruediv__ = interp2app(W_NDimArray.descr_itruediv), + __ifloordiv__ = interp2app(W_NDimArray.descr_ifloordiv), + __imod__ = interp2app(W_NDimArray.descr_imod), + __ipow__ = interp2app(W_NDimArray.descr_ipow), + __ilshift__ = interp2app(W_NDimArray.descr_ilshift), + __irshift__ = interp2app(W_NDimArray.descr_irshift), + __iand__ = interp2app(W_NDimArray.descr_iand), + __ior__ = interp2app(W_NDimArray.descr_ior), + __ixor__ = interp2app(W_NDimArray.descr_ixor), + __eq__ = interp2app(W_NDimArray.descr_eq), __ne__ = interp2app(W_NDimArray.descr_ne), __lt__ = interp2app(W_NDimArray.descr_lt), diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -328,14 +328,19 @@ w_out = None w_lhs = convert_to_array(space, w_lhs) w_rhs = convert_to_array(space, w_rhs) - if (w_lhs.get_dtype().is_flexible_type() or \ - w_rhs.get_dtype().is_flexible_type()): + w_ldtype = w_lhs.get_dtype() + w_rdtype = w_rhs.get_dtype() + if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + self.comparison_func: + pass + elif 
(w_ldtype.is_flexible_type() or \ + w_rdtype.is_flexible_type()): raise OperationError(space.w_TypeError, space.wrap( 'unsupported operand dtypes %s and %s for "%s"' % \ - (w_rhs.get_dtype().get_name(), w_lhs.get_dtype().get_name(), + (w_rdtype.get_name(), w_ldtype.get_name(), self.name))) calc_dtype = find_binop_result_dtype(space, - w_lhs.get_dtype(), w_rhs.get_dtype(), + w_ldtype, w_rdtype, int_only=self.int_only, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools, @@ -610,6 +615,7 @@ ("positive", "pos", 1), ("negative", "neg", 1), ("absolute", "abs", 1, {"complex_to_float": True}), + ("rint", "rint", 1), ("sign", "sign", 1, {"promote_bools": True}), ("signbit", "signbit", 1, {"bool_result": True, "allow_complex": False}), @@ -665,6 +671,8 @@ "allow_complex": False}), ("logaddexp2", "logaddexp2", 2, {"promote_to_float": True, "allow_complex": False}), + ("ones_like", "ones_like", 1), + ("zeros_like", "zeros_like", 1), ]: self.add_ufunc(space, *ufunc_def) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -155,7 +155,8 @@ obj_iter.next() return cur_value -reduce_cum_driver = jit.JitDriver(greens = ['shapelen', 'func', 'dtype'], +reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', + greens = ['shapelen', 'func', 'dtype'], reds = 'auto') def compute_reduce_cumultative(obj, out, calc_dtype, func, identity): @@ -214,8 +215,7 @@ axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', greens=['shapelen', - 'func', 'dtype', - 'identity'], + 'func', 'dtype'], reds='auto') def do_axis_reduce(shape, func, arr, dtype, axis, out, identity, cumultative, @@ -231,8 +231,7 @@ shapelen = len(shape) while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=dtype, identity=identity, - ) + dtype=dtype) w_val = arr_iter.getitem().convert_to(dtype) if out_iter.first_line: if identity is not None: From noreply at buildbot.pypy.org Tue Aug 27 14:54:27 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 14:54:27 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add numpy talk for tonight Message-ID: <20130827125427.C701E1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5032:425b99b150b7 Date: 2013-08-27 13:52 +0100 http://bitbucket.org/pypy/extradoc/changeset/425b99b150b7/ Log: Add numpy talk for tonight diff --git a/talk/london-demo-session/numpy.rst b/talk/london-demo-session/numpy.rst new file mode 100644 --- /dev/null +++ b/talk/london-demo-session/numpy.rst @@ -0,0 +1,64 @@ +.. include:: beamerdefs.txt + +================================ +Numpy on PyPy status and roadmap +================================ + +Goals +----- + +* fully compliant numpy replacement for PyPy + +* fast looped operations + +* fast vectorized operations + +Why? +---- + +* fast looping + +* single language + +Model +----- + +* some programs have numerical kernels that can be written in C + +* some don't + +* http://arxiv.org/abs/1301.1334 + +* image manipulation demo + +* abstraction unfriendly + +Status +------ + +* fast looped operations + +* ok vectorized operations + +Future goals +------------ + +* finish numpy + +* make it fast + +* make it compatible with more software (matplotlib, scipy) + +Funding +------- + +* about $20k left + +* we likely need more + +* behind schedule, but not behind budget + +Q&A +--- + +* Thank you! 
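
A note on the "fast looped operations" goal in the numpy slides above: it refers to numerical kernels written as plain Python loops over array elements, which on CPython normally have to be vectorized or rewritten in C. The sketch below is only an illustration of that programming model, not part of this changeset, and assumes nothing beyond the ``numpypy`` module that the tests elsewhere in this series already import::

    import numpypy as np

    def brighten(img, delta):
        # naive element-by-element loop: slow under CPython, the kind of
        # kernel the slides aim to run fast under the PyPy numpy JIT
        h, w = img.shape
        for i in range(h):
            for j in range(w):
                v = img[i, j] + delta
                img[i, j] = v if v < 255.0 else 255.0
        return img

    image = np.zeros((64, 64))
    brighten(image, 10.0)

The image manipulation demo mentioned in the slides is not included in the changeset; the loop above only stands in for that kind of kernel.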
diff --git a/talk/london-demo-session/talk.pdf b/talk/london-demo-session/talk.pdf new file mode 100644 index 0000000000000000000000000000000000000000..860b8a9472307b8fc3d2f3d66a76d5a2caf18fea GIT binary patch [cut] From noreply at buildbot.pypy.org Tue Aug 27 15:27:29 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 Aug 2013 15:27:29 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix wrong assumption about h_revision of stubs Message-ID: <20130827132729.580431C01F5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r500:63c2673c2045 Date: 2013-08-27 14:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/63c2673c2045/ Log: fix wrong assumption about h_revision of stubs diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -495,15 +495,24 @@ static void mark_registered_stubs(void) { wlog_t *item; + gcptr L; + G2L_LOOP_FORWARD(registered_stubs, item) { gcptr R = item->addr; assert(R->h_tid & GCFLAG_SMALLSTUB); R->h_tid |= (GCFLAG_MARKED | GCFLAG_VISITED); - gcptr L = (gcptr)(R->h_revision - 2); - L = stmgcpage_visit(L); - R->h_revision = ((revision_t)L) | 2; + if (R->h_revision & 2) { + L = (gcptr)(R->h_revision - 2); + L = stmgcpage_visit(L); + R->h_revision = ((revision_t)L) | 2; + } + else { + L = (gcptr)R->h_revision; + L = stmgcpage_visit(L); + R->h_revision = (revision_t)L; + } /* h_original will be kept up-to-date because it is either == L or L's h_original. And From noreply at buildbot.pypy.org Tue Aug 27 16:37:28 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 27 Aug 2013 16:37:28 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130827143728.427FA1C01F5@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r66359:1f16bf23eaf0 Date: 2013-08-27 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/1f16bf23eaf0/ Log: Fix. 
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -236,6 +236,54 @@ def descr_str(self, space): return space.wrap(''.join(self.data)) + def descr_eq(self, space, w_other): + try: + return space.newbool(self._val(space) == self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ne(self, space, w_other): + try: + return space.newbool(self._val(space) != self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_lt(self, space, w_other): + try: + return space.newbool(self._val(space) < self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_le(self, space, w_other): + try: + return space.newbool(self._val(space) <= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_gt(self, space, w_other): + try: + return space.newbool(self._val(space) > self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ge(self, space, w_other): + try: + return space.newbool(self._val(space) >= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + def descr_buffer(self, space): return BytearrayBuffer(self.data) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -24,64 +24,6 @@ space, lenself, w_start, w_end, upper_bound=upper_bound) return (value, start, end) - def descr_eq(self, space, w_other): - try: - return space.newbool(self._val(space) == self._op_val(space, w_other)) - except OperationError, e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - if (e.match(space, space.w_UnicodeDecodeError) or - e.match(space, space.w_UnicodeEncodeError)): - msg = ("Unicode equal comparison failed to convert both " - "arguments to Unicode - interpreting them as being " - "unequal") - space.warn(space.wrap(msg), space.w_UnicodeWarning) - return space.w_False - raise - - def descr_ne(self, space, w_other): - try: - return space.newbool(self._val(space) != self._op_val(space, w_other)) - except OperationError, e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - if (e.match(space, space.w_UnicodeDecodeError) or - e.match(space, space.w_UnicodeEncodeError)): - msg = ("Unicode unequal comparison failed to convert both " - "arguments to Unicode - interpreting them as being " - "unequal") - space.warn(space.wrap(msg), space.w_UnicodeWarning) - return space.w_True - raise - - def descr_lt(self, space, w_other): - try: - return space.newbool(self._val(space) < self._op_val(space, w_other)) - except OperationError, e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - - def descr_le(self, space, w_other): - try: - return space.newbool(self._val(space) <= self._op_val(space, w_other)) - except OperationError, e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - - def descr_gt(self, space, w_other): - try: - return space.newbool(self._val(space) > 
self._op_val(space, w_other)) - except OperationError, e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - - def descr_ge(self, space, w_other): - try: - return space.newbool(self._val(space) >= self._op_val(space, w_other)) - except OperationError, e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - def descr_len(self, space): return space.wrap(self._len()) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -190,6 +190,68 @@ x = compute_hash(self._value) return space.wrap(x) + def descr_eq(self, space, w_other): + try: + return space.newbool(self._val(space) == self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + if (e.match(space, space.w_UnicodeDecodeError) or + e.match(space, space.w_UnicodeEncodeError)): + msg = ("Unicode equal comparison failed to convert both " + "arguments to Unicode - interpreting them as being " + "unequal") + space.warn(space.wrap(msg), space.w_UnicodeWarning) + return space.w_False + raise + + def descr_ne(self, space, w_other): + try: + return space.newbool(self._val(space) != self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + if (e.match(space, space.w_UnicodeDecodeError) or + e.match(space, space.w_UnicodeEncodeError)): + msg = ("Unicode unequal comparison failed to convert both " + "arguments to Unicode - interpreting them as being " + "unequal") + space.warn(space.wrap(msg), space.w_UnicodeWarning) + return space.w_True + raise + + def descr_lt(self, space, w_other): + try: + return space.newbool(self._val(space) < self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_le(self, space, w_other): + try: + return space.newbool(self._val(space) <= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_gt(self, space, w_other): + try: + return space.newbool(self._val(space) > self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ge(self, space, w_other): + try: + return space.newbool(self._val(space) >= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + def descr_format(self, space, __args__): return newformat.format_method(space, self, __args__, is_unicode=True) From noreply at buildbot.pypy.org Tue Aug 27 16:43:08 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 27 Aug 2013 16:43:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Add two failing tests Message-ID: <20130827144308.83F5B1C0149@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66360:53b319fef3b0 Date: 2013-08-27 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/53b319fef3b0/ Log: Add two failing tests diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1928,6 +1928,22 @@ a.fill(12) assert (a == '1').all() + def test_boolean_indexing(self): + import numpypy as np + a = np.zeros((1, 3)) + b = 
np.array([True]) + + assert (a[b] == a).all() + + a[b] = 1. + + assert (a == [[1., 1., 1.]]).all() + + def test_boolean_array(self): + import numpypy as np + a = np.ndarray([1], dtype=bool) + assert a[0] == True + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy From noreply at buildbot.pypy.org Tue Aug 27 16:43:09 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 27 Aug 2013 16:43:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix boolean indexing Message-ID: <20130827144309.BC32E1C0149@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66361:c28b4d66af84 Date: 2013-08-27 15:41 +0100 http://bitbucket.org/pypy/pypy/changeset/c28b4d66af84/ Log: Fix boolean indexing diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -370,7 +370,7 @@ def setitem_filter(arr, index, value): arr_iter = arr.create_iter() - index_iter = index.create_iter() + index_iter = index.create_iter(arr.get_shape()) value_iter = value.create_iter() shapelen = len(arr.get_shape()) index_dtype = index.get_dtype() From noreply at buildbot.pypy.org Tue Aug 27 16:45:18 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 27 Aug 2013 16:45:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Add failing test for boolean indexing Message-ID: <20130827144518.BA64C1C10AB@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r66362:11f97079c16f Date: 2013-08-27 15:44 +0100 http://bitbucket.org/pypy/pypy/changeset/11f97079c16f/ Log: Add failing test for boolean indexing diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2351,6 +2351,15 @@ a[a & 1 == 0] = 15 assert (a == [[15, 1], [15, 5], [15, 9]]).all() + def test_array_indexing_bool_specialcases(self): + from numpypy import arange, array + a = arange(6) + a[a > 3] = array([15]) + assert (a == [0, 1, 2, 3, 15, 15]).all() + a = arange(6).reshape(3, 2) + a[a & 1 == 1] = [] # here, Numpy sticks garbage into the array + assert a.shape == (3, 2) + def test_copy_kwarg(self): from numpypy import array x = array([1, 2, 3]) From noreply at buildbot.pypy.org Tue Aug 27 16:49:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Aug 2013 16:49:42 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: My talk Message-ID: <20130827144942.A1FE71C10AB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5033:2737418477b4 Date: 2013-08-27 15:49 +0100 http://bitbucket.org/pypy/extradoc/changeset/2737418477b4/ Log: My talk diff --git a/talk/london-demo-session/stm.rst b/talk/london-demo-session/stm.rst new file mode 100644 --- /dev/null +++ b/talk/london-demo-session/stm.rst @@ -0,0 +1,62 @@ + +===================================== +Software Transactional Memory on PyPy +===================================== + + +Pseudo-Goal +----------- + +* "Kill the GIL" + +* GIL = Global Interpreter Lock + + +Real Goals +---------- + +* Multi-core programming + +* But *reasonable* multi-core programming + +* Using the recent model of Transactional Memory + + +PyPy-STM +-------- + +* An executable ``pypy-stm`` which uses internally + Software Transactional Memory + +* Optimistically run multiple threads in parallel + +* The only new feature is ``atomic``:: + + with atomic: + piece of code... 
+ + +Example of higher-level API +--------------------------- + +:: + + def work(...): + ... + several more calls to: transaction.add(work, ...) + ... + + +* Starts N threads, scheduling `work()` calls to them + +* Each `work()` is done in an ``atomic`` block + +* Multi-core, but as if all the `work()` are done sequentially + + +Q&A +--- + +* Thank you! + +* Budget of $10k left, likely more needed too From noreply at buildbot.pypy.org Tue Aug 27 16:53:47 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 27 Aug 2013 16:53:47 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: rst fixes Message-ID: <20130827145347.D56D91C10AB@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5034:1dbc40a977dc Date: 2013-08-27 16:53 +0200 http://bitbucket.org/pypy/extradoc/changeset/1dbc40a977dc/ Log: rst fixes diff --git a/talk/london-demo-session/stm.rst b/talk/london-demo-session/stm.rst --- a/talk/london-demo-session/stm.rst +++ b/talk/london-demo-session/stm.rst @@ -30,7 +30,9 @@ * Optimistically run multiple threads in parallel -* The only new feature is ``atomic``:: +* The only new feature is ``atomic``: + +.. sourcecode:: python with atomic: piece of code... @@ -39,11 +41,12 @@ Example of higher-level API --------------------------- -:: +.. sourcecode:: python def work(...): ... - several more calls to: transaction.add(work, ...) + several more calls to: + transaction.add(work, ...) ... From noreply at buildbot.pypy.org Tue Aug 27 16:57:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 Aug 2013 16:57:23 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add a Status slide Message-ID: <20130827145723.3BC931C10AB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5035:7ff21248320b Date: 2013-08-27 15:57 +0100 http://bitbucket.org/pypy/extradoc/changeset/7ff21248320b/ Log: Add a Status slide diff --git a/talk/london-demo-session/stm.rst b/talk/london-demo-session/stm.rst --- a/talk/london-demo-session/stm.rst +++ b/talk/london-demo-session/stm.rst @@ -57,6 +57,16 @@ * Multi-core, but as if all the `work()` are done sequentially +Status +------ + +* Kind of working without the JIT + +* Roughly three times slower (you need four cores to see benefits) + +* Working on the JIT support + + Q&A --- From noreply at buildbot.pypy.org Tue Aug 27 17:21:54 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 27 Aug 2013 17:21:54 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Move myself to the list of people present. Message-ID: <20130827152154.9E28A1C0149@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5036:7b973027cfca Date: 2013-08-27 16:23 +0100 http://bitbucket.org/pypy/extradoc/changeset/7b973027cfca/ Log: Move myself to the list of people present. diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -12,11 +12,11 @@ Armin Edd Maciej +Manuel People not present ------------------- -Manuel Rami Olmo Tom From noreply at buildbot.pypy.org Tue Aug 27 17:28:46 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 27 Aug 2013 17:28:46 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add Rami to the list of people present. Add him to the documentation task. 
Message-ID: <20130827152846.808701C13E1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5037:331cd9f5bf9e Date: 2013-08-27 16:30 +0100 http://bitbucket.org/pypy/extradoc/changeset/331cd9f5bf9e/ Log: Add Rami to the list of people present. Add him to the documentation task. diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -13,11 +13,11 @@ Edd Maciej Manuel +Rami People not present ------------------- -Rami Olmo Tom Laurie @@ -58,7 +58,7 @@ * explore Laurie's crazy scheme of persisting loop entry counts (Maciej, Anto) IN PROGRESS -* progress on the documentation branch (Olmo, Manuel) +* progress on the documentation branch (Manuel, Rami) * Python 3 benchmarks (Richard2) SOME PROGRESS From noreply at buildbot.pypy.org Tue Aug 27 17:43:56 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 17:43:56 +0200 (CEST) Subject: [pypy-commit] pypy default: move the resume descr creation to where it's needed Message-ID: <20130827154356.810C11C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66363:868535ba6a6b Date: 2013-08-27 16:42 +0100 http://bitbucket.org/pypy/pypy/changeset/868535ba6a6b/ Log: move the resume descr creation to where it's needed diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1015,9 +1015,6 @@ @arguments("int", "boxes3", "jitcode_position", "boxes3", "orgpc") def opimpl_jit_merge_point(self, jdindex, greenboxes, jcposition, redboxes, orgpc): - resumedescr = compile.ResumeAtPositionDescr() - self.metainterp.capture_resumedata(resumedescr, orgpc) - any_operation = len(self.metainterp.history.operations) > 0 jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) @@ -1049,6 +1046,9 @@ # much less expensive to blackhole out of. 
saved_pc = self.pc self.pc = orgpc + resumedescr = compile.ResumeAtPositionDescr() + self.metainterp.capture_resumedata(resumedescr, orgpc) + self.metainterp.reached_loop_header(greenboxes, redboxes, resumedescr) self.pc = saved_pc # no exception, which means that the jit_merge_point did not From noreply at buildbot.pypy.org Tue Aug 27 17:43:57 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 17:43:57 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130827154357.D097E1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66364:892ad8713af9 Date: 2013-08-27 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/892ad8713af9/ Log: merge diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1229,7 +1229,10 @@ if cvt is not None: param = cvt(param) - param = adapt(param) + try: + param = adapt(param) + except: + pass # And use previous value if param is None: rc = _lib.sqlite3_bind_null(self._statement, idx) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -370,7 +370,7 @@ def setitem_filter(arr, index, value): arr_iter = arr.create_iter() - index_iter = index.create_iter() + index_iter = index.create_iter(arr.get_shape()) value_iter = value.create_iter() shapelen = len(arr.get_shape()) index_dtype = index.get_dtype() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1922,6 +1922,28 @@ a = numpy.arange(10.).reshape((5, 2))[::2] assert (loads(dumps(a)) == a).all() + def test_string_filling(self): + import numpypy as numpy + a = numpy.empty((10,10), dtype='c1') + a.fill(12) + assert (a == '1').all() + + def test_boolean_indexing(self): + import numpypy as np + a = np.zeros((1, 3)) + b = np.array([True]) + + assert (a[b] == a).all() + + a[b] = 1. + + assert (a == [[1., 1., 1.]]).all() + + def test_boolean_array(self): + import numpypy as np + a = np.ndarray([1], dtype=bool) + assert a[0] == True + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -2329,6 +2351,15 @@ a[a & 1 == 0] = 15 assert (a == [[15, 1], [15, 5], [15, 9]]).all() + def test_array_indexing_bool_specialcases(self): + from numpypy import arange, array + a = arange(6) + a[a > 3] = array([15]) + assert (a == [0, 1, 2, 3, 15, 15]).all() + a = arange(6).reshape(3, 2) + a[a & 1 == 1] = [] # here, Numpy sticks garbage into the array + assert a.shape == (3, 2) + def test_copy_kwarg(self): from numpypy import array x = array([1, 2, 3]) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1764,12 +1764,16 @@ arr.storage[i] = arg[i] return interp_boxes.W_StringBox(arr, 0, arr.dtype) - @jit.unroll_safe def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) # XXX simplify to range(box.dtype.get_size()) ? 
+ return self._store(arr.storage, i, offset, box) + + @jit.unroll_safe + def _store(self, storage, i, offset, box): + assert isinstance(box, interp_boxes.W_StringBox) for k in range(min(self.size, box.arr.size-offset)): - arr.storage[k + i] = box.arr.storage[k + offset] + storage[k + i] = box.arr.storage[k + offset] def read(self, arr, i, offset, dtype=None): if dtype is None: @@ -1859,6 +1863,11 @@ arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) + def fill(self, storage, width, box, start, stop, offset): + from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArrayNotOwning + for i in xrange(start, stop, width): + self._store(storage, i, offset, box) + NonNativeStringType = StringType class UnicodeType(BaseType, BaseStringType): diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -228,3 +228,18 @@ cur = con.cursor() cur.execute(u'SELECT 1 as méil') assert cur.description[0][0] == u"méil".encode('utf-8') + +def test_adapter_exception(con): + def cast(obj): + raise ZeroDivisionError + + _sqlite3.register_adapter(int, cast) + try: + cur = con.cursor() + cur.execute("select ?", (4,)) + val = cur.fetchone()[0] + # Adapter error is ignored, and parameter is passed as is. + assert val == 4 + assert type(val) is int + finally: + del _sqlite3.adapters[(int, _sqlite3.PrepareProtocol)] From noreply at buildbot.pypy.org Tue Aug 27 18:01:48 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 18:01:48 +0200 (CEST) Subject: [pypy-commit] pypy optmodel-refactor: commit in-progress, closing the branch to start again Message-ID: <20130827160148.264D71C10AB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optmodel-refactor Changeset: r66365:65538d09eae7 Date: 2013-08-27 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/65538d09eae7/ Log: commit in-progress, closing the branch to start again diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -7,7 +7,8 @@ from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp from rpython.jit.metainterp.typesystem import llhelper -from rpython.jit.metainterp.resume2 import OptimizerResumeInterpreter +from rpython.jit.metainterp.resume2 import OptimizerResumeInterpreter,\ + ResumeBytecodeBuilder, MODEL_FAILARGS from rpython.tool.pairtype import extendabletype from rpython.rlib.debug import debug_print from rpython.rlib.objectmodel import specialize @@ -345,17 +346,22 @@ self.allboxes = {} for k, v in allboxes.iteritems(): self.allboxes[v] = k - # we fish bytecode from the loop + # XXX make this be fished from loop, not from a descr, sounds a bit + # obscure for op in loop.operations: if op.is_guard(): descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) bc = descr.rd_bytecode jitcodes = metainterp_sd.alljitcodes - self.resume_bc = OptimizerResumeInterpreter(bc, jitcodes) + self.resume_bc_writer = ResumeBytecodeBuilder(metainterp_sd, + MODEL_FAILARGS) + self.resume_bc = OptimizerResumeInterpreter(bc, jitcodes, + self.resume_bc_writer) break else: self.resume_bc = None # trivial case + self.resume_bc_writer = None self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu 
self.loop = loop diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -5,7 +5,7 @@ opcodes: -UPDATE_PC [list-of-alive-boxes] +CAPTURE_POINT [list-of-alive-boxes] ENTER_FRAME [list-of-alive-boxes] LEAVE_FRAME @@ -19,11 +19,34 @@ BC_NAMES = ['ENTER_FRAME', 'LEAVE_FRAME', 'CAPTURE_POINT'] +MODEL_FRONTEND = 0 +MODEL_FAILARGS = 1 +MODEL_BACKEND = 2 + +class Bytecode(object): + """ an object representing a single bytecode. We keep it on faildescrs, + however it would be more efficient to keep it on a loop token. + + XXX fix that + + model can be one of the above, it means the numbers in numberings + are relative to: + + frontend - means the index is an index into list of allboxes + failargs - means it's index in the list of failargs + backend - a backend specific number + + """ + def __init__(self, bc_repr, model): + self.bc_repr = bc_repr + self.model = model + class ResumeBytecodeBuilder(object): - def __init__(self, metainterp_sd): + def __init__(self, metainterp_sd, model=MODEL_FRONTEND): self.bc = [] self.boxes = {} self.metainterp_sd = metainterp_sd + self.model = model def enumerate_box(self, box): if box in self.boxes: @@ -73,12 +96,12 @@ if op.is_guard(): descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) - descr.rd_bytecode = finished_bc - print_bc(finished_bc, self.metainterp_sd.alljitcodes) + descr.rd_bytecode = Bytecode(finished_bc, self.model) + #print_bc(finished_bc, self.metainterp_sd.alljitcodes) class AbstractBytecodeInterpreter(object): def __init__(self, bc, alljitcodes): - self.bc = bc + self.bc = bc.bc_repr self.alljitcodes = alljitcodes self.init() @@ -144,6 +167,14 @@ self.framestack.pop() class OptimizerResumeInterpreter(AbstractBytecodeInterpreter): + """ This resume interpreter reads the resume and writes the new one + in resume_bc_writer + """ + + def __init__(self, bc, jitcode, resume_bc_writer): + AbstractBytecodeInterpreter.__init__(self, bc, jitcode) + self.resume_bc_writer = resume_bc_writer + def init(self): self.pos = 0 self.framestack = [] @@ -151,6 +182,7 @@ self.cur_boxlist = None def get_current_boxes(self, allboxes): + xxx newboxes = [None] * (self.cur_len + len(self.cur_boxlist)) i = 0 j = 0 From noreply at buildbot.pypy.org Tue Aug 27 18:01:49 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 27 Aug 2013 18:01:49 +0200 (CEST) Subject: [pypy-commit] pypy optmodel-refactor: close abandoned branch in favor of a new approach Message-ID: <20130827160149.5EF6E1C10AB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: optmodel-refactor Changeset: r66366:6a5f3429faad Date: 2013-08-27 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/6a5f3429faad/ Log: close abandoned branch in favor of a new approach From noreply at buildbot.pypy.org Tue Aug 27 18:06:34 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 27 Aug 2013 18:06:34 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: generate pdf Message-ID: <20130827160634.CB6C81C10AB@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5038:9ebed77c19ba Date: 2013-08-27 18:06 +0200 http://bitbucket.org/pypy/extradoc/changeset/9ebed77c19ba/ Log: generate pdf diff --git a/talk/london-demo-session/stm.pdf b/talk/london-demo-session/stm.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8c45eb7023601bdf8016f4794a11ca1a8c948558 GIT binary patch [cut] From noreply at buildbot.pypy.org Tue Aug 27 18:43:42 2013 From: 
noreply at buildbot.pypy.org (antocuni) Date: Tue, 27 Aug 2013 18:43:42 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add a new task Message-ID: <20130827164342.EBA5B1C36B7@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5039:b5d64247ed62 Date: 2013-08-27 18:43 +0200 http://bitbucket.org/pypy/extradoc/changeset/b5d64247ed62/ Log: add a new task diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -68,6 +68,8 @@ * find the slow generator task (Marko, Romain) INVALID +* review the pypy-pyarray branch + * general wizardry (Carl Friedrich; Armin) * general getting started (Rami, Carl Friedrich) GOT STARTED From noreply at buildbot.pypy.org Tue Aug 27 18:58:24 2013 From: noreply at buildbot.pypy.org (necaris) Date: Tue, 27 Aug 2013 18:58:24 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: (mjacob, necaris) Improve documentation index page Message-ID: <20130827165824.D65BA1C36B7@cobra.cs.uni-duesseldorf.de> Author: Rami Chowdhury Branch: improve-docs Changeset: r66367:85eb4f2b2c2d Date: 2013-08-27 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/85eb4f2b2c2d/ Log: (mjacob, necaris) Improve documentation index page Add a getting started section, factor out installation / building to their own sections, and update links. diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -4,31 +4,10 @@ .. contents:: -What is PyPy ? --------------- - -In common parlance, PyPy has been used to mean two things. The first is the -:doc:`RPython translation toolchain `, which is a framework for generating -dynamic programming language implementations. And the second is one -particular implementation that is so generated -- -an implementation of the Python_ programming language written in -Python itself. It is designed to be flexible and easy to experiment with. - -This double usage has proven to be confusing, and we are trying to move -away from using the word PyPy to mean both things. From now on we will -try to use PyPy to only mean the Python implementation, and say the -:doc:`RPython translation toolchain ` when we mean the framework. Some older -documents, presentations, papers and videos will still have the old -usage. You are hereby warned. - -We target a large variety of platforms, small and large, by providing a -compiler toolsuite that can produce custom Python versions. Platform, memory -and threading models, as well as the JIT compiler itself, are aspects of the -translation process - as opposed to encoding low level details into the -language implementation itself. :doc:`more... ` .. _Python: http://docs.python.org/reference/ +:doc:`Downloading and installing PyPy ` Just the facts -------------- @@ -105,6 +84,8 @@ .. _pip: http://pypi.python.org/pypi/pip + + Clone the repository ~~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/doc/index-old.rst b/pypy/doc/index-old.rst deleted file mode 100644 --- a/pypy/doc/index-old.rst +++ /dev/null @@ -1,127 +0,0 @@ -Welcome to PyPy -=============== - -The PyPy project aims to produce a flexible and fast Python_ -implementation. This page documents the development of the PyPy -project itself. If you don't know what PyPy is, consult the `PyPy -website`_. If you just want to use PyPy, consult the `download`_ page -and the :doc:`getting-started-python` documents. If you want to help -develop PyPy -- keep reading! 
- -PyPy is written in a language called `RPython`_, which is suitable for -writing dynamic language interpreters (and not much else). RPython is -a subset of Python and is itself written in Python. If you'd like to -learn more about RPython, `XXXX` should provide a -reasonable overview. - -**If you would like to contribute to PyPy**, please read :doc:`how to -contribute ` first. PyPy's development style is somewhat different to -that of many other software projects and it often surprises -newcomers. What is **not** necessary is an academic background from -university in writing compilers -- much of it does not apply to PyPy -any way. - -All of the documentation and source code is available under the MIT license, -unless otherwise specified. Consult :source:`LICENSE`. - -.. _Python: http://python.org/ -.. _download: http://pypy.org/download.html -.. _PyPy website: http://pypy.org/ -.. _RPython: http://rpython.readthedocs.org/ - -.. toctree:: - :hidden: - - getting-started-python - how-to-contribute - - -Index of various topics: ------------------------- - -* :doc:`getting-started`: how to install and run the PyPy Python interpreter - -* :doc:`FAQ `: some frequently asked questions. - -* `Release 2.1.0`_: the latest official release - -* `PyPy Blog`_: news and status info about PyPy - -* :doc:`Papers `: Academic papers, talks, and related projects - -* `speed.pypy.org`_: Daily benchmarks of how fast PyPy is - -* :doc:`project-ideas`: In case you want to get your feet wet... - -* :doc:`More stuff `: this is a collection of documentation that's there, but not - particularly organized - -.. _PyPy blog: http://morepypy.blogspot.com/ -.. _Release 2.1.0: http://pypy.org/download.html -.. _speed.pypy.org: http://speed.pypy.org - -.. toctree:: - :hidden: - - getting-started - faq - extradoc - project-ideas - project-documentation - - -Documentation for the PyPy Python Interpreter ---------------------------------------------- - -New features of PyPy's Python Interpreter and -Translation Framework: - - * :doc:`cpython_differences` - * :doc:`objspace-proxies` - transparent proxy documentation - * :doc:`Continulets and greenlets ` - documentation about stackless features - * :doc:`jit-hooks` - * :doc:`sandbox` - * :doc:`Garbage collection environment variables ` - -.. toctree:: - :hidden: - - cpython_differences - objspace-proxies - stackless - jit-hooks - sandbox - gc_info - - -.. _contact: - -Mailing lists, bug tracker, IRC channel ---------------------------------------------- - -* `Development mailing list`_: development and conceptual - discussions. - -* `Mercurial commit mailing list`_: updates to code and - documentation. - -* `Development bug/feature tracker`_: filing bugs and feature requests. - -* **IRC channel #pypy on freenode**: Many of the core developers are hanging out - at #pypy on irc.freenode.net. You are welcome to join and ask questions - (if they are not already developed in the :doc:`FAQ `). - You can find logs of the channel here_. - -.. _development mailing list: http://python.org/mailman/listinfo/pypy-dev -.. _Mercurial commit mailing list: http://python.org/mailman/listinfo/pypy-commit -.. _development bug/feature tracker: https://bugs.pypy.org -.. _here: http://tismerysoft.de/pypy/irc-logs/pypy - - -Meeting PyPy developers ------------------------ - -The PyPy developers are organizing sprints and presenting results at -conferences all year round. They will be happy to meet in person with -anyone interested in the project. 
Watch out for sprint announcements -on the `development mailing list`_. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -2,28 +2,41 @@ ================================ Welcome to the documentation for PyPy, a fast_, compliant alternative -implementation of the Python_ language. If you don't know what PyPy is, -consult the `PyPy website`_. +implementation of the Python_ language. -PyPy is written using the RPython toolchain. RPython enables writing dynamic -language interpreters in a subset of Python which can be translated to C code -including an automatically generated JIT for the implemented language. If you -want to learn more about RPython, see the `RPython website`_. +* If you want to find out more about what PyPy is, have a look at our :doc:`introduction ` + or consult the `PyPy website`_. + +* If you're interested in trying PyPy out, check out the :doc:`installation instructions `. + +* If you want to help develop PyPy, please have a look at :doc:`how to contribute ` + and get in touch (:ref:`contact`)! + +All of the documentation and source code is available under the MIT license, +unless otherwise specified. Consult :source:`LICENSE`. .. _fast: http://speed.pypy.org .. _Python: http://python.org/ .. _PyPy website: http://pypy.org/ -.. _RPython website: http://rpython.readthedocs.org/ -User documentation ------------------- +Getting Started +--------------- .. toctree:: :maxdepth: 1 + introduction install build + faq + +Using PyPy +---------- + +.. toctree:: + :maxdepth: 1 + cpython_differences gc_info jit-hooks @@ -37,15 +50,28 @@ ------------------------- .. toctree:: - :maxdepth: 2 + :maxdepth: 1 + + how-to-contribute + project-ideas + project-documentation +.. TODO: audit ^^ + + +.. TODO: Fill this in Academical stuff ---------------- .. toctree:: - :maxdepth: 2 + :maxdepth: 1 + extradoc +.. TODO: Remove this? Or fill it with links to papers? + + +.. _contact: Contact ------- @@ -83,4 +109,3 @@ * :ref:`genindex` * :ref:`modindex` * :ref:`search` - diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst --- a/pypy/doc/install.rst +++ b/pypy/doc/install.rst @@ -1,2 +1,78 @@ Downloading and Installing PyPy =============================== + +Download a pre-built PyPy +~~~~~~~~~~~~~~~~~~~~~~~~~ + +The quickest way to start using PyPy is to download a prebuilt binary for your +OS and architecture. You can either use the `most recent release`_ or one of +our `development nightly build`_. Please note that the nightly builds are not +guaranteed to be as stable as official releases, use them at your own risk. + +.. _most recent release: http://pypy.org/download.html +.. _development nightly build: http://buildbot.pypy.org/nightly/trunk/ + + +Installing PyPy +~~~~~~~~~~~~~~~ + +PyPy is ready to be executed as soon as you unpack the tarball or the zip +file, with no need to install it in any specific location:: + + $ tar xf pypy-2.1.tar.bz2 + $ ./pypy-2.1/bin/pypy + Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) + [PyPy 2.1.0 with GCC 4.4.3] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + And now for something completely different: ``PyPy is an exciting technology + that lets you to write fast, portable, multi-platform interpreters with less + effort'' + >>>> + +If you want to make PyPy available system-wide, you can put a symlink to the +``pypy`` executable in ``/usr/local/bin``. 
It is important to put a symlink +and not move the binary there, else PyPy would not be able to find its +library. + +If you want to install 3rd party libraries, the most convenient way is to +install distribute_ and pip_: + + $ curl -O http://python-distribute.org/distribute_setup.py + + $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py + + $ ./pypy-2.1/bin/pypy distribute_setup.py + + $ ./pypy-2.1/bin/pypy get-pip.py + + $ ./pypy-2.1/bin/pip install pygments # for example + +Third party libraries will be installed in ``pypy-2.1/site-packages``, and +the scripts in ``pypy-2.1/bin``. + + +Installing using virtualenv +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It is often convenient to run pypy inside a virtualenv. To do this +you need a recent version of virtualenv -- 1.6.1 or greater. You can +then install PyPy both from a precompiled tarball or from a mercurial +checkout:: + + # from a tarball + $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env + + # from the mercurial checkout + $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env + +Note that bin/python is now a symlink to bin/pypy. + +.. _distribute: http://www.python-distribute.org/ +.. _pip: http://pypi.python.org/pypi/pip + + +Building PyPy yourself +~~~~~~~~~~~~~~~~~~~~~~ + +If you're interested in getting more involved, or doing something different with +PyPy, consult `the build instructions `. diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/introduction.rst @@ -0,0 +1,22 @@ +What is PyPy? +============= + +In common parlance, PyPy has been used to mean two things. The first is the +:doc:`RPython translation toolchain `, which is a framework for generating +dynamic programming language implementations. And the second is one +particular implementation that is so generated -- +an implementation of the Python_ programming language written in +Python itself. It is designed to be flexible and easy to experiment with. + +This double usage has proven to be confusing, and we are trying to move +away from using the word PyPy to mean both things. From now on we will +try to use PyPy to only mean the Python implementation, and say the +:doc:`RPython translation toolchain ` when we mean the framework. Some older +documents, presentations, papers and videos will still have the old +usage. You are hereby warned. + +We target a large variety of platforms, small and large, by providing a +compiler toolsuite that can produce custom Python versions. Platform, memory +and threading models, as well as the JIT compiler itself, are aspects of the +translation process - as opposed to encoding low level details into the +language implementation itself. :doc:`more... 
` diff --git a/pypy/doc/project-documentation.rst b/pypy/doc/project-documentation.rst --- a/pypy/doc/project-documentation.rst +++ b/pypy/doc/project-documentation.rst @@ -79,11 +79,3 @@ config/index commandline_ref dir-reference - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` -* :ref:`glossary` From noreply at buildbot.pypy.org Tue Aug 27 20:25:15 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 27 Aug 2013 20:25:15 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: revert 56fa5d73e5ec Message-ID: <20130827182515.755DD1C13E4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: pypy-pyarray Changeset: r66368:0bf8764d2f62 Date: 2013-08-27 21:18 +0300 http://bitbucket.org/pypy/pypy/changeset/0bf8764d2f62/ Log: revert 56fa5d73e5ec diff --git a/pypy/module/cpyext/floatobject.py b/pypy/module/cpyext/floatobject.py --- a/pypy/module/cpyext/floatobject.py +++ b/pypy/module/cpyext/floatobject.py @@ -25,7 +25,7 @@ """ Returns the o converted to a float object on success, or NULL on failure. This is the equivalent of the Python expression float(o).""" - return space.float(w_obj) + return space.call_function(space.w_float, w_obj) @cpython_api([PyObject, rffi.CCHARPP], PyObject) def PyFloat_FromString(space, w_obj, _): From noreply at buildbot.pypy.org Tue Aug 27 21:15:13 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 27 Aug 2013 21:15:13 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Issue1591: sqlite should swallow exceptions raised in adapter. Message-ID: <20130827191513.A6DC01C13E1@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: release-2.1.x Changeset: r66369:3a6d7c5fbebd Date: 2013-08-26 23:27 +0200 http://bitbucket.org/pypy/pypy/changeset/3a6d7c5fbebd/ Log: Issue1591: sqlite should swallow exceptions raised in adapter. (transplanted from 6181c1116a9296f123aada93b1dcd4835517f643) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1229,7 +1229,10 @@ if cvt is not None: param = cvt(param) - param = adapt(param) + try: + param = adapt(param) + except: + pass # And use previous value if param is None: rc = _lib.sqlite3_bind_null(self._statement, idx) diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -228,3 +228,18 @@ cur = con.cursor() cur.execute(u'SELECT 1 as méil') assert cur.description[0][0] == u"méil".encode('utf-8') + +def test_adapter_exception(con): + def cast(obj): + raise ZeroDivisionError + + _sqlite3.register_adapter(int, cast) + try: + cur = con.cursor() + cur.execute("select ?", (4,)) + val = cur.fetchone()[0] + # Adapter error is ignored, and parameter is passed as is. 
+ assert val == 4 + assert type(val) is int + finally: + del _sqlite3.adapters[(int, _sqlite3.PrepareProtocol)] From noreply at buildbot.pypy.org Tue Aug 27 22:08:40 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 27 Aug 2013 22:08:40 +0200 (CEST) Subject: [pypy-commit] pypy pypy-pyarray: fix so test_zjit does not fail Message-ID: <20130827200840.5CAF91C01F5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: pypy-pyarray Changeset: r66370:b3a2caa8b269 Date: 2013-08-27 23:03 +0300 http://bitbucket.org/pypy/pypy/changeset/b3a2caa8b269/ Log: fix so test_zjit does not fail diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -47,6 +47,9 @@ def __init__(self, name): self.name = name + def lookup(self, name): + return self.getdictvalue(self, name) + class FakeSpace(object): w_ValueError = W_TypeObject("ValueError") w_TypeError = W_TypeObject("TypeError") @@ -204,6 +207,10 @@ return W_NDimArray return self.w_None + def lookup(self, w_obj, name): + w_type = self.type(w_obj) + return w_type.lookup(name) + def gettypefor(self, w_obj): return None From noreply at buildbot.pypy.org Wed Aug 28 11:39:53 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 28 Aug 2013 11:39:53 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: (all) planning for today Message-ID: <20130828093953.301F61C0189@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5040:0edfb54e5d7e Date: 2013-08-28 10:39 +0100 http://bitbucket.org/pypy/extradoc/changeset/0edfb54e5d7e/ Log: (all) planning for today diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -6,7 +6,6 @@ Remi Marko Romain -Lukas Richard Richard2 Armin @@ -21,6 +20,7 @@ Olmo Tom Laurie +Lukas Tasks ----- @@ -38,42 +38,40 @@ threads or greenlets, probably by adding a thread-or-greenlet number prefix (see branch stmgc-c4 where we already add a thread num prefix) -* general STM things (Remy, Armin) GENERAL PROGRESS +* general STM things (Remi, Armin) ONE BUG LESS -* general Numpy things (Romain) GENERAL PROGRESS +* general Numpy things TWO BUGS LESS, ONE BUG MORE -* PyOpenCL (Marko) +* track raw array out of bounds in lldebug builds + +* PyOpenCL (Marko) HARDER THAN WE THOUGHT, BUT PROGRESS * fix some of the RPython nits that Edd found SOME PROGRESS -* continue less-stringly-ops and other RPython cleanups (Ronan, Romain) +* continue less-stringly-ops and other RPython cleanups -* better error messages for union errors (Edd, Ronan) TO BE PUSHED +* better error messages for union errors TO BE MERGED -* better error messages for moving attributes +* better error messages for moving attributes (Edd, Ronan) -* programming +* programming (all) -* JIT for xlispx (Richard, Lukas, Carl Friedrich around) IN PROGRESS +* JIT for xlispx (Richard, Carl Friedrich) WE HAVE A JIT (NOT A GOOD ONE THOUGH) -* explore Laurie's crazy scheme of persisting loop entry counts (Maciej, Anto) IN PROGRESS +* explore Laurie's crazy scheme of persisting loop entry counts EXPERIMENTING -* progress on the documentation branch (Manuel, Rami) +* progress on refactoring resume data (Maciek, Anto) -* Python 3 benchmarks (Richard2) SOME PROGRESS +* progress on the documentation branch (Manuel, Rami) NICER GETTING STARTED/INTRODUCTION -* meditate on benchmarking infrastructure (Richard2, Edd, Maciej around) +* Python 3 
benchmarks (Richard2) PULL REQUEST SENT -* shave all the yaks - -* find the slow generator task (Marko, Romain) INVALID +* meditate on benchmarking infrastructure (Richard2, Maciej around) * review the pypy-pyarray branch * general wizardry (Carl Friedrich; Armin) -* general getting started (Rami, Carl Friedrich) GOT STARTED - Discussions planned -------------------- From noreply at buildbot.pypy.org Wed Aug 28 11:43:55 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 11:43:55 +0200 (CEST) Subject: [pypy-commit] pypy nobold-backtrace: Improve printing of error messages from the annotator. Message-ID: <20130828094355.EACA91C0189@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: nobold-backtrace Changeset: r66371:bd21357ce288 Date: 2013-08-26 15:09 +0100 http://bitbucket.org/pypy/pypy/changeset/bd21357ce288/ Log: Improve printing of error messages from the annotator. The trace and summary now print at different log levels. diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -246,17 +246,19 @@ tb = None if got_error: import traceback - errmsg = ["Error:\n"] + stacktrace_errmsg = ["Error:\n"] exc, val, tb = sys.exc_info() - errmsg.extend([" %s" % line for line in traceback.format_exception(exc, val, tb)]) + stacktrace_errmsg.extend([" %s" % line for line in traceback.format_tb(tb)]) + summary_errmsg = traceback.format_exception_only(exc, val) block = getattr(val, '__annotator_block', None) if block: class FileLike: def write(self, s): - errmsg.append(" %s" % s) - errmsg.append("Processing block:\n") + summary_errmsg.append(" %s" % s) + summary_errmsg.append("Processing block:\n") t.about(block, FileLike()) - log.ERROR(''.join(errmsg)) + log.info(''.join(stacktrace_errmsg)) + log.ERROR(''.join(summary_errmsg)) else: log.event('Done.') From noreply at buildbot.pypy.org Wed Aug 28 11:43:57 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 11:43:57 +0200 (CEST) Subject: [pypy-commit] pypy nobold-backtrace: Improve error reporting/formatting for UnionErrors and test. Message-ID: <20130828094357.5369A1C0189@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: nobold-backtrace Changeset: r66372:208c4f0bed8e Date: 2013-08-26 18:14 +0100 http://bitbucket.org/pypy/pypy/changeset/208c4f0bed8e/ Log: Improve error reporting/formatting for UnionErrors and test. When instantiating UnionError with two args, this is a "generic" UnionError. Optionally, a third argument can be passed to gove the user additional hints. 
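To make the two forms described in the log above concrete, here is a minimal,
self-contained sketch of how the constructor added in the diff below can be
raised. SomeInteger and SomeString here are trivial stand-ins, not the real
annotation classes from rpython.annotator.model, and the UnionError body just
mirrors the __init__/__str__ that the changeset adds:

    class SomeInteger(object):   # simplified stand-in
        def __repr__(self):
            return "SomeInteger()"

    class SomeString(object):    # simplified stand-in
        def __repr__(self):
            return "SomeString()"

    class UnionError(Exception):
        # Mirrors the __init__/__str__ added in the diff below.
        def __init__(self, s_obj1, s_obj2, msg=None):
            self.s_obj1 = s_obj1
            self.s_obj2 = s_obj2
            self.msg = msg
            self.source = None

        def __str__(self):
            s = "\n\n"
            if self.msg is not None:
                s += "%s\n\n" % self.msg
            s += "Offending annotations:\n"
            s += "%s\n%s\n\n" % (self.s_obj1, self.s_obj2)
            if self.source is not None:
                s += self.source
            return s

    # Generic two-argument form: only the offending annotations are shown.
    try:
        raise UnionError(SomeInteger(), SomeString())
    except UnionError as e:
        print(str(e))

    # Three-argument form: the extra hint is printed before the annotations.
    try:
        raise UnionError(SomeInteger(), SomeString(),
                         "RPython cannot unify an integer and a string here")
    except UnionError as e:
        print(str(e))

The two-argument form reports only the annotations themselves; the
three-argument form prepends the hint, which is what the binaryop.py changes
in this branch rely on.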
diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -5,7 +5,7 @@ from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, - AnnotatorError, gather_error, ErrorWrapper) + AnnotatorError, gather_error, ErrorWrapper, source_lines) from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform @@ -383,8 +383,8 @@ try: unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)] except annmodel.UnionError, e: - e.args = e.args + ( - ErrorWrapper(gather_error(self, graph, block, None)),) + # Add source code to the UnionError + e.source = '\n'.join(source_lines(graph, block, None, long=True)) raise # if the merged cells changed, we must redo the analysis if unions != oldcells: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -243,14 +243,16 @@ if t2 is int: if int2.nonneg == False: - raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + raise UnionError(int1, int2, "RPython cannot prove that these " + \ + "integers are of the same signedness") knowntype = t1 elif t1 is int: if int1.nonneg == False: - raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + raise UnionError(int1, int2, "RPython cannot prove that these " + \ + "integers are of the same signedness") knowntype = t2 else: - raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) + raise UnionError(int1, int2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) @@ -551,9 +553,9 @@ def union((tup1, tup2)): if len(tup1.items) != len(tup2.items): - raise UnionError("cannot take the union of a tuple of length %d " - "and a tuple of length %d" % (len(tup1.items), - len(tup2.items))) + raise UnionError(tup1, tup2, "RPython cannot unify tuples of " + "different length: %d versus %d" % \ + (len(tup1.items), len(tup2.items))) else: unions = [unionof(x,y) for x,y in zip(tup1.items, tup2.items)] return SomeTuple(items = unions) @@ -726,7 +728,8 @@ else: basedef = ins1.classdef.commonbase(ins2.classdef) if basedef is None: - raise UnionError(ins1, ins2) + raise UnionError(ins1, ins2, "RPython cannot unify instances " + "with no common base class") flags = ins1.flags if flags: flags = flags.copy() @@ -768,7 +771,8 @@ def union((iter1, iter2)): s_cont = unionof(iter1.s_container, iter2.s_container) if iter1.variant != iter2.variant: - raise UnionError("merging incompatible iterators variants") + raise UnionError(iter1, iter2, + "RPython cannot unify incompatible iterator variants") return SomeIterator(s_cont, *iter1.variant) @@ -778,8 +782,7 @@ if (bltn1.analyser != bltn2.analyser or bltn1.methodname != bltn2.methodname or bltn1.s_self is None or bltn2.s_self is None): - raise UnionError("cannot merge two different builtin functions " - "or methods:\n %r\n %r" % (bltn1, bltn2)) + raise UnionError(bltn1, bltn2) s_self = unionof(bltn1.s_self, bltn2.s_self) return SomeBuiltin(bltn1.analyser, s_self, methodname=bltn1.methodname) @@ -976,8 +979,8 @@ class __extend__(pairtype(SomeAddress, SomeObject)): def union((s_addr, s_obj)): - raise UnionError, "union of address and anything else makes no sense" + raise UnionError(s_addr, s_obj) class 
__extend__(pairtype(SomeObject, SomeAddress)): def union((s_obj, s_addr)): - raise UnionError, "union of address and anything else makes no sense" + raise UnionError(s_obj, s_addr) diff --git a/rpython/annotator/listdef.py b/rpython/annotator/listdef.py --- a/rpython/annotator/listdef.py +++ b/rpython/annotator/listdef.py @@ -58,7 +58,7 @@ def merge(self, other): if self is not other: if getattr(TLS, 'no_side_effects_in_union', 0): - raise UnionError("merging list/dict items") + raise UnionError(self, other) if other.dont_change_any_more: if self.dont_change_any_more: diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -680,6 +680,33 @@ """Signals an suspicious attempt at taking the union of deeply incompatible SomeXxx instances.""" + def __init__(self, s_obj1, s_obj2, msg=None): + """ + This exception expresses the fact that s_obj1 and s_obj2 cannot be unified. + The msg paramter is appended to a generic message. This can be used to + give the user a little more information. + """ + self.s_obj1 = s_obj1 + self.s_obj2 = s_obj2 + self.msg = msg + self.source = None + + def __str__(self): + s = "\n\n" + + if self.msg is not None: + s += "%s\n\n" % self.msg + + s += "Offending annotations:\n" + s += "%s\n%s\n\n" % (self.s_obj1, self.s_obj2) + + if self.source is not None: + s += self.source + + return s + + def __repr__(self): + return str(self) def unionof(*somevalues): "The most precise SomeValue instance that contains all the values." diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4023,6 +4023,81 @@ a = self.RPythonAnnotator() assert not a.build_types(fn, [int]).nonneg + def test_unionerror_attrs(self): + def f(x): + if x < 10: + return 1 + else: + return "bbb" + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + the_exc = exc.value + s_objs = set([type(the_exc.s_obj1), type(the_exc.s_obj2)]) + + assert s_objs == set([annmodel.SomeInteger, annmodel.SomeString]) + assert the_exc.msg == None # Check that this is a generic UnionError + + def test_unionerror_tuple_size(self): + def f(x): + if x < 10: + return (1, ) + else: + return (1, 2) + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == "RPython cannot unify tuples of different length: 2 versus 1" + + def test_unionerror_signedness(self): + def f(x): + if x < 10: + return r_uint(99) + else: + return -1 + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot prove that these integers are of " + "the same signedness") + + def test_unionerror_instance(self): + class A(object): pass + class B(object): pass + + def f(x): + if x < 10: + return A() + else: + return B() + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot unify instances with no common base class") + + def test_unionerror_iters(self): + + def f(x): + d = { 1 : "a", 2 : "b" } + if x < 10: + return d.iterkeys() + else: + return d.itervalues() + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot unify incompatible 
iterator variants") + def g(n): return [0, 1, 2, n] From noreply at buildbot.pypy.org Wed Aug 28 11:43:58 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 11:43:58 +0200 (CEST) Subject: [pypy-commit] pypy nobold-backtrace: Defining new classes is fine, just not inside functions/methods. Message-ID: <20130828094358.8E8471C0189@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: nobold-backtrace Changeset: r66373:3dfa52d1c632 Date: 2013-08-27 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/3dfa52d1c632/ Log: Defining new classes is fine, just not inside functions/methods. diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -293,7 +293,7 @@ _unsupported_ops = [ ('BINARY_POWER', "a ** b"), - ('BUILD_CLASS', 'creating new classes'), + ('BUILD_CLASS', 'defining classes inside functions'), ('EXEC_STMT', 'exec statement'), ('STOP_CODE', '???'), ('STORE_NAME', 'modifying globals'), From noreply at buildbot.pypy.org Wed Aug 28 11:43:59 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 11:43:59 +0200 (CEST) Subject: [pypy-commit] pypy nobold-backtrace: Revise display of FlowingError to look more like the new UnionError error. Message-ID: <20130828094359.CC48B1C0189@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: nobold-backtrace Changeset: r66374:8c89148edd71 Date: 2013-08-27 16:57 +0100 http://bitbucket.org/pypy/pypy/changeset/8c89148edd71/ Log: Revise display of FlowingError to look more like the new UnionError error. diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -24,8 +24,9 @@ self.frame = frame def __str__(self): - msg = ['-+' * 30] + msg = ["\n"] msg += map(str, self.args) + msg += [""] msg += source_lines(self.frame.graph, None, offset=self.frame.last_instr) return "\n".join(msg) From noreply at buildbot.pypy.org Wed Aug 28 11:44:01 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 11:44:01 +0200 (CEST) Subject: [pypy-commit] pypy nobold-backtrace: Make 'blocked block' errors look like the other errors. Message-ID: <20130828094401.0EDDB1C0189@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: nobold-backtrace Changeset: r66375:bf45de26fd1d Date: 2013-08-27 17:24 +0100 http://bitbucket.org/pypy/pypy/changeset/bf45de26fd1d/ Log: Make 'blocked block' errors look like the other errors. 
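As a side note on the BUILD_CLASS message tweak a few messages above
("defining classes inside functions"): the restriction only concerns class
statements executed inside a function body. A rough sketch of what is and is
not accepted, with invented function names, is:

    # Fine in RPython: the class is defined once, at module level.
    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y

    def accepted(x, y):
        return Point(x, y)

    def rejected(x, y):
        # Not RPython: the 'class' statement below is executed inside a
        # function, which compiles to BUILD_CLASS and is now reported as
        # "defining classes inside functions".
        class LocalPoint(object):
            pass
        p = LocalPoint()
        p.x = x
        p.y = y
        return p

Plain Python runs both variants happily; it is only the RPython flow space
that flags the second one as unsupported.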
diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -90,7 +90,7 @@ format_simple_call(annotator, oper, msg) else: oper = None - msg.append(" " + str(oper)) + msg.append(" %s\n" % str(oper)) msg += source_lines(graph, block, operindex, long=True) if oper is not None: if SHOW_ANNOTATIONS: @@ -106,7 +106,7 @@ def format_blocked_annotation_error(annotator, blocked_blocks): text = [] for block, (graph, index) in blocked_blocks.items(): - text.append('-+' * 30) + text.append('\n') text.append("Blocked block -- operation cannot succeed") text.append(gather_error(annotator, graph, block, index)) return '\n'.join(text) From noreply at buildbot.pypy.org Wed Aug 28 11:44:02 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 11:44:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in vext01/pypy/nobold-backtrace (pull request #183) Message-ID: <20130828094402.8ABD81C0189@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r66376:f7a79b540a6e Date: 2013-08-28 10:43 +0100 http://bitbucket.org/pypy/pypy/changeset/f7a79b540a6e/ Log: Merged in vext01/pypy/nobold-backtrace (pull request #183) Work on improving UnionError messages and stack trace displays. diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -5,7 +5,7 @@ from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, - AnnotatorError, gather_error, ErrorWrapper) + AnnotatorError, gather_error, ErrorWrapper, source_lines) from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform @@ -383,8 +383,8 @@ try: unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)] except annmodel.UnionError, e: - e.args = e.args + ( - ErrorWrapper(gather_error(self, graph, block, None)),) + # Add source code to the UnionError + e.source = '\n'.join(source_lines(graph, block, None, long=True)) raise # if the merged cells changed, we must redo the analysis if unions != oldcells: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -243,14 +243,16 @@ if t2 is int: if int2.nonneg == False: - raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + raise UnionError(int1, int2, "RPython cannot prove that these " + \ + "integers are of the same signedness") knowntype = t1 elif t1 is int: if int1.nonneg == False: - raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + raise UnionError(int1, int2, "RPython cannot prove that these " + \ + "integers are of the same signedness") knowntype = t2 else: - raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) + raise UnionError(int1, int2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) @@ -551,9 +553,9 @@ def union((tup1, tup2)): if len(tup1.items) != len(tup2.items): - raise UnionError("cannot take the union of a tuple of length %d " - "and a tuple of length %d" % (len(tup1.items), - len(tup2.items))) + raise UnionError(tup1, tup2, "RPython cannot unify tuples of " + "different length: %d versus %d" % \ + (len(tup1.items), len(tup2.items))) else: unions = [unionof(x,y) for x,y in zip(tup1.items, tup2.items)] return 
SomeTuple(items = unions) @@ -726,7 +728,8 @@ else: basedef = ins1.classdef.commonbase(ins2.classdef) if basedef is None: - raise UnionError(ins1, ins2) + raise UnionError(ins1, ins2, "RPython cannot unify instances " + "with no common base class") flags = ins1.flags if flags: flags = flags.copy() @@ -768,7 +771,8 @@ def union((iter1, iter2)): s_cont = unionof(iter1.s_container, iter2.s_container) if iter1.variant != iter2.variant: - raise UnionError("merging incompatible iterators variants") + raise UnionError(iter1, iter2, + "RPython cannot unify incompatible iterator variants") return SomeIterator(s_cont, *iter1.variant) @@ -778,8 +782,7 @@ if (bltn1.analyser != bltn2.analyser or bltn1.methodname != bltn2.methodname or bltn1.s_self is None or bltn2.s_self is None): - raise UnionError("cannot merge two different builtin functions " - "or methods:\n %r\n %r" % (bltn1, bltn2)) + raise UnionError(bltn1, bltn2) s_self = unionof(bltn1.s_self, bltn2.s_self) return SomeBuiltin(bltn1.analyser, s_self, methodname=bltn1.methodname) @@ -976,8 +979,8 @@ class __extend__(pairtype(SomeAddress, SomeObject)): def union((s_addr, s_obj)): - raise UnionError, "union of address and anything else makes no sense" + raise UnionError(s_addr, s_obj) class __extend__(pairtype(SomeObject, SomeAddress)): def union((s_obj, s_addr)): - raise UnionError, "union of address and anything else makes no sense" + raise UnionError(s_obj, s_addr) diff --git a/rpython/annotator/listdef.py b/rpython/annotator/listdef.py --- a/rpython/annotator/listdef.py +++ b/rpython/annotator/listdef.py @@ -58,7 +58,7 @@ def merge(self, other): if self is not other: if getattr(TLS, 'no_side_effects_in_union', 0): - raise UnionError("merging list/dict items") + raise UnionError(self, other) if other.dont_change_any_more: if self.dont_change_any_more: diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -680,6 +680,33 @@ """Signals an suspicious attempt at taking the union of deeply incompatible SomeXxx instances.""" + def __init__(self, s_obj1, s_obj2, msg=None): + """ + This exception expresses the fact that s_obj1 and s_obj2 cannot be unified. + The msg paramter is appended to a generic message. This can be used to + give the user a little more information. + """ + self.s_obj1 = s_obj1 + self.s_obj2 = s_obj2 + self.msg = msg + self.source = None + + def __str__(self): + s = "\n\n" + + if self.msg is not None: + s += "%s\n\n" % self.msg + + s += "Offending annotations:\n" + s += "%s\n%s\n\n" % (self.s_obj1, self.s_obj2) + + if self.source is not None: + s += self.source + + return s + + def __repr__(self): + return str(self) def unionof(*somevalues): "The most precise SomeValue instance that contains all the values." 
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4023,6 +4023,81 @@ a = self.RPythonAnnotator() assert not a.build_types(fn, [int]).nonneg + def test_unionerror_attrs(self): + def f(x): + if x < 10: + return 1 + else: + return "bbb" + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + the_exc = exc.value + s_objs = set([type(the_exc.s_obj1), type(the_exc.s_obj2)]) + + assert s_objs == set([annmodel.SomeInteger, annmodel.SomeString]) + assert the_exc.msg == None # Check that this is a generic UnionError + + def test_unionerror_tuple_size(self): + def f(x): + if x < 10: + return (1, ) + else: + return (1, 2) + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == "RPython cannot unify tuples of different length: 2 versus 1" + + def test_unionerror_signedness(self): + def f(x): + if x < 10: + return r_uint(99) + else: + return -1 + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot prove that these integers are of " + "the same signedness") + + def test_unionerror_instance(self): + class A(object): pass + class B(object): pass + + def f(x): + if x < 10: + return A() + else: + return B() + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot unify instances with no common base class") + + def test_unionerror_iters(self): + + def f(x): + d = { 1 : "a", 2 : "b" } + if x < 10: + return d.iterkeys() + else: + return d.itervalues() + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot unify incompatible iterator variants") + def g(n): return [0, 1, 2, n] diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -24,8 +24,9 @@ self.frame = frame def __str__(self): - msg = ['-+' * 30] + msg = ["\n"] msg += map(str, self.args) + msg += [""] msg += source_lines(self.frame.graph, None, offset=self.frame.last_instr) return "\n".join(msg) @@ -293,7 +294,7 @@ _unsupported_ops = [ ('BINARY_POWER', "a ** b"), - ('BUILD_CLASS', 'creating new classes'), + ('BUILD_CLASS', 'defining classes inside functions'), ('EXEC_STMT', 'exec statement'), ('STOP_CODE', '???'), ('STORE_NAME', 'modifying globals'), diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -90,7 +90,7 @@ format_simple_call(annotator, oper, msg) else: oper = None - msg.append(" " + str(oper)) + msg.append(" %s\n" % str(oper)) msg += source_lines(graph, block, operindex, long=True) if oper is not None: if SHOW_ANNOTATIONS: @@ -106,7 +106,7 @@ def format_blocked_annotation_error(annotator, blocked_blocks): text = [] for block, (graph, index) in blocked_blocks.items(): - text.append('-+' * 30) + text.append('\n') text.append("Blocked block -- operation cannot succeed") text.append(gather_error(annotator, graph, block, index)) return '\n'.join(text) diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ 
b/rpython/translator/goal/translate.py @@ -246,17 +246,19 @@ tb = None if got_error: import traceback - errmsg = ["Error:\n"] + stacktrace_errmsg = ["Error:\n"] exc, val, tb = sys.exc_info() - errmsg.extend([" %s" % line for line in traceback.format_exception(exc, val, tb)]) + stacktrace_errmsg.extend([" %s" % line for line in traceback.format_tb(tb)]) + summary_errmsg = traceback.format_exception_only(exc, val) block = getattr(val, '__annotator_block', None) if block: class FileLike: def write(self, s): - errmsg.append(" %s" % s) - errmsg.append("Processing block:\n") + summary_errmsg.append(" %s" % s) + summary_errmsg.append("Processing block:\n") t.about(block, FileLike()) - log.ERROR(''.join(errmsg)) + log.info(''.join(stacktrace_errmsg)) + log.ERROR(''.join(summary_errmsg)) else: log.event('Done.') From noreply at buildbot.pypy.org Wed Aug 28 11:50:06 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 28 Aug 2013 11:50:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: (all): planning for today Message-ID: <20130828095006.B41A91C0189@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5041:4e50706dc4b6 Date: 2013-08-28 10:49 +0100 http://bitbucket.org/pypy/extradoc/changeset/4e50706dc4b6/ Log: (all): planning for today diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -68,20 +68,25 @@ * meditate on benchmarking infrastructure (Richard2, Maciej around) +* start branch for Python 3.3 + * review the pypy-pyarray branch * general wizardry (Carl Friedrich; Armin) +* try going back to the empty strategy (Lukas) + +* look at this: https://bitbucket.org/pypy/pypy/pull-request/176/fixed-support-for-re-stdlib-274/diff Discussions planned -------------------- -* demo session Tuesday -* scientific computing roadmap TODAY, Maciek leads discussion +* demo session Tuesday DONE +* scientific computing roadmap DONE * STM dissemination round -* JIT optimizer mess +* JIT optimizer mess DONE * roadmap planning -* do we want pip installed on downloadable pypys? +* do we want pip installed on downloadable pypys? 
TODAY after lunch * generalize jitviewer to other languages * LuaJIT discussion DONE (Tom, Armin, Maciek, Carl Friedrich, Laurie) From noreply at buildbot.pypy.org Wed Aug 28 14:43:32 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 28 Aug 2013 14:43:32 +0200 (CEST) Subject: [pypy-commit] pypy default: (rguillebert, ronan, joanna) Progress on fancy indexing with booleans Message-ID: <20130828124332.881E21C3675@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66377:9515e4524aaa Date: 2013-08-27 17:56 +0100 http://bitbucket.org/pypy/pypy/changeset/9515e4524aaa/ Log: (rguillebert, ronan, joanna) Progress on fancy indexing with booleans diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -95,6 +95,12 @@ if idx.get_size() > self.get_size(): raise OperationError(space.w_ValueError, space.wrap("index out of range for array")) + idx_iter = idx.create_iter(self.get_shape()) + size = loop.count_all_true_iter(idx_iter, self.get_shape(), idx.get_dtype()) + if size != val.get_shape()[0]: + raise OperationError(space.w_ValueError, space.wrap("NumPy boolean array indexing assignment " + "cannot assign %d input values to " + "the %d output values where the mask is true" % (val.get_shape()[0],size))) loop.setitem_filter(self, idx, val) def _prepare_array_index(self, space, w_index): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -318,23 +318,27 @@ lefti.next() return result -count_all_true_driver = jit.JitDriver(name = 'numpy_count', - greens = ['shapelen', 'dtype'], - reds = 'auto') def count_all_true(arr): - s = 0 if arr.is_scalar(): return arr.get_dtype().itemtype.bool(arr.get_scalar_value()) iter = arr.create_iter() - shapelen = len(arr.get_shape()) - dtype = arr.get_dtype() + return count_all_true_iter(iter, arr.get_shape(), arr.get_dtype()) + +count_all_true_iter_driver = jit.JitDriver(name = 'numpy_count', + greens = ['shapelen', 'dtype'], + reds = 'auto') +def count_all_true_iter(iter, shape, dtype): + s = 0 + shapelen = len(shape) + dtype = dtype while not iter.done(): - count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + count_all_true_iter_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) s += iter.getitem_bool() iter.next() return s + getitem_filter_driver = jit.JitDriver(name = 'numpy_getitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2354,6 +2354,12 @@ def test_array_indexing_bool_specialcases(self): from numpypy import arange, array a = arange(6) + try: + a[a < 3] = [1, 2] + assert False, "Should not work" + except ValueError: + pass + a = arange(6) a[a > 3] = array([15]) assert (a == [0, 1, 2, 3, 15, 15]).all() a = arange(6).reshape(3, 2) From noreply at buildbot.pypy.org Wed Aug 28 14:43:34 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 28 Aug 2013 14:43:34 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix more of fancy indexing Message-ID: <20130828124334.0D2A71C3675@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66378:dbb60d9a195a Date: 2013-08-28 13:36 +0100 
http://bitbucket.org/pypy/pypy/changeset/dbb60d9a195a/ Log: Fix more of fancy indexing diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -88,7 +88,9 @@ w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) return loop.getitem_filter(w_res, self, arr) - def setitem_filter(self, space, idx, val): + def setitem_filter(self, space, idx, value): + from pypy.module.micronumpy.interp_boxes import Box + val = value if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): raise OperationError(space.w_ValueError, space.wrap("boolean index array should have 1 dimension")) @@ -97,10 +99,16 @@ space.wrap("index out of range for array")) idx_iter = idx.create_iter(self.get_shape()) size = loop.count_all_true_iter(idx_iter, self.get_shape(), idx.get_dtype()) - if size != val.get_shape()[0]: + if len(val.get_shape()) > 0 and val.get_shape()[0] > 1 and size > val.get_shape()[0]: raise OperationError(space.w_ValueError, space.wrap("NumPy boolean array indexing assignment " "cannot assign %d input values to " "the %d output values where the mask is true" % (val.get_shape()[0],size))) + if val.get_shape() == [1]: + box = val.descr_getitem(space, space.wrap(0)) + assert isinstance(box, Box) + val = W_NDimArray(scalar.Scalar(val.get_dtype(), box)) + elif val.get_shape() == [0]: + val.implementation.dtype = self.implementation.dtype loop.setitem_filter(self, idx, val) def _prepare_array_index(self, space, w_index): From noreply at buildbot.pypy.org Wed Aug 28 14:43:35 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 28 Aug 2013 14:43:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge heads Message-ID: <20130828124335.578421C3675@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r66379:c240d50f3830 Date: 2013-08-28 13:37 +0100 http://bitbucket.org/pypy/pypy/changeset/c240d50f3830/ Log: Merge heads diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -5,7 +5,7 @@ from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, - AnnotatorError, gather_error, ErrorWrapper) + AnnotatorError, gather_error, ErrorWrapper, source_lines) from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform @@ -383,8 +383,8 @@ try: unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)] except annmodel.UnionError, e: - e.args = e.args + ( - ErrorWrapper(gather_error(self, graph, block, None)),) + # Add source code to the UnionError + e.source = '\n'.join(source_lines(graph, block, None, long=True)) raise # if the merged cells changed, we must redo the analysis if unions != oldcells: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -243,14 +243,16 @@ if t2 is int: if int2.nonneg == False: - raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + raise UnionError(int1, int2, "RPython cannot prove that these " + \ + "integers are of the same signedness") knowntype = t1 elif t1 is int: if int1.nonneg == False: - raise UnionError, "Merging %s and a possibly negative int is not allowed" 
% t2 + raise UnionError(int1, int2, "RPython cannot prove that these " + \ + "integers are of the same signedness") knowntype = t2 else: - raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) + raise UnionError(int1, int2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) @@ -551,9 +553,9 @@ def union((tup1, tup2)): if len(tup1.items) != len(tup2.items): - raise UnionError("cannot take the union of a tuple of length %d " - "and a tuple of length %d" % (len(tup1.items), - len(tup2.items))) + raise UnionError(tup1, tup2, "RPython cannot unify tuples of " + "different length: %d versus %d" % \ + (len(tup1.items), len(tup2.items))) else: unions = [unionof(x,y) for x,y in zip(tup1.items, tup2.items)] return SomeTuple(items = unions) @@ -726,7 +728,8 @@ else: basedef = ins1.classdef.commonbase(ins2.classdef) if basedef is None: - raise UnionError(ins1, ins2) + raise UnionError(ins1, ins2, "RPython cannot unify instances " + "with no common base class") flags = ins1.flags if flags: flags = flags.copy() @@ -768,7 +771,8 @@ def union((iter1, iter2)): s_cont = unionof(iter1.s_container, iter2.s_container) if iter1.variant != iter2.variant: - raise UnionError("merging incompatible iterators variants") + raise UnionError(iter1, iter2, + "RPython cannot unify incompatible iterator variants") return SomeIterator(s_cont, *iter1.variant) @@ -778,8 +782,7 @@ if (bltn1.analyser != bltn2.analyser or bltn1.methodname != bltn2.methodname or bltn1.s_self is None or bltn2.s_self is None): - raise UnionError("cannot merge two different builtin functions " - "or methods:\n %r\n %r" % (bltn1, bltn2)) + raise UnionError(bltn1, bltn2) s_self = unionof(bltn1.s_self, bltn2.s_self) return SomeBuiltin(bltn1.analyser, s_self, methodname=bltn1.methodname) @@ -976,8 +979,8 @@ class __extend__(pairtype(SomeAddress, SomeObject)): def union((s_addr, s_obj)): - raise UnionError, "union of address and anything else makes no sense" + raise UnionError(s_addr, s_obj) class __extend__(pairtype(SomeObject, SomeAddress)): def union((s_obj, s_addr)): - raise UnionError, "union of address and anything else makes no sense" + raise UnionError(s_obj, s_addr) diff --git a/rpython/annotator/listdef.py b/rpython/annotator/listdef.py --- a/rpython/annotator/listdef.py +++ b/rpython/annotator/listdef.py @@ -58,7 +58,7 @@ def merge(self, other): if self is not other: if getattr(TLS, 'no_side_effects_in_union', 0): - raise UnionError("merging list/dict items") + raise UnionError(self, other) if other.dont_change_any_more: if self.dont_change_any_more: diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -680,6 +680,33 @@ """Signals an suspicious attempt at taking the union of deeply incompatible SomeXxx instances.""" + def __init__(self, s_obj1, s_obj2, msg=None): + """ + This exception expresses the fact that s_obj1 and s_obj2 cannot be unified. + The msg paramter is appended to a generic message. This can be used to + give the user a little more information. 
+ """ + self.s_obj1 = s_obj1 + self.s_obj2 = s_obj2 + self.msg = msg + self.source = None + + def __str__(self): + s = "\n\n" + + if self.msg is not None: + s += "%s\n\n" % self.msg + + s += "Offending annotations:\n" + s += "%s\n%s\n\n" % (self.s_obj1, self.s_obj2) + + if self.source is not None: + s += self.source + + return s + + def __repr__(self): + return str(self) def unionof(*somevalues): "The most precise SomeValue instance that contains all the values." diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4023,6 +4023,81 @@ a = self.RPythonAnnotator() assert not a.build_types(fn, [int]).nonneg + def test_unionerror_attrs(self): + def f(x): + if x < 10: + return 1 + else: + return "bbb" + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + the_exc = exc.value + s_objs = set([type(the_exc.s_obj1), type(the_exc.s_obj2)]) + + assert s_objs == set([annmodel.SomeInteger, annmodel.SomeString]) + assert the_exc.msg == None # Check that this is a generic UnionError + + def test_unionerror_tuple_size(self): + def f(x): + if x < 10: + return (1, ) + else: + return (1, 2) + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == "RPython cannot unify tuples of different length: 2 versus 1" + + def test_unionerror_signedness(self): + def f(x): + if x < 10: + return r_uint(99) + else: + return -1 + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot prove that these integers are of " + "the same signedness") + + def test_unionerror_instance(self): + class A(object): pass + class B(object): pass + + def f(x): + if x < 10: + return A() + else: + return B() + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot unify instances with no common base class") + + def test_unionerror_iters(self): + + def f(x): + d = { 1 : "a", 2 : "b" } + if x < 10: + return d.iterkeys() + else: + return d.itervalues() + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot unify incompatible iterator variants") + def g(n): return [0, 1, 2, n] diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -24,8 +24,9 @@ self.frame = frame def __str__(self): - msg = ['-+' * 30] + msg = ["\n"] msg += map(str, self.args) + msg += [""] msg += source_lines(self.frame.graph, None, offset=self.frame.last_instr) return "\n".join(msg) @@ -293,7 +294,7 @@ _unsupported_ops = [ ('BINARY_POWER', "a ** b"), - ('BUILD_CLASS', 'creating new classes'), + ('BUILD_CLASS', 'defining classes inside functions'), ('EXEC_STMT', 'exec statement'), ('STOP_CODE', '???'), ('STORE_NAME', 'modifying globals'), diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -90,7 +90,7 @@ format_simple_call(annotator, oper, msg) else: oper = None - msg.append(" " + str(oper)) + msg.append(" %s\n" % str(oper)) msg += source_lines(graph, block, operindex, long=True) if oper is not None: if SHOW_ANNOTATIONS: @@ -106,7 +106,7 @@ def 
format_blocked_annotation_error(annotator, blocked_blocks): text = [] for block, (graph, index) in blocked_blocks.items(): - text.append('-+' * 30) + text.append('\n') text.append("Blocked block -- operation cannot succeed") text.append(gather_error(annotator, graph, block, index)) return '\n'.join(text) diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -246,17 +246,19 @@ tb = None if got_error: import traceback - errmsg = ["Error:\n"] + stacktrace_errmsg = ["Error:\n"] exc, val, tb = sys.exc_info() - errmsg.extend([" %s" % line for line in traceback.format_exception(exc, val, tb)]) + stacktrace_errmsg.extend([" %s" % line for line in traceback.format_tb(tb)]) + summary_errmsg = traceback.format_exception_only(exc, val) block = getattr(val, '__annotator_block', None) if block: class FileLike: def write(self, s): - errmsg.append(" %s" % s) - errmsg.append("Processing block:\n") + summary_errmsg.append(" %s" % s) + summary_errmsg.append("Processing block:\n") t.about(block, FileLike()) - log.ERROR(''.join(errmsg)) + log.info(''.join(stacktrace_errmsg)) + log.ERROR(''.join(summary_errmsg)) else: log.event('Done.') From noreply at buildbot.pypy.org Wed Aug 28 14:51:59 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 28 Aug 2013 14:51:59 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: IN-PROGRESS: I'll refactor the internal buffer API on default before continuing. Message-ID: <20130828125159.CBFDA1C3676@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r66380:4ef858c69a40 Date: 2013-08-28 13:26 +0100 http://bitbucket.org/pypy/pypy/changeset/4ef858c69a40/ Log: IN-PROGRESS: I'll refactor the internal buffer API on default before continuing. 
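Before the diff itself, a small app-level sketch of the slice-assignment rule
that the descr_setitem code below enforces. A bytearray is used for
simplicity (itemsize 1), and the exact ValueError wording differs between
Python implementations:

    buf = bytearray(b'abcdefgh')
    m = memoryview(buf)

    # A slice assignment of matching length writes through to the
    # underlying bytearray.
    m[2:5] = b'XYZ'
    assert bytes(buf) == b'abXYZfgh'

    # Changing the size through the view is refused; this is the
    # len(newstring) != size * itemsize check in descr_setitem.
    try:
        m[2:5] = b'far too long for three bytes'
    except ValueError as e:
        # wording varies, e.g. "cannot modify size of memoryview object"
        print(e)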
diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -44,7 +44,10 @@ descr_ne = _make_descr__cmp('ne') def as_str(self): - return self.buf.as_str() + buf = self.buf + # copied and modified from pypy/interpreter/buffer.py + n_bytes = buf.getlength() * buf.itemsize + return buf.getslice(0, n_bytes, 1, n_bytes) def getlength(self): return self.buf.getlength() @@ -102,12 +105,28 @@ def descr_setitem(self, space, w_index, newstring): self._check_released(space) buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf.descr_setitem(space, w_index, newstring) - else: + if not isinstance(buf, buffer.RWBuffer): raise OperationError(space.w_TypeError, space.wrap("cannot modify read-only memory")) + # copied and modified from pypy/interpreter/buffer.py + start, stop, step, size = space.decode_index4(w_index, self.getlength()) + if step == 0: # index only + if len(newstring) != buf.itemsize: + msg = 'cannot modify size of memoryview object' + raise OperationError(space.w_ValueError, space.wrap(msg)) + for i in range(buf.itemsize): + buf.setitem(start + i, newstring[i]) + elif step == 1: + if len(newstring) != size * buf.itemsize: + msg = 'cannot modify size of memoryview object' + raise OperationError(space.w_ValueError, space.wrap(msg)) + buf.setslice(start, newstring) + else: + raise OperationError(space.w_ValueError, + space.wrap("buffer object does not support" + " slicing with a step")) + def descr_len(self, space): self._check_released(space) return self.buf.descr_len(space) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -442,6 +442,11 @@ a.fromstring(b'some extra text') assert buf[:] == b'foobarbazsome extra text' + def test_memview_multi_tobytes(self): + a = self.array('i', list(b"abcdef")) + m = memoryview(a) + assert m.tobytes() == a.tobytes() + def test_list_methods(self): assert repr(self.array('i')) == "array('i')" assert repr(self.array('i', [1, 2, 3])) == "array('i', [1, 2, 3])" From noreply at buildbot.pypy.org Wed Aug 28 14:52:01 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 28 Aug 2013 14:52:01 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: Start a branch in which to refactor the internal buffer API. Message-ID: <20130828125201.039591C3676@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r66381:f9258d2dd31f Date: 2013-08-28 13:32 +0100 http://bitbucket.org/pypy/pypy/changeset/f9258d2dd31f/ Log: Start a branch in which to refactor the internal buffer API. From noreply at buildbot.pypy.org Wed Aug 28 14:52:03 2013 From: noreply at buildbot.pypy.org (necaris) Date: Wed, 28 Aug 2013 14:52:03 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Remove unused 'statistic' subdirectory Message-ID: <20130828125203.462DB1C3676@cobra.cs.uni-duesseldorf.de> Author: Rami Chowdhury Branch: improve-docs Changeset: r66382:837ebf436385 Date: 2013-08-28 13:41 +0100 http://bitbucket.org/pypy/pypy/changeset/837ebf436385/ Log: Remove unused 'statistic' subdirectory Doesn't look like it's been touched in _years_. 
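Returning briefly to the memoryview changes a few messages above: the length
of a memoryview counts items, not bytes, so a full copy has to cover
len(m) * itemsize bytes, which is the same product the patched as_str()
computes from getlength() and itemsize. A minimal check of that invariant,
using the stdlib array module as in the new test:

    import array

    a = array.array('i', [1, 2, 3])
    m = memoryview(a)

    # len() counts items, not bytes; tobytes() must copy
    # len(m) * itemsize bytes to cover the whole buffer.
    assert len(m) == 3
    assert m.itemsize == a.itemsize
    assert len(m.tobytes()) == len(m) * m.itemsize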
diff too long, truncating to 2000 out of 4966 lines diff --git a/pypy/doc/statistic/__init__.py b/pypy/doc/statistic/__init__.py deleted file mode 100644 diff --git a/pypy/doc/statistic/confrest.py b/pypy/doc/statistic/confrest.py deleted file mode 100644 --- a/pypy/doc/statistic/confrest.py +++ /dev/null @@ -1,35 +0,0 @@ -import py -from pypy.doc.confrest import * - -class PyPyPage(Page): - def fill_menubar(self): - self.menubar = html.div( - html.a("news", href="../news.html", class_="menu"), " ", - html.a("doc", href="../index.html", class_="menu"), " ", - html.a("contact", href="../contact.html", class_="menu"), " ", - html.a("getting-started", - href="../getting-started.html", class_="menu"), " ", - html.a("EU/project", - href="http://pypy.org/", class_="menu"), " ", - html.a("issue", - href="https://codespeak.net/issue/pypy-dev/", - class_="menu"), - " ", id="menubar") - -class Project(Project): - mydir = py.path.local(__file__).dirpath() - title = "PyPy" - stylesheet = 'style.css' - encoding = 'latin1' - prefix_title = "PyPy" - logo = html.div( - html.a( - html.img(alt="PyPy", id="pyimg", - src="http://codespeak.net/pypy/img/py-web1.png", - height=110, width=149))) - Page = PyPyPage - - def get_docpath(self): - return self.mydir - - diff --git a/pypy/doc/statistic/format.py b/pypy/doc/statistic/format.py deleted file mode 100644 --- a/pypy/doc/statistic/format.py +++ /dev/null @@ -1,115 +0,0 @@ -import py -import datetime -import dateutil -from dateutil import parser - -import pylab -import matplotlib - -greyscale = False - -def get_data(p): - data = p.readlines() - title = data[0].strip() - axis = data[1].strip().split(',') - data = [convert_data(t) for t in zip(*[l.strip().split(',') for l in data[2:]])] - return title, axis, data - -def convert_data(row): - if not row: - return [] - first = row[0] - try: - int(first) - return [int(elt) for elt in row] - except ValueError: - pass - try: - float(first) - return [float(elt) for elt in row] - except ValueError: - pass - if first[0] == '"': - return [elt[1:-1] for elt in row] - return [parsedate(elt) for elt in row] - -def parsedate(s): - if len(s) <= 7: - year, month = s.split("-") - result = datetime.datetime(int(year), int(month), 15) - else: - result = parser.parse(s) - return pylab.date2num(result) - -if greyscale: - colors = ["k", "k--", "k."] -else: - colors = "brg" - -def csv2png(p): - print p - title, axis, data = get_data(p) - dates = data[0] - - release_title, release_axis, release_data = get_data( py.path.local("release_dates.dat") ) - release_dates, release_names = release_data - - sprint_title, sprint_axis, sprint_data = get_data( py.path.local("sprint_dates.dat") ) - sprint_locations, sprint_begin_dates, sprint_end_dates = sprint_data - - ax = pylab.subplot(111) - for i, d in enumerate(data[1:]): - args = [dates, d, colors[i]] - pylab.plot_date(linewidth=0.8, *args) - - ymax = max(pylab.yticks()[0]) #just below the legend - for i, release_date in enumerate(release_dates): - release_name = release_names[i] - if greyscale: - color = 0.3 - else: - color = "g" - pylab.axvline(release_date, linewidth=0.8, color=color, alpha=0.5) - ax.text(release_date, ymax * 0.4, release_name, - fontsize=10, - horizontalalignment='right', - verticalalignment='top', - rotation='vertical') - for i, location in enumerate(sprint_locations): - begin = sprint_begin_dates[i] - end = sprint_end_dates[i] - if float(begin) >= float(min(dates[0],dates[-1])): - if greyscale: - color = 0.8 - else: - color = "y" - pylab.axvspan(begin, end, linewidth=0, 
facecolor=color, alpha=0.5) - ax.text(begin, ymax * 0.85, location, - fontsize=10, - horizontalalignment='right', - verticalalignment='top', - rotation='vertical') - pylab.legend(axis[1:], "upper left") - pylab.ylabel(axis[0]) - pylab.xlabel("") - ticklabels = ax.get_xticklabels() - pylab.setp(ticklabels, 'rotation', 45, size=9) -# ax.autoscale_view() - ax.grid(True) - pylab.title(title) - - pylab.savefig(p.purebasename + ".png") - pylab.savefig(p.purebasename + ".eps") - py.process.cmdexec("epstopdf %s" % (p.purebasename + ".eps", )) - -if __name__ == '__main__': - args = py.std.sys.argv - if len(args) == 1: - print "usage: %s <--all>" % args[0] - py.std.sys.exit() - for arg in args[1:]: - if arg == "--all": - for p in py.path.local().listdir("*.csv"): - py.std.os.system("python %s %s" % (args[0], p.basename)) - else: - csv2png(py.path.local(arg)) diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst deleted file mode 100644 --- a/pypy/doc/statistic/index.rst +++ /dev/null @@ -1,67 +0,0 @@ -======================= -PyPy Project Statistics -======================= - -This page collects some statistics (updated in irregular intervals) about the -PyPy project. - -Lines of Code -============= - -Lines of code and lines of test code below -the ``pypy/dist/pypy`` tree: - -.. image:: loc.png - - -Number of Files -=============== - -Number of non-test files and the number of test -files below the ``pypy/dist/pypy`` tree: - -.. image:: number_files.png - - -Subscribers to mailing lists -============================ - -Number of people subscribed to the `pypy-dev`_ and `pypy-svn`_ mailing lists: - -.. image:: subscribers.png - - -Posts to mailing lists -====================== - -Number of posts to the `pypy-dev`_ and `pypy-svn`_ mailing lists: - -.. image:: post.png - - -IRC channel activity -==================== - -Written lines in the #pypy irc channel: - -.. image:: statistic_irc_log.png - - -comp.lang.python -================ - -Mentions of the terms "pypy" and "py.test" on comp.lang.python: - -.. image:: python-list.png - - -Web access -========== - -Page hits to http://codespeak.net/pypy/: - - -.. image:: webaccess.png - -.. _pypy-dev: http://python.org/mailman/listinfo/pypy-commit -.. 
_pypy-svn: http://python.org/mailman/listinfo/pypy-dev diff --git a/pypy/doc/statistic/loc.csv b/pypy/doc/statistic/loc.csv deleted file mode 100644 --- a/pypy/doc/statistic/loc.csv +++ /dev/null @@ -1,1295 +0,0 @@ -Lines of Code in the pypy subtree -lines of code, lines of code, lines of testcode -2003-02-19, 1436, 0 -2003-02-20, 1603, 114 -2003-02-21, 2637, 401 -2003-02-22, 2921, 121 -2003-02-23, 3674, 969 -2003-02-24, 3674, 969 -2003-02-25, 3808, 998 -2003-02-26, 3808, 998 -2003-02-27, 3808, 998 -2003-02-28, 3847, 1054 -2003-03-01, 3854, 1056 -2003-03-03, 3865, 1114 -2003-03-04, 3865, 1116 -2003-03-18, 3865, 1116 -2003-04-06, 3865, 1116 -2003-04-11, 3865, 1116 -2003-04-18, 3892, 1221 -2003-04-19, 3943, 1273 -2003-05-22, 3943, 1273 -2003-05-25, 3943, 1273 -2003-05-26, 4239, 1414 -2003-05-27, 4714, 1668 -2003-05-28, 5603, 1847 -2003-05-29, 5735, 1883 -2003-05-30, 6515, 2380 -2003-05-31, 6927, 2393 -2003-06-01, 7144, 2396 -2003-06-02, 7152, 2400 -2003-06-05, 7195, 2400 -2003-06-06, 7212, 2400 -2003-06-07, 7215, 2400 -2003-06-08, 7228, 2390 -2003-06-09, 7230, 2390 -2003-06-13, 7356, 2387 -2003-06-14, 7436, 2387 -2003-06-15, 7436, 2387 -2003-06-16, 7558, 2411 -2003-06-17, 7568, 2411 -2003-06-18, 7708, 2465 -2003-06-19, 7796, 2460 -2003-06-20, 7794, 2467 -2003-06-21, 7823, 2479 -2003-06-22, 9536, 2608 -2003-06-23, 10196, 3153 -2003-06-24, 10675, 3428 -2003-06-28, 11165, 3571 -2003-06-29, 11173, 3579 -2003-06-30, 11173, 3579 -2003-07-01, 11336, 3597 -2003-07-02, 11344, 3597 -2003-07-03, 11344, 3597 -2003-07-04, 11344, 3597 -2003-07-06, 11354, 3597 -2003-07-07, 11354, 3597 -2003-07-08, 11544, 3621 -2003-07-10, 11544, 3621 -2003-07-12, 11544, 3621 -2003-07-13, 11544, 3621 -2003-07-14, 11617, 3625 -2003-07-15, 11617, 3625 -2003-07-16, 11617, 3625 -2003-07-17, 11617, 3625 -2003-07-18, 11617, 3625 -2003-07-19, 11617, 3625 -2003-07-24, 11617, 3625 -2003-07-26, 11617, 3625 -2003-07-27, 11617, 3625 -2003-07-28, 11617, 3625 -2003-07-29, 11617, 3625 -2003-07-30, 11617, 3625 -2003-07-31, 11617, 3625 -2003-08-01, 11617, 3625 -2003-08-02, 11617, 3625 -2003-08-06, 11617, 3625 -2003-08-07, 11617, 3625 -2003-08-09, 11617, 3625 -2003-08-10, 11617, 3625 -2003-08-17, 11617, 3625 -2003-08-24, 11617, 3625 -2003-08-25, 11617, 3625 -2003-09-02, 11617, 3625 -2003-09-07, 11617, 3625 -2003-09-08, 11617, 3625 -2003-09-09, 11617, 3625 -2003-09-10, 11617, 3625 -2003-09-12, 11617, 3625 -2003-09-13, 11617, 3625 -2003-09-14, 11617, 3625 -2003-09-15, 11617, 3625 -2003-09-16, 11617, 3625 -2003-09-17, 11617, 3625 -2003-09-18, 11954, 3536 -2003-09-19, 11976, 3541 -2003-09-20, 11976, 3541 -2003-09-21, 11976, 3541 -2003-09-22, 11976, 3541 -2003-09-23, 12024, 3671 -2003-09-25, 12024, 3671 -2003-09-26, 12024, 3671 -2003-09-28, 12024, 3671 -2003-09-29, 12024, 3677 -2003-09-30, 12393, 3760 -2003-10-01, 12900, 3965 -2003-10-02, 13241, 4310 -2003-10-03, 13241, 4310 -2003-10-04, 13310, 4356 -2003-10-05, 12443, 4223 -2003-10-06, 12415, 4223 -2003-10-07, 12415, 4223 -2003-10-08, 12417, 4223 -2003-10-09, 12417, 4223 -2003-10-10, 12875, 4431 -2003-10-11, 12884, 4452 -2003-10-12, 12970, 4492 -2003-10-13, 12984, 4492 -2003-10-14, 12984, 4492 -2003-10-15, 12974, 4492 -2003-10-16, 13051, 4492 -2003-10-17, 13094, 4521 -2003-10-18, 13131, 4561 -2003-10-20, 13131, 4561 -2003-10-21, 13131, 4561 -2003-10-23, 13147, 4570 -2003-10-24, 13268, 4413 -2003-10-25, 13276, 4416 -2003-10-26, 13372, 4409 -2003-10-27, 13641, 4403 -2003-10-28, 13699, 4409 -2003-10-29, 13850, 4419 -2003-10-30, 13848, 4431 -2003-10-31, 13854, 4431 -2003-11-03, 13704, 4437 
[... remaining deleted rows of pypy/doc/statistic/loc.csv (date, lines of code, lines of test code) omitted; the diff was already truncated by the archiver ...]
11661 -2005/03/03, 47863, 11666 -2005/03/04, 47719, 11596 -2005/03/05, 47888, 11628 -2005/03/06, 47912, 11638 -2005/03/10, 48463, 11638 -2005/03/11, 48566, 11653 -2005/03/12, 48605, 11671 -2005/03/13, 48629, 11754 -2005/03/14, 48645, 11755 -2005/03/15, 48683, 11755 -2005/03/17, 48791, 11755 -2005/03/19, 48756, 11768 -2005/03/20, 50569, 15353 -2005/03/21, 50164, 15056 -2005/03/22, 54019, 15607 -2005/03/23, 54091, 13093 -2005/03/24, 54198, 13096 -2005/03/27, 54216, 13108 -2005/03/28, 54272, 13111 -2005/03/29, 54788, 13157 -2005/03/30, 54890, 13167 -2005/03/31, 54997, 13276 -2005/04/01, 55751, 13280 -2005/04/02, 56072, 13325 -2005/04/03, 56272, 13325 -2005/04/04, 56579, 13340 -2005/04/05, 56748, 13356 -2005/04/06, 56885, 13380 -2005/04/07, 56983, 13422 -2005/04/08, 57306, 13520 -2005/04/09, 57270, 13538 -2005/04/10, 57578, 13545 -2005/04/11, 57717, 13522 -2005/04/12, 58127, 13668 -2005/04/13, 58464, 13699 -2005/04/14, 58819, 13856 -2005/04/15, 58914, 13931 -2005/04/16, 59050, 13979 -2005/04/17, 59013, 14000 -2005/04/18, 59087, 14047 -2005/04/19, 59131, 14097 -2005/04/20, 58504, 14881 -2005/04/21, 58521, 14898 -2005/04/22, 58565, 14939 -2005/04/23, 58565, 14939 -2005/04/24, 58807, 14955 -2005/04/25, 60595, 15369 -2005/04/26, 60919, 15192 -2005/04/27, 61763, 15262 -2005/04/28, 61815, 15250 -2005/04/29, 63797, 15974 -2005/04/30, 63937, 16140 -2005/05/01, 62128, 11466 -2005/05/02, 62628, 12096 -2005/05/03, 63010, 12163 -2005/05/04, 63289, 12171 -2005/05/05, 63428, 12267 -2005/05/06, 63489, 12329 -2005/05/07, 63552, 12329 -2005/05/08, 63690, 12349 -2005/05/09, 63755, 12352 -2005/05/10, 64144, 12557 -2005/05/11, 64484, 12685 -2005/05/12, 64790, 12735 -2005/05/13, 64811, 12749 -2005/05/14, 64917, 12761 -2005/05/15, 66548, 13088 -2005/05/16, 66562, 13121 -2005/05/17, 66595, 13184 -2005/05/18, 79865, 12413 -2005/05/19, 71673, 12429 -2005/05/20, 71816, 12433 -2005/05/21, 73363, 12587 -2005/05/22, 73426, 12608 -2005/05/23, 98647, 12664 -2005/05/24, 99080, 12693 -2005/05/25, 99295, 12693 -2005/05/26, 99681, 12701 -2005/05/27, 99874, 12742 -2005/05/28, 99760, 12742 -2005/05/29, 100002, 12811 -2005/05/30, 100321, 13014 -2005/05/31, 100942, 13201 -2005/06/01, 101442, 13353 -2005/06/02, 101619, 13444 -2005/06/03, 101734, 13461 -2005/06/04, 102228, 13525 -2005/06/05, 103949, 12880 -2005/06/06, 104657, 12995 -2005/06/07, 104662, 12955 -2005/06/08, 113246, 12999 -2005/06/09, 112505, 13115 -2005/06/10, 112796, 13151 -2005/06/11, 112825, 13178 -2005/06/12, 112856, 13332 -2005/06/13, 112895, 13446 -2005/06/14, 111088, 13725 -2005/06/15, 111031, 13891 -2005/06/16, 111272, 13979 -2005/06/17, 111660, 14165 -2005/06/18, 112462, 14266 -2005/06/19, 112774, 14332 -2005/06/20, 113060, 14432 -2005/06/21, 113628, 14678 -2005/06/22, 113623, 14689 -2005/06/23, 114363, 15004 -2005/06/24, 113296, 15214 -2005/06/25, 113704, 15474 -2005/06/26, 113883, 15561 -2005/06/28, 113886, 15562 -2005/06/29, 113952, 15603 -2005/06/30, 113952, 15603 -2005/07/01, 114035, 15625 -2005/07/02, 114021, 15697 -2005/07/03, 126972, 16148 -2005/07/04, 127161, 16176 -2005/07/05, 132622, 16446 -2005/07/06, 133732, 16589 -2005/07/07, 134141, 16707 -2005/07/09, 134189, 16711 -2005/07/10, 134279, 16770 -2005/07/11, 134472, 16944 -2005/07/12, 134835, 17106 -2005/07/13, 135456, 17254 -2005/07/14, 135576, 17401 -2005/07/15, 135695, 17468 -2005/07/18, 135877, 17663 -2005/07/19, 132611, 17276 -2005/07/20, 132758, 17419 -2005/07/21, 133068, 17799 -2005/07/22, 133289, 18016 -2005/07/23, 133322, 18020 -2005/07/24, 133359, 18035 -2005/07/25, 133921, 18324 
-2005/07/26, 134555, 18519 -2005/07/27, 135175, 18670 -2005/07/28, 139093, 18793 -2005/07/29, 139511, 19128 -2005/07/30, 140023, 19365 -2005/07/31, 140413, 19866 -2005/08/02, 141678, 19969 -2005/08/03, 143636, 20718 -2005/08/04, 144278, 20895 -2005/08/05, 144471, 21098 -2005/08/06, 144699, 21466 -2005/08/07, 144812, 21489 -2005/08/08, 145076, 21661 -2005/08/09, 145674, 21822 -2005/08/10, 146311, 22828 -2005/08/11, 146795, 23113 -2005/08/12, 147266, 23259 -2005/08/13, 147307, 23316 -2005/08/14, 147325, 23329 -2005/08/15, 147602, 23349 -2005/08/16, 147613, 23353 -2005/08/17, 147700, 23385 -2005/08/18, 147807, 23409 -2005/08/19, 148620, 23925 -2005/08/20, 148620, 23925 -2005/08/22, 149110, 24116 -2005/08/23, 149698, 24405 -2005/08/24, 150268, 24873 -2005/08/25, 150138, 25151 -2005/08/26, 151296, 25435 -2005/08/27, 151828, 25688 -2005/08/28, 151807, 25680 -2005/08/29, 152082, 25747 -2005/08/30, 152432, 25655 -2005/08/31, 152475, 25673 -2005/09/01, 152432, 25673 -2005/09/02, 152608, 25829 -2005/09/03, 152637, 25829 -2005/09/04, 152749, 25883 -2005/09/05, 153175, 25883 -2005/09/06, 158929, 26044 -2005/09/07, 159089, 26078 -2005/09/08, 159095, 26194 -2005/09/09, 159532, 26410 -2005/09/10, 159474, 26604 -2005/09/11, 159590, 26842 -2005/09/12, 159914, 27003 -2005/09/13, 160295, 27222 -2005/09/14, 160100, 27289 -2005/09/15, 160186, 27308 -2005/09/16, 160250, 27355 -2005/09/17, 160147, 27417 -2005/09/18, 160256, 27417 -2005/09/19, 160423, 27449 -2005/09/20, 160520, 27565 -2005/09/21, 160716, 27585 -2005/09/22, 160766, 27591 -2005/09/23, 161014, 27616 -2005/09/24, 161159, 27660 -2005/09/25, 161178, 27660 -2005/09/26, 161707, 27865 -2005/09/27, 161746, 27929 -2005/09/28, 161945, 27912 -2005/09/29, 161926, 27969 -2005/09/30, 161908, 28024 -2005/10/01, 161217, 28060 -2005/10/02, 161405, 28148 -2005/10/03, 161470, 28231 -2005/10/04, 161652, 28325 -2005/10/05, 161650, 28403 -2005/10/06, 150147, 27867 -2005/10/07, 150310, 27981 -2005/10/08, 150336, 28002 -2005/10/09, 150340, 28002 -2005/10/10, 152248, 29917 -2005/10/11, 152893, 30132 -2005/10/12, 153415, 30315 -2005/10/13, 153234, 30314 -2005/10/14, 154456, 30886 -2005/10/15, 155217, 31128 -2005/10/16, 155503, 31283 -2005/10/17, 155558, 31306 -2005/10/18, 155323, 31407 -2005/10/19, 155548, 31764 -2005/10/20, 155670, 31818 -2005/10/21, 156006, 31832 -2005/10/22, 156124, 31942 -2005/10/23, 156153, 31954 -2005/10/24, 156358, 31940 -2005/10/25, 156761, 31982 -2005/10/26, 156875, 32344 -2005/10/27, 151521, 32461 -2005/10/28, 151704, 32470 -2005/10/29, 151768, 32470 -2005/10/30, 151701, 32470 -2005/10/31, 151905, 32496 -2005/11/01, 151913, 32508 -2005/11/02, 151918, 32514 -2005/11/03, 151918, 32514 -2005/11/04, 151923, 32514 -2005/11/05, 152018, 32514 -2005/11/06, 152076, 32545 -2005/11/07, 152059, 32545 -2005/11/08, 152062, 32545 -2005/11/09, 152051, 32550 -2005/11/10, 151995, 32548 -2005/11/11, 152031, 32696 -2005/11/12, 152031, 32696 -2005/11/13, 152123, 32696 -2005/11/14, 152151, 32690 -2005/11/15, 152151, 32690 -2005/11/16, 152126, 32718 -2005/11/17, 152702, 32718 -2005/11/18, 152704, 32718 -2005/11/19, 152702, 32718 -2005/11/21, 152822, 32825 -2005/11/22, 152844, 32830 -2005/11/23, 152830, 32830 -2005/11/23, 152830, 32830 -2005/11/25, 152830, 32830 -2005/11/27, 152884, 32862 -2005/11/28, 153190, 33011 -2005/11/29, 153246, 33011 -2005/11/30, 153293, 33011 -2005/12/01, 153738, 33186 -2005/12/02, 153897, 33205 -2005/12/03, 153937, 33205 -2005/12/04, 154104, 33187 -2005/12/05, 154085, 33187 -2005/12/06, 154069, 33216 -2005/12/07, 154963, 33506 -2005/12/08, 
156568, 33832 -2005/12/09, 156931, 34791 -2005/12/10, 157305, 34891 -2005/12/11, 157466, 35017 -2005/12/12, 157583, 35018 -2005/12/13, 158273, 35108 -2005/12/14, 158266, 35238 -2005/12/15, 158751, 35651 -2005/12/16, 158822, 35712 -2005/12/17, 158902, 35713 -2005/12/18, 159070, 35868 -2005/12/19, 159083, 36018 -2005/12/20, 159100, 36030 -2005/12/21, 159197, 36060 -2005/12/22, 159276, 36124 -2005/12/23, 159305, 36195 -2005/12/24, 159321, 36197 -2005/12/25, 159321, 36247 -2005/12/27, 159374, 36285 -2005/12/29, 159455, 36319 -2005/12/30, 159520, 36330 -2005/12/31, 159549, 36333 -2006/01/02, 159692, 36336 -2006/01/04, 159796, 36423 -2006/01/05, 159787, 36462 -2006/01/06, 159735, 36497 -2006/01/07, 159822, 36617 -2006/01/08, 159964, 36760 -2006/01/09, 159988, 36799 -2006/01/10, 160237, 37028 -2006/01/11, 160307, 37098 -2006/01/12, 160335, 37215 -2006/01/13, 160359, 37217 -2006/01/14, 160642, 37353 -2006/01/15, 160640, 37439 -2006/01/16, 160714, 37551 -2006/01/17, 160737, 37562 -2006/01/18, 160809, 37566 -2006/01/19, 161141, 37707 -2006/01/20, 161285, 37794 -2006/01/21, 161311, 37789 -2006/01/22, 161327, 37820 -2006/01/23, 162130, 38129 -2006/01/24, 162825, 38485 -2006/01/25, 163208, 38818 -2006/01/26, 164445, 39306 -2006/01/27, 165729, 39552 -2006/01/28, 165022, 39852 -2006/01/29, 165580, 40015 -2006/01/30, 165580, 40016 -2006/01/31, 165833, 40588 -2006/02/01, 165903, 40680 -2006/02/02, 165996, 40725 -2006/02/03, 166074, 40861 -2006/02/04, 166270, 40962 -2006/02/05, 166436, 41110 -2006/02/06, 166453, 41209 -2006/02/07, 166496, 41367 -2006/02/08, 167122, 41723 -2006/02/09, 167396, 41812 -2006/02/10, 167651, 41888 -2006/02/11, 167719, 41992 -2006/02/12, 167741, 41981 -2006/02/13, 167901, 42278 -2006/02/14, 168068, 42354 -2006/02/15, 168112, 42671 -2006/02/16, 168628, 42835 -2006/02/17, 168836, 43089 -2006/02/18, 168844, 43089 -2006/02/19, 168953, 43173 -2006/02/20, 169263, 43201 -2006/02/21, 169602, 43297 -2006/02/22, 169433, 43239 -2006/02/23, 169557, 43279 -2006/02/25, 169557, 43279 -2006/02/26, 169558, 43279 -2006/02/27, 169940, 43442 -2006/02/28, 169197, 43619 -2006/03/01, 169535, 44112 -2006/03/02, 169723, 44499 -2006/03/03, 169915, 44512 -2006/03/04, 169917, 44512 -2006/03/06, 170008, 44611 -2006/03/07, 170046, 44647 -2006/03/08, 170180, 44731 -2006/03/09, 170579, 44855 -2006/03/10, 170589, 44743 -2006/03/11, 170597, 44743 -2006/03/12, 170626, 44743 -2006/03/13, 170830, 44738 -2006/03/14, 170944, 44877 -2006/03/15, 170968, 44841 -2006/03/16, 171176, 44945 -2006/03/17, 171385, 45125 -2006/03/18, 171465, 45202 -2006/03/19, 171607, 45205 -2006/03/20, 171933, 45506 -2006/03/21, 172152, 45725 -2006/03/22, 172896, 45937 -2006/03/23, 173493, 46142 -2006/03/24, 174140, 46532 -2006/03/25, 174223, 46600 -2006/03/26, 174271, 46600 -2006/03/27, 174514, 46658 -2006/03/28, 174663, 46741 -2006/03/29, 174706, 46762 -2006/03/30, 174706, 46787 -2006/03/31, 174915, 46889 -2006/04/01, 175211, 46910 -2006/04/02, 175537, 47085 -2006/04/03, 175795, 47186 -2006/04/04, 176373, 47399 -2006/04/05, 178170, 47256 -2006/04/06, 177675, 47389 -2006/04/07, 178276, 47631 -2006/04/08, 178733, 47684 -2006/04/09, 186394, 48077 -2006/04/10, 186785, 48474 -2006/04/11, 187071, 48645 -2006/04/12, 187328, 48773 -2006/04/13, 187417, 48900 -2006/04/14, 187650, 48936 -2006/04/15, 187874, 49072 -2006/04/16, 188073, 49083 -2006/04/17, 188185, 49343 -2006/04/18, 188368, 49760 -2006/04/19, 188764, 50000 -2006/04/20, 189386, 50060 -2006/04/21, 189444, 50233 -2006/04/22, 189708, 50343 -2006/04/23, 190100, 50476 -2006/04/24, 190581, 50716 
-2006/04/25, 191060, 50946 -2006/04/26, 191265, 51180 -2006/04/27, 191806, 51553 -2006/04/28, 192251, 51836 -2006/04/29, 192654, 52220 -2006/04/30, 192740, 52241 -2006/05/01, 192740, 52245 -2006/05/02, 192950, 52435 -2006/05/03, 193237, 52548 -2006/05/04, 193297, 52720 -2006/05/05, 193577, 53008 -2006/05/06, 194029, 53366 -2006/05/07, 194263, 53544 -2006/05/08, 194325, 53503 -2006/05/09, 194384, 53587 -2006/05/10, 194921, 53800 -2006/05/11, 195221, 54028 -2006/05/12, 195535, 54202 -2006/05/13, 196169, 54314 -2006/05/14, 195622, 54424 -2006/05/15, 195827, 54685 -2006/05/16, 196171, 54954 -2006/05/17, 197172, 55029 -2006/05/18, 197223, 54967 -2006/05/19, 197404, 55047 -2006/05/20, 198903, 55211 -2006/05/21, 199040, 55475 -2006/05/22, 199114, 55577 -2006/05/23, 199431, 55391 -2006/05/24, 203342, 55383 -2006/05/25, 203490, 55570 -2006/05/26, 203563, 55563 -2006/05/27, 203653, 55592 -2006/05/28, 203670, 55592 -2006/05/29, 204409, 57115 -2006/05/30, 235501, 57122 -2006/05/31, 235493, 57122 -2006/06/02, 235690, 57191 -2006/06/03, 236072, 57355 -2006/06/04, 236020, 58196 -2006/06/05, 236286, 58331 -2006/06/06, 236730, 58474 -2006/06/07, 237402, 58537 -2006/06/08, 237731, 58790 -2006/06/09, 236391, 56722 -2006/06/10, 236447, 56788 -2006/06/11, 236558, 56800 -2006/06/12, 237246, 57071 -2006/06/13, 236849, 57211 -2006/06/14, 236747, 57335 -2006/06/15, 237010, 57438 -2006/06/16, 237223, 57674 -2006/06/17, 237227, 57705 -2006/06/18, 237243, 57759 -2006/06/19, 237290, 58140 -2006/06/20, 237711, 58324 -2006/06/21, 236934, 58324 -2006/06/22, 237228, 58454 -2006/06/23, 237265, 58460 -2006/06/24, 237586, 58460 -2006/06/25, 237964, 58689 -2006/06/26, 238162, 58755 -2006/06/27, 238361, 58778 -2006/06/28, 238484, 58830 -2006/06/29, 238832, 58938 -2006/06/30, 238742, 58961 -2006/07/01, 238361, 58943 -2006/07/02, 238651, 59001 -2006/07/03, 238674, 59009 -2006/07/04, 238724, 59012 -2006/07/05, 238901, 59013 -2006/07/06, 238884, 59396 -2006/07/07, 239546, 59615 -2006/07/08, 241573, 60173 -2006/07/09, 242229, 60524 -2006/07/10, 242273, 60532 -2006/07/11, 242885, 60796 -2006/07/12, 243281, 61279 -2006/07/13, 243406, 61287 -2006/07/14, 243459, 61314 -2006/07/15, 243520, 61313 -2006/07/16, 243621, 61344 -2006/07/17, 243883, 61480 -2006-07-17, 243621, 61344 -2006-07-18, 243886, 61525 -2006-07-19, 244520, 61568 -2006-07-20, 244866, 61922 -2006-07-21, 244969, 61966 -2006-07-22, 245046, 62020 -2006-07-23, 245229, 62043 -2006-07-24, 245421, 62142 -2006-07-25, 245618, 62211 -2006-07-26, 245251, 62324 -2006-07-27, 245921, 62650 -2006-07-28, 246016, 62694 -2006-07-29, 249837, 62836 -2006-07-30, 249901, 62893 -2006-07-31, 249933, 62894 -2006-08-01, 250022, 62941 -2006-08-02, 250276, 63020 -2006-08-03, 248113, 62052 -2006-08-04, 248192, 62110 -2006-08-05, 249951, 62532 -2006-08-06, 250333, 62659 -2006-08-07, 250401, 62670 -2006-08-08, 252585, 63100 -2006-08-09, 252849, 63027 -2006-08-10, 253370, 63137 -2006-08-11, 253464, 63219 -2006-08-12, 253592, 63309 -2006-08-13, 253597, 63309 -2006-08-14, 253600, 63309 -2006-08-15, 253707, 63375 -2006-08-16, 254098, 63549 -2006-08-17, 254591, 63698 -2006-08-18, 254640, 63783 -2006-08-19, 254723, 63783 -2006-08-20, 254707, 63789 -2006-08-21, 254932, 64065 -2006-08-22, 254820, 63836 -2006-08-23, 255008, 63958 -2006-08-24, 255255, 64074 -2006-08-25, 255383, 64175 -2006-08-26, 255553, 64197 -2006-08-27, 255283, 64604 -2006-08-28, 255368, 64660 -2006-08-29, 255368, 64660 -2006-08-30, 256002, 64772 -2006-08-31, 255939, 64828 -2006-09-01, 256059, 64897 -2006-09-02, 256072, 64896 -2006-09-03, 
256651, 65050 -2006-09-04, 256654, 65213 -2006-09-05, 256678, 65296 -2006-09-06, 256933, 65562 -2006-09-07, 257168, 65707 -2006-09-08, 257380, 65826 -2006-09-09, 257996, 66107 -2006-09-10, 258138, 66194 -2006-09-11, 258419, 66313 -2006-09-12, 258503, 66321 -2006-09-13, 258624, 66365 -2006-09-14, 258625, 66401 -2006-09-15, 258783, 66509 -2006-09-16, 258785, 66512 -2006-09-17, 258806, 66519 -2006-09-18, 258424, 66766 -2006-09-19, 258438, 66797 -2006-09-20, 258438, 66797 -2006-09-21, 258436, 66851 -2006-09-22, 258059, 67034 -2006-09-23, 258667, 67089 -2006-09-24, 258792, 67181 -2006-09-25, 258793, 67181 -2006-09-26, 258797, 67221 -2006-09-27, 258858, 67228 -2006-09-28, 259082, 67296 -2006-09-29, 258980, 67013 -2006-09-30, 258822, 67973 -2006-10-01, 258935, 68062 -2006-10-02, 259055, 68084 -2006-10-03, 259204, 68209 -2006-10-04, 296696, 68497 -2006-10-05, 298069, 68956 -2006-10-06, 298223, 69071 -2006-10-07, 298327, 69155 -2006-10-08, 298428, 69175 -2006-10-09, 298430, 69177 -2006-10-10, 297621, 69224 -2006-10-11, 296425, 68673 -2006-10-12, 294584, 68892 -2006-10-13, 294744, 68942 -2006-10-14, 295543, 68963 -2006-10-15, 295543, 68963 -2006-10-16, 295732, 68963 -2006-10-17, 295851, 69007 -2006-10-18, 296612, 69393 -2006-10-19, 296832, 69369 -2006-10-20, 297146, 69404 -2006-10-21, 297332, 69565 -2006-10-22, 298113, 69570 -2006-10-23, 298120, 69570 -2006-10-24, 298222, 69603 -2006-10-25, 298556, 69777 -2006-10-26, 298706, 69813 -2006-10-27, 298766, 70013 -2006-10-28, 298359, 69758 -2006-10-29, 298359, 69758 -2006-10-30, 298357, 69735 -2006-10-31, 299054, 69842 -2006-11-01, 300281, 69956 -2006-11-02, 300230, 69927 -2006-11-03, 300443, 70008 -2006-11-04, 300886, 70417 -2006-11-05, 301166, 70513 -2006-11-06, 301201, 70616 -2006-11-07, 300495, 69816 -2006-11-08, 300804, 70084 -2006-11-09, 300878, 70257 -2006-11-10, 301106, 70340 -2006-11-11, 301302, 70392 -2006-11-12, 301728, 70598 -2006-11-13, 314202, 72483 -2006-11-14, 314394, 72265 -2006-11-15, 314736, 72346 -2006-11-16, 315160, 72531 -2006-11-17, 315070, 72670 -2006-11-18, 315097, 72705 -2006-11-19, 315261, 72795 -2006-11-20, 315333, 72837 -2006-11-21, 315777, 72949 -2006-11-22, 316130, 73006 -2006-11-23, 316162, 73035 -2006-11-24, 316338, 73248 -2006-11-25, 316378, 73355 -2006-11-26, 316415, 73366 -2006-11-27, 316532, 73366 -2006-11-28, 318388, 73700 -2006-11-29, 318515, 73704 -2006-11-30, 318645, 73752 -2006-12-01, 318816, 73833 -2006-12-02, 318840, 73899 -2006-12-03, 318845, 73899 -2006-12-04, 318930, 73941 -2006-12-05, 319614, 73996 -2006-12-06, 319623, 74108 -2006-12-07, 319828, 74140 -2006-12-08, 320019, 74173 -2006-12-09, 320215, 74192 -2006-12-10, 320927, 74430 -2006-12-11, 320937, 74505 -2006-12-12, 321194, 74652 -2006-12-13, 321238, 74723 -2006-12-14, 321114, 74849 -2006-12-15, 321900, 75884 -2006-12-16, 322135, 76292 -2006-12-17, 322261, 76341 -2006-12-18, 322384, 76348 -2006-12-19, 322382, 76315 -2006-12-20, 322718, 76471 -2006-12-21, 322961, 76259 -2006-12-22, 323044, 76354 -2006-12-23, 323068, 76392 -2006-12-24, 323117, 76424 -2006-12-25, 323123, 76420 -2006-12-26, 323108, 76427 -2006-12-27, 321740, 76431 -2006-12-28, 321872, 76671 -2006-12-29, 322115, 76909 -2006-12-30, 322246, 76916 -2006-12-31, 322288, 77018 -2007-01-01, 322277, 77019 -2007-01-02, 322403, 77007 -2007-01-03, 322478, 77390 -2007-01-04, 322672, 77448 -2007-01-05, 322736, 77448 -2007-01-06, 322746, 77448 -2007-01-07, 322751, 77449 -2007-01-08, 322751, 77449 -2007-01-09, 323243, 77508 -2007-01-10, 323870, 77955 -2007-01-11, 324115, 78315 -2007-01-12, 324587, 78684 
-2007-01-13, 325435, 79072 -2007-01-14, 325840, 79048 -2007-01-15, 327282, 79265 -2007-01-16, 327409, 79308 -2007-01-17, 327661, 79392 -2007-01-18, 327683, 79412 -2007-01-19, 327710, 79413 -2007-01-20, 327918, 79523 -2007-01-21, 328238, 79543 -2007-01-22, 328265, 79543 -2007-01-23, 328471, 79704 -2007-01-24, 328920, 79596 -2007-01-25, 329021, 79667 -2007-01-26, 329599, 79832 -2007-01-27, 329913, 80096 -2007-01-28, 329973, 80135 -2007-01-29, 330044, 80152 -2007-01-30, 330947, 80359 -2007-01-31, 331201, 80459 -2007-02-01, 331558, 80704 -2007-02-02, 331301, 80417 -2007-02-03, 331338, 80446 -2007-02-04, 331374, 80467 -2007-02-05, 331386, 80467 -2007-02-06, 332398, 80586 -2007-02-07, 332416, 80605 -2007-02-08, 332417, 80637 -2007-02-09, 332470, 80667 -2007-02-10, 332813, 80738 -2007-02-11, 332829, 80737 -2007-02-12, 333328, 80848 -2007-02-13, 333567, 81010 -2007-02-14, 333735, 81254 -2007-02-15, 334047, 81257 -2007-02-16, 333963, 81385 -2007-02-17, 335704, 81385 -2007-02-18, 335734, 81378 -2007-02-19, 335817, 81385 -2007-02-20, 335827, 81515 -2007-02-21, 335956, 81523 -2007-02-22, 335977, 81541 -2007-02-23, 336100, 81548 -2007-02-24, 336190, 81624 -2007-02-25, 336196, 81631 -2007-02-26, 336210, 81660 -2007-02-27, 336236, 81682 -2007-02-28, 336322, 81773 -2007-03-01, 336260, 81100 -2007-03-02, 336403, 81141 -2007-03-03, 336942, 81632 -2007-03-04, 334152, 81512 -2007-03-05, 333965, 81622 -2007-03-06, 334545, 81991 -2007-03-07, 334605, 81919 -2007-03-08, 334715, 82002 -2007-03-09, 336733, 82600 -2007-03-10, 336770, 82633 -2007-03-11, 336818, 82713 -2007-03-12, 336834, 82726 -2007-03-13, 337004, 83011 -2007-03-14, 337416, 83332 -2007-03-15, 337519, 83417 -2007-03-16, 337652, 83481 -2007-03-17, 337877, 83536 -2007-03-18, 337882, 83545 -2007-03-19, 340120, 84724 -2007-03-20, 340903, 85165 -2007-03-21, 341386, 85533 -2007-03-22, 341629, 85843 -2007-03-23, 341836, 85996 -2007-03-24, 341916, 86128 -2007-03-25, 341946, 86165 -2007-03-26, 341954, 86163 -2007-03-27, 339868, 84825 -2007-03-28, 339894, 84837 -2007-03-29, 339918, 84862 diff --git a/pypy/doc/statistic/loc.png b/pypy/doc/statistic/loc.png deleted file mode 100644 Binary file pypy/doc/statistic/loc.png has changed diff --git a/pypy/doc/statistic/number_files.csv b/pypy/doc/statistic/number_files.csv deleted file mode 100644 --- a/pypy/doc/statistic/number_files.csv +++ /dev/null @@ -1,1292 +0,0 @@ -Number of files in the pypy subtree -number of files, number of non-test files, number of testfiles -2003-02-19, 8, 0 -2003-02-20, 10, 1 -2003-02-21, 29, 5 -2003-02-22, 30, 4 -2003-02-23, 49, 12 -2003-02-24, 49, 12 -2003-02-25, 48, 12 -2003-02-26, 48, 12 -2003-02-27, 48, 12 -2003-02-28, 48, 12 -2003-03-01, 48, 12 -2003-03-03, 48, 13 -2003-03-04, 48, 13 -2003-03-18, 48, 13 -2003-04-06, 48, 13 -2003-04-11, 48, 13 -2003-04-18, 48, 14 -2003-04-19, 48, 15 -2003-05-22, 48, 15 -2003-05-25, 48, 15 -2003-05-26, 52, 19 -2003-05-27, 59, 26 -2003-05-28, 70, 27 -2003-05-29, 71, 26 -2003-05-30, 75, 37 -2003-05-31, 87, 37 -2003-06-01, 92, 37 -2003-06-02, 92, 37 -2003-06-05, 92, 37 -2003-06-06, 93, 37 -2003-06-07, 93, 37 -2003-06-08, 92, 37 -2003-06-09, 92, 37 -2003-06-13, 92, 37 -2003-06-14, 93, 37 -2003-06-15, 93, 37 -2003-06-16, 94, 37 -2003-06-17, 94, 37 -2003-06-18, 94, 37 -2003-06-19, 92, 37 -2003-06-20, 92, 37 -2003-06-21, 92, 37 -2003-06-22, 98, 40 -2003-06-23, 102, 42 -2003-06-24, 103, 43 -2003-06-28, 104, 43 -2003-06-29, 104, 43 -2003-06-30, 104, 43 -2003-07-01, 104, 43 -2003-07-02, 104, 43 -2003-07-03, 104, 43 -2003-07-04, 104, 43 -2003-07-06, 104, 43 
-2003-07-07, 104, 43 -2003-07-08, 106, 43 -2003-07-10, 106, 43 -2003-07-12, 106, 43 -2003-07-13, 106, 43 -2003-07-14, 106, 43 -2003-07-15, 106, 43 -2003-07-16, 106, 43 -2003-07-17, 106, 43 -2003-07-18, 106, 43 -2003-07-19, 106, 43 -2003-07-24, 106, 43 -2003-07-26, 106, 43 -2003-07-27, 106, 43 -2003-07-28, 106, 43 -2003-07-29, 106, 43 -2003-07-30, 106, 43 -2003-07-31, 106, 43 -2003-08-01, 106, 43 -2003-08-02, 106, 43 -2003-08-06, 106, 43 -2003-08-07, 106, 43 -2003-08-09, 106, 43 -2003-08-10, 106, 43 -2003-08-17, 106, 43 -2003-08-24, 106, 43 -2003-08-25, 106, 43 -2003-09-02, 106, 43 -2003-09-07, 106, 43 -2003-09-08, 106, 43 -2003-09-09, 106, 43 -2003-09-10, 106, 43 -2003-09-12, 106, 43 -2003-09-13, 106, 43 -2003-09-14, 106, 43 -2003-09-15, 106, 43 -2003-09-16, 106, 43 -2003-09-17, 106, 43 -2003-09-18, 108, 42 -2003-09-19, 108, 42 -2003-09-20, 108, 42 -2003-09-21, 108, 42 -2003-09-22, 108, 42 -2003-09-23, 108, 46 -2003-09-25, 108, 46 -2003-09-26, 108, 46 -2003-09-28, 108, 46 -2003-09-29, 108, 46 -2003-09-30, 118, 48 -2003-10-01, 121, 49 -2003-10-02, 124, 53 -2003-10-03, 124, 53 -2003-10-04, 124, 53 -2003-10-05, 118, 52 -2003-10-06, 118, 52 -2003-10-07, 118, 52 -2003-10-08, 118, 52 -2003-10-09, 118, 52 -2003-10-10, 122, 53 -2003-10-11, 123, 54 -2003-10-12, 123, 54 -2003-10-13, 123, 54 -2003-10-14, 123, 54 -2003-10-15, 123, 54 -2003-10-16, 124, 54 -2003-10-17, 125, 54 -2003-10-18, 126, 54 -2003-10-20, 126, 54 -2003-10-21, 126, 54 -2003-10-23, 126, 54 -2003-10-24, 127, 54 -2003-10-25, 127, 54 -2003-10-26, 130, 54 -2003-10-27, 131, 53 -2003-10-28, 131, 53 -2003-10-29, 131, 53 -2003-10-30, 131, 53 -2003-10-31, 131, 53 -2003-11-03, 130, 53 -2003-11-04, 130, 53 -2003-11-05, 130, 53 -2003-11-10, 130, 53 -2003-11-11, 131, 54 -2003-11-13, 131, 54 -2003-11-14, 131, 54 -2003-11-17, 131, 54 -2003-11-18, 131, 55 -2003-11-19, 131, 54 -2003-11-20, 131, 54 -2003-11-21, 131, 54 -2003-11-22, 131, 54 -2003-11-24, 130, 55 -2003-11-25, 130, 55 -2003-11-26, 129, 55 -2003-11-27, 133, 55 -2003-11-28, 133, 55 -2003-11-29, 133, 55 -2003-11-30, 140, 55 -2003-12-01, 140, 55 -2003-12-02, 140, 55 -2003-12-03, 140, 55 -2003-12-04, 140, 55 -2003-12-08, 140, 55 -2003-12-09, 140, 55 -2003-12-10, 140, 55 -2003-12-11, 140, 55 -2003-12-12, 140, 55 -2003-12-13, 140, 55 -2003-12-15, 140, 55 -2003-12-16, 145, 55 -2003-12-17, 147, 56 -2003-12-18, 149, 57 -2003-12-19, 156, 57 -2003-12-20, 159, 58 -2003-12-21, 161, 58 -2003-12-22, 161, 58 -2003-12-23, 161, 58 -2003-12-24, 162, 58 -2003-12-27, 162, 58 -2003-12-28, 162, 58 -2003-12-29, 162, 58 -2003-12-31, 162, 58 -2004-01-01, 165, 59 -2004-01-02, 165, 59 -2004-01-05, 165, 59 -2004-01-06, 160, 59 -2004-01-07, 160, 59 -2004-01-08, 160, 59 -2004-01-09, 161, 59 -2004-01-10, 161, 59 -2004-01-11, 161, 59 -2004-01-12, 161, 59 -2004-01-13, 161, 59 -2004-01-14, 161, 59 -2004-01-15, 161, 59 -2004-01-16, 161, 59 -2004-01-17, 161, 59 -2004-01-18, 161, 59 -2004-01-19, 161, 59 -2004-01-20, 161, 59 -2004-01-21, 161, 59 -2004-01-22, 161, 59 -2004-01-23, 161, 59 -2004-01-26, 161, 59 -2004-01-27, 161, 59 -2004-01-28, 161, 59 -2004-01-29, 161, 59 -2004-01-30, 161, 59 -2004-02-02, 161, 59 -2004-02-03, 161, 59 -2004-02-04, 161, 59 -2004-02-05, 161, 59 -2004-02-07, 161, 59 -2004-02-08, 161, 59 -2004-02-09, 161, 59 -2004-02-10, 161, 59 -2004-02-11, 161, 59 -2004-02-12, 161, 59 -2004-02-13, 161, 59 -2004-02-14, 161, 59 -2004-02-15, 161, 59 -2004-02-16, 161, 59 -2004-02-19, 161, 59 -2004-02-20, 161, 59 -2004-02-21, 161, 59 -2004-02-23, 161, 59 -2004-02-24, 161, 59 -2004-02-25, 161, 59 -2004-02-26, 161, 59 
-2004-02-27, 161, 59 -2004-02-28, 161, 59 -2004-02-29, 161, 59 -2004-03-01, 161, 59 -2004-03-02, 161, 59 -2004-03-03, 161, 59 -2004-03-04, 161, 59 -2004-03-05, 161, 59 -2004-03-06, 161, 59 -2004-03-07, 161, 59 -2004-03-08, 161, 59 -2004-03-09, 161, 59 -2004-03-10, 161, 59 -2004-03-11, 161, 59 -2004-03-12, 161, 59 -2004-03-13, 161, 59 -2004-03-14, 161, 59 -2004-03-16, 161, 59 -2004-03-17, 161, 59 -2004-03-18, 161, 59 -2004-03-19, 161, 59 -2004-03-21, 161, 59 -2004-03-22, 161, 59 -2004-03-23, 161, 59 -2004-03-24, 162, 59 -2004-03-25, 162, 59 -2004-03-26, 162, 59 -2004-03-28, 162, 59 -2004-03-29, 161, 59 -2004-03-30, 161, 59 -2004-03-31, 161, 59 -2004-04-01, 161, 59 -2004-04-02, 161, 59 -2004-04-03, 161, 59 -2004-04-04, 161, 59 -2004-04-05, 161, 59 -2004-04-06, 161, 59 -2004-04-07, 161, 59 -2004-04-08, 161, 59 -2004-04-09, 161, 59 -2004-04-10, 161, 59 -2004-04-11, 161, 59 -2004-04-13, 161, 59 -2004-04-14, 161, 59 -2004-04-15, 161, 59 -2004-04-16, 161, 59 -2004-04-17, 161, 59 -2004-04-18, 161, 59 -2004-04-19, 161, 59 -2004-04-20, 161, 59 -2004-04-21, 161, 59 -2004-04-22, 161, 59 -2004-04-23, 161, 59 -2004-04-24, 161, 59 -2004-04-25, 161, 59 -2004-04-26, 161, 59 -2004-04-27, 161, 59 -2004-04-28, 161, 59 -2004-04-29, 161, 59 -2004-04-30, 161, 59 -2004-05-01, 161, 59 -2004-05-02, 161, 59 -2004-05-03, 161, 59 -2004-05-04, 161, 59 -2004-05-05, 161, 59 -2004-05-06, 161, 59 -2004-05-07, 164, 59 -2004-05-08, 165, 59 -2004-05-09, 165, 59 -2004-05-10, 165, 59 -2004-05-11, 165, 59 -2004-05-12, 167, 59 -2004-05-13, 167, 59 -2004-05-14, 167, 59 -2004-05-15, 167, 59 -2004-05-16, 167, 59 -2004-05-17, 167, 59 -2004-05-18, 167, 59 -2004-05-19, 167, 59 -2004-05-20, 167, 59 -2004-05-21, 167, 59 -2004-05-22, 167, 59 -2004-05-23, 167, 59 -2004-05-24, 167, 59 -2004-05-25, 167, 59 -2004-05-26, 167, 59 -2004-05-27, 167, 59 -2004-05-28, 168, 59 -2004-05-29, 168, 59 -2004-05-30, 168, 59 -2004-05-31, 168, 59 -2004-06-01, 176, 61 -2004-06-02, 188, 63 -2004-06-03, 188, 63 -2004-06-04, 188, 63 -2004-06-05, 190, 63 -2004-06-06, 190, 63 -2004-06-07, 195, 64 -2004-06-08, 195, 64 -2004-06-09, 195, 64 -2004-06-10, 195, 64 -2004-06-11, 198, 64 -2004-06-12, 198, 64 -2004-06-13, 199, 64 -2004-06-15, 199, 64 -2004-06-16, 184, 65 -2004-06-17, 185, 64 -2004-06-18, 185, 64 -2004-06-19, 185, 64 -2004-06-20, 185, 64 -2004-06-21, 185, 64 -2004-06-22, 186, 64 -2004-06-23, 186, 64 -2004-06-24, 186, 64 -2004-06-25, 186, 64 -2004-06-26, 187, 64 -2004-06-27, 188, 65 -2004-06-28, 195, 67 -2004-06-29, 195, 67 -2004-06-30, 195, 67 -2004-07-01, 195, 67 -2004-07-02, 196, 67 -2004-07-03, 195, 67 -2004-07-04, 194, 67 -2004-07-05, 194, 67 -2004-07-06, 195, 69 -2004-07-07, 195, 69 -2004-07-08, 195, 69 -2004-07-09, 195, 69 -2004-07-10, 195, 69 -2004-07-11, 195, 69 -2004-07-12, 198, 69 -2004-07-13, 198, 69 -2004-07-14, 198, 69 -2004-07-15, 198, 69 -2004-07-16, 198, 68 -2004-07-17, 192, 66 -2004-07-21, 192, 66 -2004-07-22, 192, 66 -2004-07-23, 192, 66 -2004-07-24, 194, 66 -2004-07-25, 195, 66 -2004-07-26, 195, 66 -2004-07-27, 195, 66 -2004-07-28, 197, 67 -2004-07-29, 197, 67 -2004-07-30, 197, 67 -2004-07-31, 197, 67 -2004-08-01, 197, 67 -2004-08-02, 197, 67 -2004-08-03, 197, 67 -2004-08-04, 197, 67 -2004-08-05, 197, 67 -2004-08-06, 197, 67 -2004-08-07, 197, 67 -2004-08-08, 197, 67 -2004-08-09, 197, 67 -2004-08-10, 197, 67 -2004-08-11, 197, 67 -2004-08-12, 197, 67 -2004-08-13, 197, 67 -2004-08-14, 198, 67 -2004-08-15, 198, 67 -2004-08-16, 199, 67 -2004-08-17, 199, 67 -2004-08-18, 199, 67 -2004-08-19, 199, 67 -2004-08-20, 199, 67 -2004-08-21, 199, 67 
-2004-08-22, 199, 67 -2004-08-23, 199, 67 -2004-08-24, 200, 67 -2004-08-25, 200, 67 -2004-08-26, 200, 67 -2004-08-27, 200, 67 -2004-08-30, 200, 67 -2004-08-31, 200, 67 -2004-09-01, 200, 67 -2004-09-02, 200, 67 -2004-09-03, 200, 67 -2004-09-04, 200, 67 -2004-09-06, 200, 67 -2004-09-07, 205, 68 -2004-09-08, 205, 68 -2004-09-09, 206, 68 -2004-09-10, 207, 68 -2004-09-11, 207, 68 -2004-09-12, 207, 68 -2004-09-13, 207, 68 -2004-09-14, 207, 68 -2004-09-15, 207, 68 -2004-09-16, 207, 68 -2004-09-17, 207, 68 -2004-09-18, 207, 68 -2004-09-19, 207, 68 -2004-09-20, 207, 68 -2004-09-21, 207, 68 -2004-09-22, 207, 68 -2004-09-23, 207, 68 -2004-09-24, 207, 68 -2004-09-25, 207, 68 -2004-09-26, 207, 68 -2004-09-27, 207, 68 -2004-09-28, 207, 68 -2004-09-29, 207, 68 -2004-09-30, 207, 68 -2004-10-01, 207, 68 -2004-10-02, 207, 68 -2004-10-03, 207, 68 -2004-10-04, 207, 68 -2004-10-05, 207, 68 -2004-10-06, 207, 68 -2004-10-07, 207, 68 -2004-10-08, 207, 68 -2004-10-09, 207, 68 -2004-10-10, 202, 68 -2004-10-11, 202, 68 -2004-10-13, 202, 68 -2004-10-14, 202, 68 -2004-10-15, 202, 68 -2004-10-16, 202, 68 -2004-10-17, 202, 68 -2004-10-18, 202, 68 -2004-10-19, 202, 68 -2004-10-20, 202, 68 -2004-10-21, 202, 68 -2004-10-22, 202, 68 -2004-10-23, 202, 68 -2004-10-24, 202, 68 -2004-10-25, 202, 68 -2004-10-26, 202, 68 -2004-10-27, 202, 68 -2004-10-28, 202, 68 -2004-10-29, 202, 68 From noreply at buildbot.pypy.org Wed Aug 28 14:52:04 2013 From: noreply at buildbot.pypy.org (necaris) Date: Wed, 28 Aug 2013 14:52:04 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Remove unused Sphinx configuration / customization Message-ID: <20130828125204.8D1771C3676@cobra.cs.uni-duesseldorf.de> Author: Rami Chowdhury Branch: improve-docs Changeset: r66383:33b19de0fe6d Date: 2013-08-28 13:42 +0100 http://bitbucket.org/pypy/pypy/changeset/33b19de0fe6d/ Log: Remove unused Sphinx configuration / customization diff --git a/pypy/doc/config/confrest.py b/pypy/doc/config/confrest.py deleted file mode 100644 --- a/pypy/doc/config/confrest.py +++ /dev/null @@ -1,62 +0,0 @@ -from pypy.doc.confrest import * -from pypy.config.makerestdoc import make_cmdline_overview -from pypy.config import pypyoption -from rpython.config.config import Config -from rpython.config import translationoption - - -all_optiondescrs = [pypyoption.pypy_optiondescription, - translationoption.translation_optiondescription, - ] -start_to_descr = dict([(descr._name, descr) for descr in all_optiondescrs]) - -class PyPyPage(PyPyPage): - def fill(self): - super(PyPyPage, self).fill() - self.menubar[:] = html.div( - html.a("general documentation", href="../index.html", - class_="menu"), " ", - html.a("config index", href="index.html", - class_="menu"), " ", - html.a("command-line overview", href="commandline.html", - class_="menu"), " ", - " ", id="menubar") - -class Project(Project): - stylesheet = "../style.css" - title = "PyPy Configuration" - prefix_title = "PyPy Configuration" - Page = PyPyPage - - def get_content(self, txtpath, encoding): - if txtpath.basename == "commandline.rst": - result = [] - for line in txtpath.read().splitlines(): - if line.startswith('.. GENERATE:'): - start = line[len('.. 
GENERATE:'):].strip() - descr = start_to_descr[start] - line = make_cmdline_overview(descr, title=False).text() - result.append(line) - return "\n".join(result) - fullpath = txtpath.purebasename - start = fullpath.split(".")[0] - path = fullpath.rsplit(".", 1)[0] - basedescr = start_to_descr.get(start) - if basedescr is None: - return txtpath.read() - if fullpath.count(".") == 0: - descr = basedescr - path = "" - else: - conf = Config(basedescr) - subconf, step = conf._cfgimpl_get_home_by_path( - fullpath.split(".", 1)[1]) - descr = getattr(subconf._cfgimpl_descr, step) - text = unicode(descr.make_rest_doc(path).text()) - if txtpath.check(file=True): - content = txtpath.read() - if content: - text += "\nDescription\n===========" - return u"%s\n\n%s" % (text, unicode(txtpath.read(), encoding)) - return text - diff --git a/pypy/doc/confrest.py b/pypy/doc/confrest.py deleted file mode 100644 --- a/pypy/doc/confrest.py +++ /dev/null @@ -1,69 +0,0 @@ -import py - -# XXX importing/inheriting from an internal py lib class is hackish -from confrest_oldpy import Project, Page, relpath -html = py.xml.html - - -class PyPyPage(Page): - googlefragment = """ - - -""" - - def fill_menubar(self): - self.menubar = html.div( - html.a("home", - href=self.get_doclink("index.html"), - class_="menu"), - " ", - html.a("blog", href="http://morepypy.blogspot.com", class_="menu"), - " ", - html.a("getting-started", - href=self.get_doclink("getting-started.html"), - class_="menu"), - " ", - html.a("documentation", href=self.get_doclink("docindex.html"), - class_="menu"), - " ", - html.a("hg", href="https://bitbucket.org/pypy/pypy", - class_="menu"), - " ", - html.a("issues", - href="https://bugs.pypy.org/", - class_="menu"), - " ", id="menubar") - - def get_doclink(self, target): - return relpath(self.targetpath.strpath, - self.project.docpath.join(target).strpath) - - def unicode(self, doctype=True): - page = self._root.unicode() - page = page.replace("", self.googlefragment + "") - if doctype: - return self.doctype + page - else: - return page - - -class Project(Project): - mydir = py.path.local(__file__).dirpath() - - title = "PyPy" - stylesheet = 'style.css' - encoding = 'latin1' - prefix_title = "PyPy" - logo = html.div( - html.a( - html.img(alt="PyPy", id="pyimg", - src="http://codespeak.net/pypy/img/py-web1.png", - height=110, width=149))) - Page = PyPyPage diff --git a/pypy/doc/confrest_oldpy.py b/pypy/doc/confrest_oldpy.py deleted file mode 100644 --- a/pypy/doc/confrest_oldpy.py +++ /dev/null @@ -1,248 +0,0 @@ -import py -from pypy.tool.rest.rest import convert_rest_html, strip_html_header -from pypy.tool.difftime import worded_time - -html = py.xml.html - -class Page(object): - doctype = ('\n') - - def __init__(self, project, title, targetpath, stylesheeturl=None, - type="text/html", encoding="ISO-8859-1"): - self.project = project - self.title = project.prefix_title + title - self.targetpath = targetpath - self.stylesheeturl = stylesheeturl - self.type = type - self.encoding = encoding - - self.body = html.body() - self.head = html.head() - self._root = html.html(self.head, self.body) - self.fill() - - def a_docref(self, name, relhtmlpath): - docpath = self.project.docpath - return html.a(name, class_="menu", - href=relpath(self.targetpath.strpath, - docpath.join(relhtmlpath).strpath)) - - def a_apigenref(self, name, relhtmlpath): - apipath = self.project.apigenpath - return html.a(name, class_="menu", - href=relpath(self.targetpath.strpath, - apipath.join(relhtmlpath).strpath)) - - def 
fill_menubar(self): - items = [ - self.a_docref("index", "index.html"), - #self.a_apigenref("api", "api/index.html"), - #self.a_apigenref("source", "source/index.html"), - self.a_docref("contact", "contact.html"), - self.a_docref("download", "download.html"), - ] - items2 = [items.pop(0)] - sep = " " - for item in items: - items2.append(sep) - items2.append(item) - self.menubar = html.div(id="menubar", *items2) - - def fill(self): - content_type = "%s;charset=%s" %(self.type, self.encoding) - self.head.append(html.title(self.title)) - self.head.append(html.meta(name="Content-Type", content=content_type)) - if self.stylesheeturl: - self.head.append( - html.link(href=self.stylesheeturl, - media="screen", rel="stylesheet", - type="text/css")) - self.fill_menubar() - - self.metaspace = html.div( - html.div(self.title, class_="project_title"), - self.menubar, - id='metaspace') - - self.body.append(self.project.logo) - self.body.append(self.metaspace) - self.contentspace = html.div(id="contentspace") - self.body.append(self.contentspace) - - def unicode(self, doctype=True): - page = self._root.unicode() - if doctype: - return self.doctype + page - else: - return page - -class PyPage(Page): - def get_menubar(self): - menubar = super(PyPage, self).get_menubar() - # base layout - menubar.append( - html.a("issue", href="https://codespeak.net/issue/py-dev/", - class_="menu"), - ) - return menubar - - -def getrealname(username): - try: - import uconf - except ImportError: - return username - try: - user = uconf.system.User(username) - except KeyboardInterrupt: - raise - try: - return user.realname or username - except KeyError: - return username - - -class Project: - mydir = py.path.local(__file__).dirpath() - title = "py lib" - prefix_title = "" # we have a logo already containing "py lib" - encoding = 'latin1' - stylesheet = None - logo = html.div( - html.a( - html.img(alt="py lib", id='pyimg', height=114, width=154, - src="http://codespeak.net/img/pylib.png"), - href="http://codespeak.net")) - Page = PyPage - - def __init__(self, sourcepath=None): - if sourcepath is None: - sourcepath = self.mydir - self.setpath(sourcepath) - - def setpath(self, sourcepath, docpath=None, - apigenpath=None, stylesheet=None): - self.sourcepath = sourcepath - if docpath is None: - docpath = sourcepath - self.docpath = docpath - if apigenpath is None: - apigenpath = docpath - self.apigenpath = apigenpath - if stylesheet is None: - p = sourcepath.join(self.stylesheet or "style.css") - if p.check(): - self.stylesheet = p - else: - self.stylesheet = None - else: - p = sourcepath.join(stylesheet) - if p.check(): - stylesheet = p - self.stylesheet = stylesheet - #assert self.stylesheet - self.apigen_relpath = relpath( - self.docpath.strpath + '/', self.apigenpath.strpath + '/') - - def get_content(self, txtpath, encoding): - return unicode(txtpath.read(), encoding) - - def get_htmloutputpath(self, txtpath): - reloutputpath = txtpath.new(ext='.html').relto(self.sourcepath) - return self.docpath.join(reloutputpath) - - def process(self, txtpath): - encoding = self.encoding - content = self.get_content(txtpath, encoding) - outputpath = self.get_htmloutputpath(txtpath) - - stylesheet = self.stylesheet - if isinstance(stylesheet, py.path.local): - if not self.docpath.join(stylesheet.basename).check(): - self.docpath.ensure(dir=True) - stylesheet.copy(self.docpath) - stylesheet = relpath(outputpath.strpath, - self.docpath.join(stylesheet.basename).strpath) - - content = convert_rest_html(content, txtpath, - stylesheet=stylesheet, 
encoding=encoding) - content = strip_html_header(content, encoding=encoding) - - page = self.Page(self, "[%s] " % txtpath.purebasename, - outputpath, stylesheeturl=stylesheet) - - try: - svninfo = txtpath.info() - modified = " modified %s by %s" % (worded_time(svninfo.mtime), - getrealname(svninfo.last_author)) - except (KeyboardInterrupt, SystemExit): - raise - except: - modified = " " - - page.contentspace.append( - html.div(html.div(modified, style="float: right; font-style: italic;"), - id = 'docinfoline')) - - page.contentspace.append(py.xml.raw(content)) - f = outputpath.open('w') - f.write(page.unicode().encode(encoding)) - f.close() - -# XXX this function comes from apigen/linker.py, put it -# somewhere in py lib -import os -def relpath(p1, p2, sep=os.path.sep, back='..', normalize=True): - """ create a relative path from p1 to p2 - - sep is the seperator used for input and (depending - on the setting of 'normalize', see below) output - - back is the string used to indicate the parent directory - - when 'normalize' is True, any backslashes (\) in the path - will be replaced with forward slashes, resulting in a consistent - output on Windows and the rest of the world - - paths to directories must end on a / (URL style) - """ - if normalize: - p1 = p1.replace(sep, '/') - p2 = p2.replace(sep, '/') - sep = '/' - # XXX would be cool to be able to do long filename - # expansion and drive - # letter fixes here, and such... iow: windows sucks :( - if (p1.startswith(sep) ^ p2.startswith(sep)): - raise ValueError("mixed absolute relative path: %r -> %r" %(p1, p2)) - fromlist = p1.split(sep) - tolist = p2.split(sep) - - # AA - # AA BB -> AA/BB - # - # AA BB - # AA CC -> CC - # - # AA BB - # AA -> ../AA - - diffindex = 0 - for x1, x2 in zip(fromlist, tolist): - if x1 != x2: - break - diffindex += 1 - commonindex = diffindex - 1 - - fromlist_diff = fromlist[diffindex:] - tolist_diff = tolist[diffindex:] - - if not fromlist_diff: - return sep.join(tolist[commonindex:]) - backcount = len(fromlist_diff) - if tolist_diff: - return sep.join([back,]*(backcount-1) + tolist_diff) - return sep.join([back,]*(backcount) + tolist[commonindex:]) - - From noreply at buildbot.pypy.org Wed Aug 28 14:52:05 2013 From: noreply at buildbot.pypy.org (necaris) Date: Wed, 28 Aug 2013 14:52:05 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Split out getting-started doc Message-ID: <20130828125205.C29381C3676@cobra.cs.uni-duesseldorf.de> Author: Rami Chowdhury Branch: improve-docs Changeset: r66384:7d1d8febf184 Date: 2013-08-28 13:45 +0100 http://bitbucket.org/pypy/pypy/changeset/7d1d8febf184/ Log: Split out getting-started doc Extract the relevant portions into index, getting-started-dev, and build documents, and remove generic (and less useful) getting- started document. diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -1,7 +1,41 @@ Building PyPy from Source ========================= -For building PyPy, it is recommended to install a pre-build PyPy first (see +Clone the repository +-------------------- + +If you prefer to compile your own PyPy, or if you want to modify it, you +will need to obtain a copy of the sources. This can be done either by +`downloading them from the download page`_ or by checking them out from the +repository using mercurial. We suggest using mercurial if one wants to access +the current development. + +.. 
_downloading them from the download page: http://pypy.org/download.html + +You must issue the following command on your +command line, DOS box, or terminal:: + + hg clone http://bitbucket.org/pypy/pypy pypy + +This will clone the repository and place it into a directory +named ``pypy``, and will get you the PyPy source in ``pypy/pypy`` and +documentation files in ``pypy/pypy/doc``. +We try to ensure that the tip is always stable, but it might +occasionally be broken. You may want to check out `our nightly tests`_: +find a revision (12-chars alphanumeric string, e.g. "963e808156b3") +that passed at least the +``{linux32}`` tests (corresponding to a ``+`` sign on the +line ``success``) and then, in your cloned repository, switch to this revision +using:: + + hg up -r XXXXX + +where XXXXX is the revision id. + +.. _our nightly tests: http://buildbot.pypy.org/summary?branch= + + +For building PyPy, we recommend installing a pre-built PyPy first (see :doc:`install`). It is possible to build PyPy with CPython, but it will take a lot longer to run -- depending on your architecture, between two and three times as long. @@ -66,7 +100,18 @@ If everything works correctly this will create an executable ``pypy-c`` in the current directory. The executable behaves mostly like a normal Python -interpreter (see :doc:`cpython differences`). +interpreter (see :doc:`cpython_differences`). + + +.. _translate-pypy: + +Translating with non-standard options +------------------------------------- + +It is possible to have non-standard features enabled for translation, +but they are not really tested any more. Look, for example, at the +:doc:`objspace proxies ` document. + Installation @@ -99,3 +144,27 @@ .. TODO windows + + +Where to go from here +--------------------- + +Congratulations! Now that you've successfully built your own PyPy, you might +want to `start writing a fast JITed interpreter with PyPy`_, or look at some +:doc:`projects we need help with `, or just dive deeper into +the docs: + +.. toctree:: + :maxdepth: 1 + + getting-started-dev + cpython_differences + gc_info + jit-hooks + stackless + cppyy + objspace-proxies + sandbox + + +.. _start writing a fast JITed interpreter with PyPy: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -1,39 +1,8 @@ -Getting Started with PyPy's Development Process -=============================================== +Getting Started Developing With PyPy +==================================== .. contents:: -.. _start-reading-sources: - -Where to start reading the sources ----------------------------------- - -PyPy is made from parts that are relatively independent of each other. -You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our :doc:`directory reference ` -or start off at one of the following points: - -* `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher - in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, - function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, - the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. 
Core types supporting the bytecode - interpreter are defined in `pypy/interpreter/typedef.py`_. - -* :source:`pypy/interpreter/pyparser` contains a recursive descent parser, - and grammar files that allow it to parse the syntax of various Python - versions. Once the grammar has been processed, the parser can be - translated by the above machinery into efficient code. - -* :source:`pypy/interpreter/astcompiler` contains the compiler. This - contains a modified version of the compiler package from CPython - that fixes some bugs and is translatable. - -* :source:`pypy/objspace/std` contains the :ref:`Standard object space `. The main file - is :source:`pypy/objspace/std/objspace.py`. For each type, the files ``xxxtype.py`` and - ``xxxobject.py`` contain respectively the definition of the type and its - (default) implementation. - Running PyPy's unit tests ------------------------- @@ -96,9 +65,15 @@ Interpreter-level console ~~~~~~~~~~~~~~~~~~~~~~~~~ -If you start an untranslated Python interpreter via:: +To start interpreting Python with PyPy, install a C compiler that is +supported by distutils and use Python 2.7 or greater to run PyPy:: - python pypy/bin/pyinteractive.py + cd pypy + python bin/pyinteractive.py + +After a few seconds (remember: this is running on top of CPython), you should +be at the PyPy prompt, which is the same as the Python prompt, but with an +extra ">". If you press on the console you enter the interpreter-level console, a @@ -129,6 +104,28 @@ You may be interested in reading more about the distinction between :ref:`interpreter-level and app-level `. +pyinteractive.py options +~~~~~~~~~~~~~~~~~~~~~~~~ + +To list the PyPy interpreter command line options, type:: + + cd pypy + python bin/pyinteractive.py --help + +pyinteractive.py supports most of the options that CPython supports too (in addition to a +large amount of options that can be used to customize pyinteractive.py). +As an example of using PyPy from the command line, you could type:: + + python pyinteractive.py -c "from test import pystone; pystone.main(10)" + +Alternatively, as with regular Python, you can simply give a +script name on the command line:: + + python pyinteractive.py ../../lib-python/2.7/test/pystone.py 10 + +See our :doc:`configuration sections ` for details about what all the commandline +options do. + .. _trace example: @@ -201,12 +198,46 @@ PyPy employs an open development process. You are invited to join our `pypy-dev mailing list`_ or look at the other :ref:`contact possibilities `. Usually we give out commit rights fairly liberally, so if you -want to do something with PyPy, you can become a committer. We are also doing -coding Sprints which are -separately announced and often happen around Python conferences such -as EuroPython or Pycon. Upcoming events are usually announced on `the blog`_. +want to do something with PyPy, you can become a committer. We also run frequent +coding sprints which are separately announced and often happen around Python +conferences such as EuroPython or PyCon. Upcoming events are usually announced +on `the blog`_. .. _the blog: http://morepypy.blogspot.com .. _pypy-dev mailing list: http://python.org/mailman/listinfo/pypy-dev .. _py library: http://pylib.org + + +.. _start-reading-sources: + +Where to start reading the sources +---------------------------------- + +PyPy is made from parts that are relatively independent of each other. +You should start looking at the part that attracts you most (all paths are +relative to the PyPy top level directory). 
You may look at our :doc:`directory reference ` +or start off at one of the following points: + +* :source:`pypy/interpreter` contains the bytecode interpreter: bytecode dispatcher + in :source:`pypy/interpreter/pyopcode.py`, frame and code objects in + :source:`pypy/interpreter/eval.py` and :source:`pypy/interpreter/pyframe.py`, + function objects and argument passing in :source:`pypy/interpreter/function.py` + and :source:`pypy/interpreter/argument.py`, the object space interface + definition in :source:`pypy/interpreter/baseobjspace.py`, modules in + :source:`pypy/interpreter/module.py` and :source:`pypy/interpreter/mixedmodule.py`. + Core types supporting the bytecode interpreter are defined in :source:`pypy/interpreter/typedef.py`. + +* :source:`pypy/interpreter/pyparser` contains a recursive descent parser, + and grammar files that allow it to parse the syntax of various Python + versions. Once the grammar has been processed, the parser can be + translated by the above machinery into efficient code. + +* :source:`pypy/interpreter/astcompiler` contains the compiler. This + contains a modified version of the compiler package from CPython + that fixes some bugs and is translatable. + +* :source:`pypy/objspace/std` contains the :ref:`Standard object space `. The main file + is :source:`pypy/objspace/std/objspace.py`. For each type, the files ``xxxtype.py`` and + ``xxxobject.py`` contain respectively the definition of the type and its + (default) implementation. diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst deleted file mode 100644 --- a/pypy/doc/getting-started.rst +++ /dev/null @@ -1,154 +0,0 @@ -Getting Started -================================== - -.. contents:: - - - -.. _Python: http://docs.python.org/reference/ - -:doc:`Downloading and installing PyPy ` - -Just the facts --------------- - -.. _prebuilt-pypy: - -Download a pre-built PyPy -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The quickest way to start using PyPy is to download a prebuilt binary for your -OS and architecture. You can either use the `most recent release`_ or one of -our `development nightly build`_. Please note that the nightly builds are not -guaranteed to be as stable as official releases, use them at your own risk. - -.. _most recent release: http://pypy.org/download.html -.. _development nightly build: http://buildbot.pypy.org/nightly/trunk/ - - -Installing PyPy -~~~~~~~~~~~~~~~ - -PyPy is ready to be executed as soon as you unpack the tarball or the zip -file, with no need to install it in any specific location:: - - $ tar xf pypy-2.1.tar.bz2 - $ ./pypy-2.1/bin/pypy - Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) - [PyPy 2.1.0 with GCC 4.4.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``PyPy is an exciting technology - that lets you to write fast, portable, multi-platform interpreters with less - effort'' - >>>> - -If you want to make PyPy available system-wide, you can put a symlink to the -``pypy`` executable in ``/usr/local/bin``. It is important to put a symlink -and not move the binary there, else PyPy would not be able to find its -library. 
- -If you want to install 3rd party libraries, the most convenient way is to -install distribute_ and pip_: - - $ curl -O http://python-distribute.org/distribute_setup.py - - $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - - $ ./pypy-2.1/bin/pypy distribute_setup.py - - $ ./pypy-2.1/bin/pypy get-pip.py - - $ ./pypy-2.1/bin/pip install pygments # for example - -3rd party libraries will be installed in ``pypy-2.1/site-packages``, and -the scripts in ``pypy-2.1/bin``. - - -Installing using virtualenv -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is often convenient to run pypy inside a virtualenv. To do this -you need a recent version of virtualenv -- 1.6.1 or greater. You can -then install PyPy both from a precompiled tarball or from a mercurial -checkout:: - - # from a tarball - $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env - - # from the mercurial checkout - $ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env - -Note that bin/python is now a symlink to bin/pypy. - -.. _distribute: http://www.python-distribute.org/ -.. _pip: http://pypi.python.org/pypi/pip - - - - -Clone the repository -~~~~~~~~~~~~~~~~~~~~ - -If you prefer to :doc:`compile PyPy by yourself `, or if you want to modify it, you -will need to obtain a copy of the sources. This can be done either by -`downloading them from the download page`_ or by checking them out from the -repository using mercurial. We suggest using mercurial if one wants to access -the current development. - -.. _downloading them from the download page: http://pypy.org/download.html - -You must issue the following command on your -command line, DOS box, or terminal:: - - hg clone http://bitbucket.org/pypy/pypy pypy - -This will clone the repository and place it into a directory -named ``pypy``, and will get you the PyPy source in -``pypy/pypy`` and documentation files in ``pypy/pypy/doc``. -We try to ensure that the tip is always stable, but it might -occasionally be broken. You may want to check out `our nightly tests`_: -find a revision (12-chars alphanumeric string, e.g. "963e808156b3") -that passed at least the -``{linux32}`` tests (corresponding to a ``+`` sign on the -line ``success``) and then, in your cloned repository, switch to this revision -using:: - - hg up -r XXXXX - -where XXXXX is the revision id. - -.. _our nightly tests: http://buildbot.pypy.org/summary?branch= - - -Where to go from here ----------------------- - -After you successfully manage to get PyPy's source you can read more about: - - - :doc:`Building and using PyPy's Python interpreter ` - - :doc:`Learning more about the RPython toolchain and how to develop (with) PyPy ` - - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ - - `Look at our benchmark results`_ - -.. _Tutorial for how to write an interpreter with the RPython toolchain and make it fast: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html -.. _Look at our benchmark results: http://speed.pypy.org - - -Understanding PyPy's architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -For in-depth information about architecture and coding documentation -head over to the :doc:`documentation section ` where you'll find lots of -interesting information. Additionally, in true hacker spirit, you -may just :ref:`start reading sources `. 
- - -Filing bugs or feature requests -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You may file `bug reports`_ on our issue tracker which is -also accessible through the 'issues' top menu of -the PyPy website. :ref:`Using the development tracker ` has -more detailed information on specific features of the tracker. - -.. _bug reports: https://bugs.pypy.org/ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -4,7 +4,7 @@ Welcome to the documentation for PyPy, a fast_, compliant alternative implementation of the Python_ language. -* If you want to find out more about what PyPy is, have a look at our :doc:`introduction ` +* If you want to find out more about what PyPy is, have a look at our :doc:`introduction` or consult the `PyPy website`_. * If you're interested in trying PyPy out, check out the :doc:`installation instructions `. @@ -20,41 +20,51 @@ .. _PyPy website: http://pypy.org/ +.. _getting-started-index: + Getting Started --------------- .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - introduction - install - build - faq + introduction + install + build + faq + + +.. _using-pypy: Using PyPy ---------- .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - cpython_differences - gc_info - jit-hooks - stackless - cppyy - objspace-proxies - sandbox + cpython_differences + gc_info + jit-hooks + stackless + cppyy + objspace-proxies + sandbox +.. _developing-pypy: + Development documentation ------------------------- .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - how-to-contribute - project-ideas - project-documentation + getting-started-dev + how-to-contribute + architecture + project-ideas + project-documentation + .. TODO: audit ^^ @@ -65,9 +75,9 @@ ---------------- .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - extradoc + extradoc .. TODO: Remove this? Or fill it with links to papers? @@ -109,3 +119,34 @@ * :ref:`genindex` * :ref:`modindex` * :ref:`search` + + +.. TODO figure out what to do with these! + +.. toctree:: + :hidden: + + configuration + contributor + cppyy_backend + cppyy_example + ctypes-implementation + discussion/jit-profiler + discussions + eventhistory + extending + getting-started-dev + how-to-release + release-2.0.1 + release-2.0.2 + release-2.1.0 + release-2.1.0-beta1 + release-2.1.0-beta2 + releases/index + whatsnew-1.9 + whatsnew-2.0 + whatsnew-2.0.0-beta1 + whatsnew-2.1 + whatsnew-head + you-want-to-help + __pypy__-module From noreply at buildbot.pypy.org Wed Aug 28 14:52:07 2013 From: noreply at buildbot.pypy.org (necaris) Date: Wed, 28 Aug 2013 14:52:07 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Add more getting-started stuff into getting-started-python Message-ID: <20130828125207.1AD681C3676@cobra.cs.uni-duesseldorf.de> Author: Rami Chowdhury Branch: improve-docs Changeset: r66385:275734994ef5 Date: 2013-08-28 13:46 +0100 http://bitbucket.org/pypy/pypy/changeset/275734994ef5/ Log: Add more getting-started stuff into getting-started-python diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -1,236 +0,0 @@ -Getting Started with PyPy's Python Interpreter -============================================== - -.. contents:: - - -PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of -`CPython's core language regression tests`_ and comes with many of the extension -modules included in the standard library including ``ctypes``. 
It can run large -libraries such as Django_ and Twisted_. There are some small behavioral -differences with CPython and some missing extensions, for details see :doc:`cpython_differences` - -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. - -.. _CPython's core language regression tests: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E -.. _Django: http://djangoproject.com -.. _Twisted: http://twistedmatrix.com -.. _download a pre-built PyPy: http://pypy.org/download.html - - -.. _translate-pypy: - -Translating the PyPy Python interpreter ---------------------------------------- - -.. note:: For some hints on how to translate the Python interpreter under - Windows, see the :doc:`windows document `. - -You can translate the whole of PyPy's Python interpreter to low level C code. -If you intend to build using gcc, check to make sure that -the version you have is not 4.2 or you will run into `this bug`_. - -.. _this bug: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 - -1. First `download a pre-built PyPy`_ for your architecture which you will - use to translate your Python interpreter. It is, of course, possible to - translate with a CPython 2.6 or later, but this is not the preferred way, - because it will take a lot longer to run -- depending on your architecture, - between two and three times as long. - -2. Install build-time dependencies. On a Debian box these are:: - - [user at debian-box ~]$ sudo apt-get install \ - gcc make python-dev libffi-dev libsqlite3-dev pkg-config \ - libz-dev libbz2-dev libncurses-dev libexpat1-dev \ - libssl-dev libgc-dev python-sphinx python-greenlet - - On a Fedora-16 box these are:: - - [user at fedora-or-rh-box ~]$ sudo yum install \ - gcc make python-devel libffi-devel lib-sqlite3-devel pkgconfig \ - zlib-devel bzip2-devel ncurses-devel expat-devel \ - openssl-devel gc-devel python-sphinx python-greenlet - - On SLES11: - - $ sudo zypper install gcc make python-devel pkg-config \ - zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ - libexpat-devel libffi-devel python-curses - - The above command lines are split with continuation characters, giving the necessary dependencies first, then the optional ones. - - * ``pkg-config`` (to help us locate libffi files) - * ``libz-dev`` (for the optional ``zlib`` module) - * ``libbz2-dev`` (for the optional ``bz2`` module) - * ``libsqlite3-dev`` (for the optional ``sqlite3`` module via cffi) - * ``libncurses-dev`` (for the optional ``_minimal_curses`` module) - * ``libexpat1-dev`` (for the optional ``pyexpat`` module) - * ``libssl-dev`` (for the optional ``_ssl`` module) - * ``libgc-dev`` (for the Boehm garbage collector: only needed when translating with `--opt=0, 1` or `size`) - * ``python-sphinx`` (for the optional documentation build. You need version 1.0.7 or later) - - -3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources - are constrained, or your machine is slow you might want to pick the - :doc:`optimization level ` `1` in the next step. A level of - `2` or `3` or `jit` gives much better results, though. But if all - you want to do is to test that some new feature that you just wrote - translates, level 1 is enough. 
- - Let me stress this again: at ``--opt=1`` you get the Boehm - GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. - The resulting ``pypy-c`` is slow. - -4. Run:: - - cd pypy/goal - python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py - - possibly replacing ``--opt=jit`` with another :doc:`optimization level ` - of your choice. Typical example: ``--opt=2`` gives a good (but of - course slower) Python interpreter without the JIT. - -If everything works correctly this will create an executable -``pypy-c`` in the current directory. Type ``pypy-c --help`` -to see the options it supports - mainly the same basic -options as CPython. In addition, ``pypy-c --info`` prints the -translation options that where used to produce this particular -executable. The executable behaves mostly like a normal Python interpreter:: - - $ ./pypy-c - Python 2.7.3 (480845e6b1dd, Jul 31 2013, 11:05:31) - [PyPy 2.1.0 with GCC 4.7.1] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``RPython magically makes you rich - and famous (says so on the tin)'' - - >>>> 46 - 4 - 42 - >>>> from test import pystone - >>>> pystone.main() - Pystone(1.1) time for 50000 passes = 0.220015 - This machine benchmarks at 227257 pystones/second - >>>> pystone.main() - Pystone(1.1) time for 50000 passes = 0.060004 - This machine benchmarks at 833278 pystones/second - >>>> - -Note that pystone gets faster as the JIT kicks in. -This executable can be moved around or copied on other machines; see -Installation_ below. - -The ``translate.py`` script takes a very large number of options controlling -what to translate and how. See ``translate.py -h``. The default options -should be suitable for mostly everybody by now. -Find a more detailed description of the various options in our :doc:`configuration -sections `. - - -Translating with non-standard options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is possible to have non-standard features enabled for translation, -but they are not really tested any more. Look, for example, at the -:doc:`objspace proxies ` document. - - -Installation -~~~~~~~~~~~~ - -A prebuilt ``pypy-c`` can be installed in a standard location like -``/usr/local/bin``, although some details of this process are still in -flux. It can also be copied to other machines as long as their system -is "similar enough": some details of the system on which the translation -occurred might be hard-coded in the executable. - -PyPy dynamically finds the location of its libraries depending on the location -of the executable. The directory hierarchy of a typical PyPy installation -looks like this:: - - ./bin/pypy - ./include/ - ./lib_pypy/ - ./lib-python/2.7 - ./site-packages/ - -The hierarchy shown above is relative to a PREFIX directory. PREFIX is -computed by starting from the directory where the executable resides, and -"walking up" the filesystem until we find a directory containing ``lib_pypy`` -and ``lib-python/2.7``. - -The archives (.tar.bz2 or .zip) containing PyPy releases already contain the -correct hierarchy, so to run PyPy it's enough to unpack the archive, and run -the ``bin/pypy`` executable. - -To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. 
in ``/opt/pypy2.1``) and put a symlink to the -``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` - -If the executable fails to find suitable libraries, it will report -``debug: WARNING: library path not found, using compiled-in sys.path`` -and then attempt to continue normally. If the default path is usable, -most code will be fine. However, the ``sys.prefix`` will be unset -and some existing libraries assume that this is never the case. - - -.. _pyinteractive.py interpreter: - -Running the Python Interpreter Without Translation --------------------------------------------------- - -The pyinteractive.py interpreter -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To start interpreting Python with PyPy, install a C compiler that is -supported by distutils and use Python 2.5 or greater to run PyPy:: - - cd pypy - python bin/pyinteractive.py - -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python -prompt, but with an extra ">". - -Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension -modules. **This is slow, and most C modules are not present by -default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: - - >>>> from test import pystone - >>>> pystone.main(10) - -The parameter is the number of loops to run through the test. The -default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted -by CPython). - - -pyinteractive.py options -~~~~~~~~~~~~~~~~~~~~~~~~ - -To list the PyPy interpreter command line options, type:: - - cd pypy - python bin/pyinteractive.py --help - -pyinteractive.py supports most of the options that CPython supports too (in addition to a -large amount of options that can be used to customize pyinteractive.py). -As an example of using PyPy from the command line, you could type:: - - python pyinteractive.py -c "from test import pystone; pystone.main(10)" - -Alternatively, as with regular Python, you can simply give a -script name on the command line:: - - python pyinteractive.py ../../lib-python/2.7/test/pystone.py 10 - -See our :doc:`configuration sections ` for details about what all the commandline -options do. From noreply at buildbot.pypy.org Wed Aug 28 14:52:08 2013 From: noreply at buildbot.pypy.org (necaris) Date: Wed, 28 Aug 2013 14:52:08 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Remove nonexistent documents, update references to getting-started Message-ID: <20130828125208.6C3791C3676@cobra.cs.uni-duesseldorf.de> Author: Rami Chowdhury Branch: improve-docs Changeset: r66386:fcbb6cfccce4 Date: 2013-08-28 13:46 +0100 http://bitbucket.org/pypy/pypy/changeset/fcbb6cfccce4/ Log: Remove nonexistent documents, update references to getting-started diff --git a/pypy/doc/dev_method.rst b/pypy/doc/dev_method.rst --- a/pypy/doc/dev_method.rst +++ b/pypy/doc/dev_method.rst @@ -191,8 +191,8 @@ expectations is also good to do. Unfortunately there is always time spent the first day, mostly in the morning when people arrive to get the internet and server infrastructure up and running. That is why we are, through - :doc:`documentation `, trying to get participants to set up the tools and - configurations needed before they arrive to the sprint. + :doc:`documentation `, trying to get participants to + set up the tools and configurations needed before they arrive to the sprint. 
Approximate hours being held are 10-17, but people tend to stay longer to code during the evenings. A short status meeting starts up the day and work diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,3 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst - - diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -173,7 +173,7 @@ with its features preserved. See the :doc:`release 0.8 announcement ` for further details about the release and -the :doc:`getting started ` document for instructions about downloading it and +the :doc:`getting started ` document for instructions about downloading it and trying it out. There is also a short :doc:`FAQ `. *(11/03/2005)* From noreply at buildbot.pypy.org Wed Aug 28 14:52:09 2013 From: noreply at buildbot.pypy.org (necaris) Date: Wed, 28 Aug 2013 14:52:09 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Remove unused import of confrest configuration file Message-ID: <20130828125209.B7B6A1C3676@cobra.cs.uni-duesseldorf.de> Author: Rami Chowdhury Branch: improve-docs Changeset: r66387:7df2382ef8bb Date: 2013-08-28 13:47 +0100 http://bitbucket.org/pypy/pypy/changeset/7df2382ef8bb/ Log: Remove unused import of confrest configuration file diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -1,6 +1,5 @@ import py from pypy.config import pypyoption, makerestdoc -from pypy.doc.config.confrest import all_optiondescrs from rpython.config import translationoption, config all_optiondescrs = [pypyoption.pypy_optiondescription, From noreply at buildbot.pypy.org Wed Aug 28 14:52:10 2013 From: noreply at buildbot.pypy.org (necaris) Date: Wed, 28 Aug 2013 14:52:10 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Better phrasing, more links in architecture & introduction documents Message-ID: <20130828125210.E087F1C3676@cobra.cs.uni-duesseldorf.de> Author: Rami Chowdhury Branch: improve-docs Changeset: r66388:77fade6cb02c Date: 2013-08-28 13:48 +0100 http://bitbucket.org/pypy/pypy/changeset/77fade6cb02c/ Log: Better phrasing, more links in architecture & introduction documents diff --git a/pypy/doc/architecture.rst b/pypy/doc/architecture.rst --- a/pypy/doc/architecture.rst +++ b/pypy/doc/architecture.rst @@ -3,8 +3,9 @@ .. contents:: -This document gives an overview of the goals and architecture of PyPy. -See :doc:`getting started ` for a practical introduction and starting points. +This document gives an overview of the goals and architecture of PyPy. If you're +interested in :ref:`using PyPy ` or :ref:`hacking on it `, +have a look at our :ref:`getting started ` section. Mission statement diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst --- a/pypy/doc/introduction.rst +++ b/pypy/doc/introduction.rst @@ -2,7 +2,7 @@ ============= In common parlance, PyPy has been used to mean two things. The first is the -:doc:`RPython translation toolchain `, which is a framework for generating +:ref:`RPython translation toolchain `, which is a framework for generating dynamic programming language implementations. 
And the second is one particular implementation that is so generated -- an implementation of the Python_ programming language written in @@ -11,12 +11,18 @@ This double usage has proven to be confusing, and we are trying to move away from using the word PyPy to mean both things. From now on we will try to use PyPy to only mean the Python implementation, and say the -:doc:`RPython translation toolchain ` when we mean the framework. Some older -documents, presentations, papers and videos will still have the old +:ref:`RPython translation toolchain ` when we mean the framework. + +Some older documents, presentations, papers and videos will still have the old usage. You are hereby warned. We target a large variety of platforms, small and large, by providing a compiler toolsuite that can produce custom Python versions. Platform, memory and threading models, as well as the JIT compiler itself, are aspects of the translation process - as opposed to encoding low level details into the -language implementation itself. :doc:`more... ` +language implementation itself. + +For more details, have a look at our :doc:`architecture overview `. + +.. _Python: http://python.org +.. _ From noreply at buildbot.pypy.org Wed Aug 28 15:03:37 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 15:03:37 +0200 (CEST) Subject: [pypy-commit] stmgc default: stubs should have a thread assigned to them Message-ID: <20130828130337.628FB1C3676@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r501:cdd017855adc Date: 2013-08-27 18:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/cdd017855adc/ Log: stubs should have a thread assigned to them diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -57,6 +57,8 @@ stub->h_original = (revision_t)obj; } + STUB_THREAD(stub) = d->public_descriptor; + result = (intptr_t)stub; spinlock_release(d->public_descriptor->collection_lock); stm_register_integer_address(result); diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -361,3 +361,10 @@ def test_collect_soon(): lib.stmgc_minor_collect_soon() nalloc(HDR) + + +# def test_out_of_memory(): +# import sys +# size = sys.maxint - 3 +# p = lib.stm_allocate(size, 111) +# assert p == ffi.NULL From noreply at buildbot.pypy.org Wed Aug 28 15:03:38 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 15:03:38 +0200 (CEST) Subject: [pypy-commit] stmgc default: clear MARKED flag too Message-ID: <20130828130338.967B31C3676@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r502:aa8b51f1033d Date: 2013-08-28 15:03 +0200 http://bitbucket.org/pypy/stmgc/changeset/aa8b51f1033d/ Log: clear MARKED flag too diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -588,6 +588,7 @@ assert(!(L->h_tid & GCFLAG_STUB)); assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); L->h_tid &= ~(GCFLAG_VISITED | + GCFLAG_MARKED | GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL | GCFLAG_PUBLIC_TO_PRIVATE | From noreply at buildbot.pypy.org Wed Aug 28 15:13:42 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 15:13:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: cleanup for last fix Message-ID: <20130828131342.3DEE21C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66389:1ae1396869f6 Date: 2013-08-27 11:41 +0200 http://bitbucket.org/pypy/pypy/changeset/1ae1396869f6/ Log: cleanup for last fix diff --git a/rpython/jit/backend/llsupport/regalloc.py 
b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -399,6 +399,13 @@ self.reg_bindings[v] = loc return loc + def update_spill_loc_if_necessary(self, var, current_loc): + """if variable var is in two locations (spilled and current_loc), + update spilled location with current_loc""" + spill_loc = self.frame_manager.get(var) + if spill_loc: + self.assembler.regalloc_mov(current_loc, spill_loc) + def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): v_to_spill = self._pick_variable_to_spill(v, forbidden_vars, diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -816,10 +816,10 @@ argloc = self.rm.make_sure_var_in_reg(arg) self.perform_discard(op, [argloc]) - spilled_loc = self.rm.frame_manager.get(arg) - if spilled_loc: - # spilled var, make sure it gets updated in the frame too - self.assembler.regalloc_mov(argloc, spilled_loc) + # if 'arg' is in two locations (once in argloc and once spilled + # on the frame), we need to ensure that both locations are + # updated with the possibly changed reference. + self.rm.update_spill_loc_if_necessary(arg, argloc) consider_cond_call_gc_wb_array = consider_cond_call_gc_wb From noreply at buildbot.pypy.org Wed Aug 28 15:13:43 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 15:13:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: import stmgc Message-ID: <20130828131343.7758C1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66390:a9503a4c6217 Date: 2013-08-27 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/a9503a4c6217/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -496,15 +496,24 @@ static void mark_registered_stubs(void) { wlog_t *item; + gcptr L; + G2L_LOOP_FORWARD(registered_stubs, item) { gcptr R = item->addr; assert(R->h_tid & GCFLAG_SMALLSTUB); R->h_tid |= (GCFLAG_MARKED | GCFLAG_VISITED); - gcptr L = (gcptr)(R->h_revision - 2); - L = stmgcpage_visit(L); - R->h_revision = ((revision_t)L) | 2; + if (R->h_revision & 2) { + L = (gcptr)(R->h_revision - 2); + L = stmgcpage_visit(L); + R->h_revision = ((revision_t)L) | 2; + } + else { + L = (gcptr)R->h_revision; + L = stmgcpage_visit(L); + R->h_revision = (revision_t)L; + } /* h_original will be kept up-to-date because it is either == L or L's h_original. 
And diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -cb61cf4e30a9 +63c2673c2045 From noreply at buildbot.pypy.org Wed Aug 28 15:13:45 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 15:13:45 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: merge static-write-barriers Message-ID: <20130828131345.038A01C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66391:f37582685aca Date: 2013-08-28 15:07 +0200 http://bitbucket.org/pypy/pypy/changeset/f37582685aca/ Log: merge static-write-barriers diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -58,6 +58,9 @@ 'arm': 'arm', 'arm_32': 'arm', } + backend_to_machine = { + 'x86-64': 'i386:x86-64', + } cmd = find_objdump() objdump = ('%(command)s -w -M %(backend)s -b binary -m %(machine)s ' '--disassembler-options=intel-mnemonics ' @@ -66,12 +69,13 @@ f = open(tmpfile, 'wb') f.write(data) f.close() + backend = objdump_backend_option[backend_name] p = subprocess.Popen(objdump % { 'command': cmd, 'file': tmpfile, 'origin': originaddr, - 'backend': objdump_backend_option[backend_name], - 'machine': 'i386' if not backend_name.startswith('arm') else 'arm', + 'backend': backend, + 'machine': backend_to_machine.get(backend, backend), }, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() assert not p.returncode, ('Encountered an error running objdump: %s' % @@ -239,7 +243,7 @@ self.backend_name = None self.executable_name = None - def parse(self, f, textonly=True, truncate_addr=True): + def parse(self, f, textonly=True): for line in f: line = line[line.find('#') + 1:].strip() if line.startswith('BACKEND '): @@ -251,9 +255,7 @@ if len(pieces) == 3: continue # empty line baseaddr = long(pieces[1][1:], 16) - if truncate_addr: - baseaddr &= 0xFFFFFFFFL - elif baseaddr < 0: + if baseaddr < 0: baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset @@ -273,9 +275,7 @@ assert pieces[1].startswith('@') assert pieces[2].startswith('+') baseaddr = long(pieces[1][1:], 16) - if truncate_addr: - baseaddr &= 0xFFFFFFFFL - elif baseaddr < 0: + if baseaddr < 0: baseaddr += (2 * sys.maxint + 2) offset = int(pieces[2][1:]) addr = baseaddr + offset diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -624,6 +624,7 @@ 'debug_reraise_traceback': LLOp(), 'debug_print_traceback': LLOp(), 'debug_nonnull_pointer': LLOp(canrun=True), + 'debug_stm_flush_barrier': LLOp(canrun=True), # __________ instrumentation _________ 'instrument_count': LLOp(), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -673,6 +673,9 @@ def op_nop(x): pass +def op_debug_stm_flush_barrier(): + pass + # ____________________________________________________________ def get_op_impl(opname): diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/breakfinder.py @@ -0,0 +1,22 @@ +from rpython.translator.backendopt import graphanalyze +from 
rpython.translator.simplify import get_funcobj + + +TRANSACTION_BREAK = set([ + 'stm_commit_transaction', + 'stm_begin_inevitable_transaction', + 'stm_perform_transaction', + ]) + + +class TransactionBreakAnalyzer(graphanalyze.BoolGraphAnalyzer): + + def analyze_simple_operation(self, op, graphinfo): + return op.opname in TRANSACTION_BREAK + + def analyze_external_call(self, op, seen=None): + # if 'funcobj' releases the GIL, then the GIL-releasing + # functions themselves will call stm_commit_transaction + # and stm_begin_inevitable_transaction. This case is + # covered above. + return False diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -48,19 +48,27 @@ def stm_finalize(funcgen, op): return 'stm_finalize();' -_STM_BARRIER_FUNCS = { # XXX try to see if some combinations can be shorter - 'P2R': 'stm_read_barrier', - 'G2R': 'stm_read_barrier', - 'O2R': 'stm_read_barrier', - 'P2W': 'stm_write_barrier', - 'G2W': 'stm_write_barrier', - 'O2W': 'stm_write_barrier', - 'R2W': 'stm_write_barrier', - } - def stm_barrier(funcgen, op): category_change = op.args[0].value - funcname = _STM_BARRIER_FUNCS[category_change] + frm, middle, to = category_change + assert middle == '2' + assert frm < to + if to == 'W': + if frm >= 'V': + funcname = 'stm_repeat_write_barrier' + else: + funcname = 'stm_write_barrier' + elif to == 'V': + funcname = 'stm_write_barrier_noptr' + elif to == 'R': + if frm >= 'Q': + funcname = 'stm_repeat_read_barrier' + else: + funcname = 'stm_read_barrier' + elif to == 'I': + funcname = 'stm_immut_read_barrier' + else: + raise AssertionError(category_change) assert op.args[1].concretetype == op.result.concretetype arg = funcgen.expr(op.args[1]) result = funcgen.expr(op.result) @@ -69,11 +77,20 @@ funcname, arg) def stm_ptr_eq(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) + args = [funcgen.expr(v) for v in op.args] result = funcgen.expr(op.result) + # check for prebuilt arguments + for i, j in [(0, 1), (1, 0)]: + if isinstance(op.args[j], Constant): + if op.args[j].value: # non-NULL + return ('%s = stm_pointer_equal_prebuilt((gcptr)%s, (gcptr)%s);' + % (result, args[i], args[j])) + else: + # this case might be unreachable, but better safe than sorry + return '%s = (%s == NULL);' % (result, args[i]) + # return '%s = stm_pointer_equal((gcptr)%s, (gcptr)%s);' % ( - result, arg0, arg1) + result, args[0], args[1]) def stm_become_inevitable(funcgen, op): try: diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -63c2673c2045 +cdd017855adc+ diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -1,4 +1,6 @@ -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rstm import register_invoke_around_extcall +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.translator.stm.test.transform_support import BaseTestTransform @@ -24,7 +26,7 @@ res = self.interpret(f1, [-5]) assert res == 42 assert len(self.writemode) == 0 - assert self.barriers == ['P2R'] + assert self.barriers == ['I2R'] def test_simple_write(self): X = 
lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -37,7 +39,21 @@ self.interpret(f1, [4]) assert x1.foo == 4 assert len(self.writemode) == 1 - assert self.barriers == ['P2W'] + assert self.barriers == ['I2V'] + + def test_simple_write_pointer(self): + T = lltype.GcStruct('T') + X = lltype.GcStruct('X', ('foo', lltype.Ptr(T))) + t1 = lltype.malloc(T, immortal=True) + x1 = lltype.malloc(X, immortal=True, zero=True) + + def f1(n): + x1.foo = t1 + + self.interpret(f1, [4]) + assert x1.foo == t1 + assert len(self.writemode) == 1 + assert self.barriers == ['I2W'] def test_multiple_reads(self): X = lltype.GcStruct('X', ('foo', lltype.Signed), @@ -58,7 +74,7 @@ res = self.interpret(f1, [4]) assert res == -81 assert len(self.writemode) == 0 - assert self.barriers == ['P2R'] + assert self.barriers == ['I2R'] def test_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -70,10 +86,9 @@ assert len(self.writemode) == 1 assert self.barriers == [] - def test_repeat_write_barrier_after_malloc(self): + def test_dont_repeat_write_barrier_after_malloc_if_not_a_ptr(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) - x1 = lltype.malloc(X, immortal=True) - x1.foo = 6 + x1 = lltype.malloc(X, immortal=True, zero=True) def f1(n): x1.foo = n lltype.malloc(X) @@ -81,7 +96,22 @@ self.interpret(f1, [4]) assert len(self.writemode) == 2 - assert self.barriers == ['P2W', 'r2w'] + assert self.barriers == ['I2V'] + + def test_repeat_write_barrier_after_malloc(self): + T = lltype.GcStruct('T') + X = lltype.GcStruct('X', ('foo', lltype.Ptr(T))) + t1 = lltype.malloc(T, immortal=True) + t2 = lltype.malloc(T, immortal=True) + x1 = lltype.malloc(X, immortal=True, zero=True) + def f1(n): + x1.foo = t1 + lltype.malloc(X) + x1.foo = t2 + + self.interpret(f1, [4]) + assert len(self.writemode) == 2 + assert self.barriers == ['I2W', 'V2W'] def test_repeat_read_barrier_after_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -95,7 +125,7 @@ self.interpret(f1, [4]) assert len(self.writemode) == 1 - assert self.barriers == ['P2R'] + assert self.barriers == ['I2R'] def test_write_may_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -109,10 +139,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 36 - assert self.barriers == ['P2R', 'P2W', 'p2r'] + assert self.barriers == ['A2R', 'A2V', 'q2r'] res = self.interpret(f1, [x, x]) assert res == 42 - assert self.barriers == ['P2R', 'P2W', 'P2R'] + assert self.barriers == ['A2R', 'A2V', 'Q2R'] def test_write_cannot_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -127,37 +157,49 @@ y = lltype.malloc(Y, immortal=True) res = self.interpret(f1, [x, y]) assert res == 36 - assert self.barriers == ['P2R', 'P2W'] + assert self.barriers == ['A2R', 'A2V'] - def test_call_external_random_effects(self): + def test_call_external_release_gil(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) def f1(p): + register_invoke_around_extcall() x1 = p.foo - external_stuff() + external_release_gil() x2 = p.foo return x1 * x2 x = lltype.malloc(X, immortal=True); x.foo = 6 res = self.interpret(f1, [x]) assert res == 36 - assert self.barriers == ['P2R', 'p2r'] + assert self.barriers == ['A2R', 'I2R'] - def test_call_external_no_random_effects(self): + def test_call_external_any_gcobj(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) - external_stuff = rffi.llexternal('external_stuff2', [], lltype.Void, - _callable=lambda: None, - random_effects_on_gcobjs=False, - threadsafe=False) def f1(p): + 
register_invoke_around_extcall() x1 = p.foo - external_stuff() + external_any_gcobj() x2 = p.foo return x1 * x2 x = lltype.malloc(X, immortal=True); x.foo = 6 res = self.interpret(f1, [x]) assert res == 36 - assert self.barriers == ['P2R'] + assert self.barriers == ['A2R', 'q2r'] + + def test_call_external_safest(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(p): + register_invoke_around_extcall() + x1 = p.foo + external_safest() + x2 = p.foo + return x1 * x2 + + x = lltype.malloc(X, immortal=True); x.foo = 6 + res = self.interpret(f1, [x]) + assert res == 36 + assert self.barriers == ['A2R'] def test_pointer_compare_0(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -190,10 +232,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 0 - assert self.barriers == ['P2W', '='] + assert self.barriers == ['A2V', '='] res = self.interpret(f1, [x, x]) assert res == 1 - assert self.barriers == ['P2W', '='] + assert self.barriers == ['A2V', '='] def test_pointer_compare_3(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -204,10 +246,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 1 - assert self.barriers == ['P2W', '='] + assert self.barriers == ['A2V', '='] res = self.interpret(f1, [x, x]) assert res == 0 - assert self.barriers == ['P2W', '='] + assert self.barriers == ['A2V', '='] def test_pointer_compare_4(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -219,10 +261,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 1 - assert self.barriers == ['P2W', 'P2W'] + assert self.barriers == ['A2V', 'A2V'] res = self.interpret(f1, [x, x]) assert res == 0 - assert self.barriers == ['P2W', 'P2W'] + assert self.barriers == ['A2V', 'A2V'] def test_simple_loop(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -235,7 +277,7 @@ res = self.interpret(f1, [x, 5]) assert res == 0 # for now we get this. 
Later, we could probably optimize it - assert self.barriers == ['P2W', 'p2w', 'p2w', 'p2w', 'p2w'] + assert self.barriers == ['A2V', 'a2v', 'a2v', 'a2v', 'a2v'] def test_subclassing(self): class X: @@ -253,35 +295,210 @@ x = Z() x.foo = 815 x.zbar = 'A' - external_stuff() + llop.debug_stm_flush_barrier(lltype.Void) result = x.foo # 1 if isinstance(x, Y): # 2 - result += x.ybar # 3 + result += x.ybar # 3: optimized return result res = self.interpret(f1, [10]) assert res == 42 + 10 - assert self.barriers == ['p2r', 'p2r', 'p2r'] # from 3 blocks (could be - # optimized later) + assert self.barriers == ['a2r', 'a2i'] res = self.interpret(f1, [-10]) assert res == 815 - assert self.barriers == ['p2r', 'p2r'] + assert self.barriers == ['a2r', 'a2i'] + + def test_no_subclasses_2(self): + class Y(object): + pass + def handle(y): + y.ybar += 1 + def make_y(i): + y = Y(); y.foo = 42; y.ybar = i + return y + def f1(i): + y = make_y(i) + llop.debug_stm_flush_barrier(lltype.Void) + prev = y.ybar # a2r + handle(y) # inside handle(): a2r, r2v + return prev + y.ybar # q2r + + res = self.interpret(f1, [10]) + assert res == 21 + assert self.barriers == ['a2r', 'a2r', 'r2v', 'q2r'] + + def test_subclassing_2(self): + class X: + __slots__ = ['foo'] + class Y(X): + pass + class Z(X): + pass + def handle(y): + y.ybar += 1 + def f1(i): + if i > 5: + y = Y(); y.foo = 42; y.ybar = i + x = y + else: + x = Z(); x.foo = 815; x.zbar = 'A' + y = Y(); y.foo = -13; y.ybar = i + llop.debug_stm_flush_barrier(lltype.Void) + prev = x.foo # a2r + handle(y) # inside handle(): a2r, r2v + return prev + x.foo # q2r + + res = self.interpret(f1, [10]) + assert res == 84 + assert self.barriers == ['a2r', 'a2r', 'r2v', 'q2r'] + + def test_subclassing_gcref(self): + Y = lltype.GcStruct('Y', ('foo', lltype.Signed), + ('ybar', lltype.Signed)) + YPTR = lltype.Ptr(Y) + # + def handle(y): + y.ybar += 1 + def f1(i): + if i > 5: + y = lltype.malloc(Y); y.foo = 52 - i; y.ybar = i + x = lltype.cast_opaque_ptr(llmemory.GCREF, y) + else: + y = lltype.nullptr(Y) + x = lltype.cast_opaque_ptr(llmemory.GCREF, y) + llop.debug_stm_flush_barrier(lltype.Void) + prev = lltype.cast_opaque_ptr(YPTR, x).foo # a2r + handle(y) # inside handle(): a2r, r2v + return prev + lltype.cast_opaque_ptr(YPTR, x).ybar # q2r? + + res = self.interpret(f1, [10]) + assert res == 42 + 11 + assert self.barriers == ['a2r', 'a2r', 'r2v', 'a2r'] + # Ideally we should get [... 'q2r'] but getting 'a2r' is not wrong + # either. This is because from a GCREF the only thing we can do is + # cast_opaque_ptr, which is not special-cased in writebarrier.py. 
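The single-letter categories in these expected-barrier lists ('A', 'I', 'Q',
'R', 'V', 'W') are ordered, and the rewritten writebarrier.py further below
inserts a barrier only when an operation needs a strictly higher category than
the pointer is already known to have -- its needs_barrier() is literally a
character comparison.  A small sketch of that rule; needs_barrier itself is
taken from the new writebarrier.py, while the example calls and the per-letter
glosses are informal additions inferred from the tests and the transform::

    # Lowest to highest; plain character comparison happens to give the order:
    #   'A' (unknown) < 'I' (known not to be a stub, enough for immutable
    #   reads) < 'Q' (read barrier done but must be repeated) < 'R' (readable)
    #   < 'V' (write barrier done but may need repeating; enough for writing
    #   non-pointer fields) < 'W' (fully writable)
    def needs_barrier(frm, to):
        return to > frm

    assert needs_barrier('A', 'R')        # unknown pointer needs a read barrier
    assert needs_barrier('R', 'W')        # readable -> writable needs a barrier
    assert not needs_barrier('W', 'R')    # a writable object is already readable
    assert not needs_barrier('R', 'I')    # a readable object cannot be a stub

funcgen.py (earlier in the same changeset) then lowers each needed transition
to the matching C helper, e.g. a barrier to 'R' becomes stm_read_barrier or
stm_repeat_read_barrier depending on whether the source category is already at
least 'Q'.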
def test_write_barrier_repeated(self): class X: pass x = X() + x2 = X() + x3 = X() def f1(i): - x.a = i # write barrier + x.a = x2 # write barrier y = X() # malloc - x.a += 1 # write barrier again + x.a = x3 # repeat write barrier return y res = self.interpret(f1, [10]) - assert self.barriers == ['P2W', 'r2w'] + assert self.barriers == ['I2W', 'V2W'] + def test_read_immutable(self): + class Foo: + _immutable_ = True -external_stuff = rffi.llexternal('external_stuff', [], lltype.Void, - _callable=lambda: None, - random_effects_on_gcobjs=True, - threadsafe=False) + def f1(n): + x = Foo() + llop.debug_stm_flush_barrier(lltype.Void) + if n > 1: + x.foo = n + llop.debug_stm_flush_barrier(lltype.Void) + return x.foo + + res = self.interpret(f1, [4]) + assert res == 4 + assert self.barriers == ['a2v', 'a2i'] + + def test_read_immutable_prebuilt(self): + class Foo: + _immutable_ = True + x1 = Foo() + x1.foo = 42 + x2 = Foo() + x2.foo = 81 + + def f1(n): + if n > 1: + return x2.foo + else: + return x1.foo + + res = self.interpret(f1, [4]) + assert res == 81 + assert self.barriers == [] + + def test_isinstance(self): + class Base: pass + class A(Base): pass + + def f1(n): + if n > 1: + x = Base() + else: + x = A() + return isinstance(x, A) + + res = self.interpret(f1, [5]) + assert res == False + assert self.barriers == ['a2i'] + res = self.interpret(f1, [-5]) + assert res == True + assert self.barriers == ['a2i'] + + def test_isinstance_gcremovetypeptr(self): + class Base: pass + class A(Base): pass + + def f1(n): + if n > 1: + x = Base() + else: + x = A() + return isinstance(x, A) + + res = self.interpret(f1, [5], gcremovetypeptr=True) + assert res == False + assert self.barriers == [] + res = self.interpret(f1, [-5], gcremovetypeptr=True) + assert res == True + assert self.barriers == [] + + def test_infinite_loop_bug(self): + class A(object): + user_overridden_class = False + + def stuff(self): + return 12.3 + + def immutable_unique_id(self): + if self.user_overridden_class: + return None + from rpython.rlib.longlong2float import float2longlong + from rpython.rlib.rarithmetic import r_ulonglong + from rpython.rlib.rbigint import rbigint + real = self.stuff() + imag = self.stuff() + real_b = rbigint.fromrarith_int(float2longlong(real)) + imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) + val = real_b.lshift(64).or_(imag_b).lshift(3) + return val + + def f(): + return A().immutable_unique_id() + + for i in range(10): + self.interpret(f, [], run=False) + + +external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=True, + threadsafe=True) # GIL is released +external_any_gcobj = rffi.llexternal('external_any_gcobj', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=True, + threadsafe=False) # GIL is not released +external_safest = rffi.llexternal('external_safest', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=False, + threadsafe=False) # GIL is not released diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -1,8 +1,6 @@ -import py from rpython.rlib import rstm, rgc, objectmodel -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import 
cast_instance_to_base_ptr from rpython.translator.stm.test.support import CompiledSTMTests from rpython.translator.stm.test import targetdemo2 @@ -303,3 +301,31 @@ t, cbuilder = self.compile(main) data = cbuilder.cmdexec('a b') assert 'test ok\n' in data + + def test_stm_pointer_equal(self): + class Foo: + pass + prebuilt_foo = Foo() + def make(n): + foo1 = Foo() + foo2 = Foo() + if n < 100: + return foo1, foo2, foo1, None + return None, None, None, foo1 # to annotate as "can be none" + def main(argv): + foo1, foo2, foo3, foo4 = make(len(argv)) + assert foo1 is not prebuilt_foo + assert foo1 is not foo2 + assert foo1 is foo3 + assert foo4 is None + assert foo1 is not None + assert prebuilt_foo is not foo1 + assert None is not foo1 + assert None is foo4 + print 'test ok' + return 0 + + main([]) + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert 'test ok\n' in data diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -2,7 +2,7 @@ from rpython.rtyper.llinterp import LLFrame from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache from rpython.translator.stm.transform import STMTransformer -from rpython.translator.stm.writebarrier import NEEDS_BARRIER +from rpython.translator.stm.writebarrier import needs_barrier from rpython.conftest import option @@ -33,12 +33,12 @@ if isinstance(p, _stmptr): return p._category if not p: - return 'N' + return None if p._solid: - return 'P' # allocated with immortal=True + return 'I' # allocated with immortal=True raise AssertionError("unknown category on %r" % (p,)) - def interpret(self, fn, args): + def interpret(self, fn, args, gcremovetypeptr=False, run=True): self.build_state() clear_tcache() interp, self.graph = get_interpreter(fn, args, view=False) @@ -46,6 +46,7 @@ interp.frame_class = LLSTMFrame # self.translator = interp.typer.annotator.translator + self.translator.config.translation.gcremovetypeptr = gcremovetypeptr self.stmtransformer = STMTransformer(self.translator) if self.do_jit_driver: self.stmtransformer.transform_jit_driver() @@ -59,8 +60,9 @@ if self.do_jit_driver: import py py.test.skip("XXX how to test?") - result = interp.eval_graph(self.graph, args) - return result + if run: + result = interp.eval_graph(self.graph, args) + return result class LLSTMFrame(LLFrame): @@ -76,53 +78,90 @@ def check_category(self, p, expected): cat = self.get_category_or_null(p) - assert cat in 'NPRW' + assert cat is None or cat in 'AIQRVW' + if expected is not None: + assert cat is not None and cat >= expected return cat def op_stm_barrier(self, kind, obj): frm, middledigit, to = kind assert middledigit == '2' cat = self.check_category(obj, frm) - if not NEEDS_BARRIER[cat, to]: + if not needs_barrier(cat, to): # a barrier, but with no effect self.llinterpreter.tester.barriers.append(kind.lower()) return obj else: # a barrier, calling a helper ptr2 = _stmptr(obj, to) - if to == 'W': + if to >= 'V': self.llinterpreter.tester.writemode.add(ptr2._obj) self.llinterpreter.tester.barriers.append(kind) return ptr2 def op_stm_ptr_eq(self, obj1, obj2): - self.check_category(obj1, 'P') - self.check_category(obj2, 'P') + self.check_category(obj1, None) + self.check_category(obj2, None) self.llinterpreter.tester.barriers.append('=') return obj1 == obj2 def op_getfield(self, obj, field): - if not obj._TYPE.TO._immutable_field(field): - self.check_category(obj, 
'R') + if obj._TYPE.TO._gckind == 'gc': + if obj._TYPE.TO._immutable_field(field): + expected = 'I' + else: + expected = 'R' + self.check_category(obj, expected) return LLFrame.op_getfield(self, obj, field) def op_setfield(self, obj, fieldname, fieldvalue): - if not obj._TYPE.TO._immutable_field(fieldname): - self.check_category(obj, 'W') - # convert R -> P all other pointers to the same object we can find + if obj._TYPE.TO._gckind == 'gc': + T = lltype.typeOf(fieldvalue) + if isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc': + self.check_category(obj, 'W') + else: + self.check_category(obj, 'V') + # convert R -> Q all other pointers to the same object we can find for p in self.all_stm_ptrs(): if p._category == 'R' and p._T == obj._T and p == obj: - _stmptr._category.__set__(p, 'P') + _stmptr._category.__set__(p, 'Q') return LLFrame.op_setfield(self, obj, fieldname, fieldvalue) def op_cast_pointer(self, RESTYPE, obj): - cat = self.check_category(obj, 'P') - p = opimpl.op_cast_pointer(RESTYPE, obj) - return _stmptr(p, cat) + if obj._TYPE.TO._gckind == 'gc': + cat = self.check_category(obj, None) + p = opimpl.op_cast_pointer(RESTYPE, obj) + return _stmptr(p, cat) + return lltype.cast_pointer(RESTYPE, obj) op_cast_pointer.need_result_type = True + def op_cast_opaque_ptr(self, RESTYPE, obj): + if obj._TYPE.TO._gckind == 'gc': + cat = self.check_category(obj, None) + p = lltype.cast_opaque_ptr(RESTYPE, obj) + return _stmptr(p, cat) + return LLFrame.op_cast_opaque_ptr(self, RESTYPE, obj) + op_cast_opaque_ptr.need_result_type = True + def op_malloc(self, obj, flags): + assert flags['flavor'] == 'gc' + # convert all existing pointers W -> V + for p in self.all_stm_ptrs(): + if p._category == 'W': + _stmptr._category.__set__(p, 'V') p = LLFrame.op_malloc(self, obj, flags) ptr2 = _stmptr(p, 'W') self.llinterpreter.tester.writemode.add(ptr2._obj) return ptr2 + + def transaction_break(self): + # convert -> I all other pointers to the same object we can find + for p in self.all_stm_ptrs(): + if p._category > 'I': + _stmptr._category.__set__(p, 'I') + + def op_stm_commit_transaction(self): + self.transaction_break() + + def op_stm_begin_inevitable_transaction(self): + self.transaction_break() diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -3,6 +3,7 @@ from rpython.translator.stm.inevitable import insert_turn_inevitable from rpython.translator.stm.jitdriver import reorganize_around_jit_driver from rpython.translator.stm.threadlocalref import transform_tlref +from rpython.translator.stm.breakfinder import TransactionBreakAnalyzer from rpython.translator.c.support import log from rpython.memory.gctransform.framework import CollectAnalyzer @@ -11,6 +12,7 @@ def __init__(self, translator): self.translator = translator + self.barrier_counts = {} def transform(self): assert not hasattr(self.translator, 'stm_transformation_applied') @@ -28,10 +30,14 @@ def transform_write_barrier(self): self.write_analyzer = WriteAnalyzer(self.translator) self.collect_analyzer = CollectAnalyzer(self.translator) + self.break_analyzer = TransactionBreakAnalyzer(self.translator) for graph in self.translator.graphs: insert_stm_barrier(self, graph) + for key, value in sorted(self.barrier_counts.items()): + log("%s: %d barriers" % (key, value[0])) del self.write_analyzer del self.collect_analyzer + del self.break_analyzer def transform_turn_inevitable(self): for graph in self.translator.graphs: diff --git 
a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -1,5 +1,6 @@ from rpython.flowspace.model import SpaceOperation, Constant, Variable from rpython.translator.unsimplify import varoftype, insert_empty_block +from rpython.translator.unsimplify import insert_empty_startblock from rpython.rtyper.lltypesystem import lltype from rpython.translator.backendopt.writeanalyze import top_set @@ -9,15 +10,6 @@ 'malloc_nonmovable', 'malloc_nonmovable_varsize', ]) -NEEDS_BARRIER = { - ('P', 'R'): True, - ('P', 'W'): True, - ('R', 'R'): False, - ('R', 'W'): True, - ('W', 'R'): False, - ('W', 'W'): False, - } - def unwraplist(list_v): for v in list_v: if isinstance(v, Constant): @@ -42,153 +34,337 @@ return OUTER._immutable_interiorfield(unwraplist(op.args[1:-1])) raise AssertionError(op) +def needs_barrier(frm, to): + return to > frm + +def is_gc_ptr(T): + return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' + + +class Renaming(object): + def __init__(self, newvar, category): + self.newvar = newvar # a Variable or a Constant + self.TYPE = newvar.concretetype + self.category = category + + +class BlockTransformer(object): + + def __init__(self, stmtransformer, block): + self.stmtransformer = stmtransformer + self.block = block + self.patch = None + self.inputargs_category = [None] * len(block.inputargs) + self.inputargs_category_per_link = {} + + + def analyze_inside_block(self): + gcremovetypeptr = ( + self.stmtransformer.translator.config.translation.gcremovetypeptr) + wants_a_barrier = {} + expand_comparison = set() + for op in self.block.operations: + is_getter = (op.opname in ('getfield', 'getarrayitem', + 'getinteriorfield') and + op.result.concretetype is not lltype.Void and + is_gc_ptr(op.args[0].concretetype)) + + if (gcremovetypeptr and op.opname in ('getfield', 'setfield') and + op.args[1].value == 'typeptr' and + op.args[0].concretetype.TO._hints.get('typeptr')): + # if gcremovetypeptr, we can access directly the typeptr + # field even on a stub + pass + + elif (op.opname in ('getarraysize', 'getinteriorarraysize') + or (is_getter and is_immutable(op))): + # we can't leave getarraysize or the immutable getfields + # fully unmodified: we need at least immut_read_barrier + # to detect stubs. 
+ wants_a_barrier[op] = 'I' + + elif is_getter: + # the non-immutable getfields need a regular read barrier + wants_a_barrier[op] = 'R' + + elif (op.opname in ('setfield', 'setarrayitem', + 'setinteriorfield') and + op.args[-1].concretetype is not lltype.Void and + is_gc_ptr(op.args[0].concretetype)): + # setfields need a regular write barrier + T = op.args[-1].concretetype + if is_gc_ptr(T): + wants_a_barrier[op] = 'W' + else: + # a write of a non-gc pointer doesn't need to check for + # the GCFLAG_WRITEBARRIER + wants_a_barrier[op] = 'V' + + elif (op.opname in ('ptr_eq', 'ptr_ne') and + is_gc_ptr(op.args[0].concretetype)): + # GC pointer comparison might need special care + expand_comparison.add(op) + # + self.wants_a_barrier = wants_a_barrier + self.expand_comparison = expand_comparison + + + def flow_through_block(self, graphinfo): + + def renfetch(v): + try: + return renamings[v] + except KeyError: + if isinstance(v, Variable): + ren = Renaming(v, 'A') + else: + ren = Renaming(v, 'I') # prebuilt objects cannot be stubs + renamings[v] = ren + return ren + + def get_category_or_null(v): + # 'v' is an original variable here, or a constant + if isinstance(v, Constant) and not v.value: # a NULL constant + return None + if v in renamings: + return renamings[v].category + if isinstance(v, Constant): + return 'I' + else: + return 'A' + + def renamings_get(v): + try: + ren = renamings[v] + except KeyError: + return v # unmodified + v2 = ren.newvar + if v2.concretetype == v.concretetype: + return v2 + v3 = varoftype(v.concretetype) + newoperations.append(SpaceOperation('cast_pointer', [v2], v3)) + if lltype.castable(ren.TYPE, v3.concretetype) > 0: + ren.TYPE = v3.concretetype + return v3 + + # note: 'renamings' maps old vars to new vars, but cast_pointers + # are done lazily. It means that the two vars may not have + # exactly the same type. 
+ renamings = {} # {original-var: Renaming(newvar, category)} + newoperations = [] + stmtransformer = self.stmtransformer + + # make the initial trivial renamings needed to have some precise + # categories for the input args + for v, cat in zip(self.block.inputargs, self.inputargs_category): + if cat is not None and is_gc_ptr(v.concretetype): + renamings[v] = Renaming(v, cat) + + for op in self.block.operations: + # + if (op.opname in ('cast_pointer', 'same_as') and + is_gc_ptr(op.result.concretetype)): + renamings[op.result] = renfetch(op.args[0]) + continue + # + to = self.wants_a_barrier.get(op) + if to is not None: + ren = renfetch(op.args[0]) + frm = ren.category + if needs_barrier(frm, to): + try: + b = stmtransformer.barrier_counts[frm, to] + except KeyError: + c_info = Constant('%s2%s' % (frm, to), lltype.Void) + b = [0, c_info] + stmtransformer.barrier_counts[frm, to] = b + b[0] += 1 + c_info = b[1] + v = ren.newvar + w = varoftype(v.concretetype) + newop = SpaceOperation('stm_barrier', [c_info, v], w) + newoperations.append(newop) + ren.newvar = w + ren.category = to + # + newop = SpaceOperation(op.opname, + [renamings_get(v) for v in op.args], + op.result) + newoperations.append(newop) + # + if op in self.expand_comparison: + cats = (get_category_or_null(op.args[0]), + get_category_or_null(op.args[1])) + if None not in cats and (cats[0] < 'V' or cats[1] < 'V'): + if newop.opname == 'ptr_ne': + v = varoftype(lltype.Bool) + negop = SpaceOperation('bool_not', [v], + newop.result) + newoperations.append(negop) + newop.result = v + newop.opname = 'stm_ptr_eq' + + if stmtransformer.break_analyzer.analyze(op): + # this operation can perform a transaction break: + # all pointers are lowered to 'I', because a non- + # stub cannot suddenly point to a stub, but we + # cannot guarantee anything more + for ren in renamings.values(): + if ren.category > 'I': + ren.category = 'I' + + if op.opname == 'debug_stm_flush_barrier': + for ren in renamings.values(): + ren.category = 'A' + + if stmtransformer.collect_analyzer.analyze(op): + # this operation can collect: we bring all 'W' + # categories back to 'V', because we would need + # a repeat_write_barrier on them afterwards + for ren in renamings.values(): + if ren.category == 'W': + ren.category = 'V' + + effectinfo = stmtransformer.write_analyzer.analyze( + op, graphinfo=graphinfo) + if effectinfo: + if effectinfo is top_set: + # this operation can perform random writes: any + # 'R'-category object falls back to 'Q' because + # we would need a repeat_read_barrier() + for ren in renamings.values(): + if ren.category == 'R': + ren.category = 'Q' + else: + # the same, but only on objects of the right types + # -- we need to consider 'types' or any base type + types = set() + for entry in effectinfo: + TYPE = entry[1].TO + while TYPE is not None: + types.add(TYPE) + if not isinstance(TYPE, lltype.Struct): + break + _, TYPE = TYPE._first_struct() + for ren in renamings.values(): + if ren.TYPE.TO in types and ren.category == 'R': + ren.category = 'Q' + + if op.opname in MALLOCS: + assert op.result not in renamings + renamings[op.result] = Renaming(op.result, 'W') + + if isinstance(self.block.exitswitch, Variable): + switchv = renamings_get(self.block.exitswitch) + else: + switchv = None + blockoperations = newoperations + linkoperations = [] + for link in self.block.exits: + output_categories = [] + for v in link.args: + if is_gc_ptr(v.concretetype): + cat = get_category_or_null(v) + else: + cat = None + output_categories.append(cat) + newoperations 
= [] + newargs = [renamings_get(v) for v in link.args] + linkoperations.append((newargs, newoperations, output_categories)) + # + # Record how we'd like to patch the block, but don't do any + # patching yet + self.patch = (blockoperations, switchv, linkoperations) + + + def update_targets(self, block_transformers): + (_, _, linkoperations) = self.patch + assert len(linkoperations) == len(self.block.exits) + targetbts = [] + for link, (_, _, output_categories) in zip(self.block.exits, + linkoperations): + targetblock = link.target + if targetblock not in block_transformers: + continue # ignore the exit block + targetbt = block_transformers[targetblock] + targetbt.inputargs_category_per_link[link] = output_categories + if targetbt.update_inputargs_category(): + targetbts.append(targetbt) + return set(targetbts) + + def update_inputargs_category(self): + values = self.inputargs_category_per_link.values() + newcats = [] + for i in range(len(self.block.inputargs)): + cat = None + for output_categories in values: + cat2 = output_categories[i] + if cat is None: + cat = cat2 + elif cat2 is not None: + cat = min(cat, cat2) + newcats.append(cat) + if newcats != self.inputargs_category: + self.inputargs_category = newcats + return True + else: + return False + + + def patch_now(self): + if self.patch is None: + return + newoperations, switchv, linkoperations = self.patch + self.block.operations = newoperations + if switchv is not None: + self.block.exitswitch = switchv + assert len(linkoperations) == len(self.block.exits) + for link, (newargs, newoperations, _) in zip(self.block.exits, + linkoperations): + link.args[:] = newargs + if newoperations: + # must put them in a fresh block along the link + annotator = self.stmtransformer.translator.annotator + newblock = insert_empty_block(annotator, link, + newoperations) + def insert_stm_barrier(stmtransformer, graph): """This function uses the following characters for 'categories': - * 'P': a general pointer + * 'A': any general pointer + * 'I': not a stub (immut_read_barrier was applied) + * 'Q': same as R, except needs a repeat_read_barrier * 'R': the read barrier was applied + * 'V': same as W, except needs a repeat_write_barrier * 'W': the write barrier was applied + + The letters are chosen so that a barrier is needed to change a + pointer from category x to category y if and only if y > x. """ graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) + annotator = stmtransformer.translator.annotator + insert_empty_startblock(annotator, graph) - def get_category(v): - return category.get(v, 'P') - - def get_category_or_null(v): - if isinstance(v, Constant) and not v.value: - return 'N' - return category.get(v, 'P') - - def renamings_get(v): - if v not in renamings: - return v - v2 = renamings[v][0] - if v2.concretetype == v.concretetype: - return v2 - v3 = varoftype(v.concretetype) - newoperations.append(SpaceOperation('cast_pointer', [v2], v3)) - return v3 + block_transformers = {} + pending = set() for block in graph.iterblocks(): if block.operations == (): continue - # - wants_a_barrier = {} - expand_comparison = set() - for op in block.operations: - # [1] XXX we can't leave getarraysize or the immutable getfields - # fully unmodified. We'd need at least some lightweight - # read barrier to detect stubs. For now we just put a - # regular read barrier. 
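# A simplified model of the merge performed by update_inputargs_category()
# above: the category assumed for a block's input argument is the weakest
# (minimum) category that every incoming link can guarantee, with None
# meaning "no information" (e.g. a non-GC value).  The pending-set loop
# below re-flows blocks until these merged categories stop changing.

def merge_inputargs_categories(per_link_categories):
    merged = []
    for cats in zip(*per_link_categories):
        known = [c for c in cats if c is not None]
        merged.append(min(known) if known else None)
    return merged

# one predecessor proves ('W', 'R'), another only ('R', None):
assert merge_inputargs_categories([['W', 'R'], ['R', None]]) == ['R', 'R']
# a single predecessor passes its categories through unchanged:
assert merge_inputargs_categories([['I', None]]) == ['I', None]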
- if (op.opname in ('getfield', 'getarrayitem', - 'getinteriorfield', - 'getarraysize', 'getinteriorarraysize', # XXX [1] - ) and - op.result.concretetype is not lltype.Void and - op.args[0].concretetype.TO._gckind == 'gc' and - True): #not is_immutable(op)): XXX see [1] - wants_a_barrier[op] = 'R' - elif (op.opname in ('setfield', 'setarrayitem', - 'setinteriorfield') and - op.args[-1].concretetype is not lltype.Void and - op.args[0].concretetype.TO._gckind == 'gc' and - not is_immutable(op)): - wants_a_barrier[op] = 'W' - elif (op.opname in ('ptr_eq', 'ptr_ne') and - op.args[0].concretetype.TO._gckind == 'gc'): - expand_comparison.add(op) - # - if wants_a_barrier or expand_comparison: - # note: 'renamings' maps old vars to new vars, but cast_pointers - # are done lazily. It means that the two vars may not have - # exactly the same type. - renamings = {} # {original-var: [var-in-newoperations] (len 1)} - category = {} # {var-in-newoperations: LETTER} - newoperations = [] - for op in block.operations: - # - if op.opname == 'cast_pointer': - v = op.args[0] - renamings[op.result] = renamings.setdefault(v, [v]) - continue - # - to = wants_a_barrier.get(op) - if to is not None: - v = op.args[0] - v_holder = renamings.setdefault(v, [v]) - v = v_holder[0] - frm = get_category(v) - if NEEDS_BARRIER[frm, to]: - c_info = Constant('%s2%s' % (frm, to), lltype.Void) - w = varoftype(v.concretetype) - newop = SpaceOperation('stm_barrier', [c_info, v], w) - newoperations.append(newop) - v_holder[0] = w - category[w] = to - if to == 'W': - # if any of the other vars in the same path - # points to the same object, they must lose - # their read-status now - for u in block.getvariables(): - if get_category(u) == 'R' \ - and u.concretetype == v.concretetype: - category[u] = 'P' - - # - newop = SpaceOperation(op.opname, - [renamings_get(v) for v in op.args], - op.result) - newoperations.append(newop) - # - if op in expand_comparison: - cats = (get_category_or_null(newop.args[0]), - get_category_or_null(newop.args[1])) - if 'N' not in cats and cats != ('W', 'W'): - if newop.opname == 'ptr_ne': - v = varoftype(lltype.Bool) - negop = SpaceOperation('bool_not', [v], - newop.result) - newoperations.append(negop) - newop.result = v - newop.opname = 'stm_ptr_eq' + bt = BlockTransformer(stmtransformer, block) + bt.analyze_inside_block() + block_transformers[block] = bt + pending.add(bt) - if stmtransformer.collect_analyzer.analyze(op): - # this operation can collect: we bring all 'W' - # categories back to 'R', because we would need - # another stm_write_barrier on them afterwards - for v, cat in category.items(): - if cat == 'W': - category[v] = 'R' + while pending: + # XXX sadly, this seems to be order-dependent. 
Picking the minimum + # of the blocks seems to be necessary, too, to avoid the situation + # of two blocks chasing each other around a loop :-( + bt = min(pending) + pending.remove(bt) + bt.flow_through_block(graphinfo) + pending |= bt.update_targets(block_transformers) - effectinfo = stmtransformer.write_analyzer.analyze( - op, graphinfo=graphinfo) - if effectinfo: - if effectinfo is top_set: - # this operation can perform random writes: any - # 'R'-category object falls back to 'P' because - # we would need another stm_read_barrier() - for v, cat in category.items(): - if cat == 'R': - category[v] = 'P' - else: - # the same, but only on objects of the right types - types = set([entry[1] for entry in effectinfo]) - for v in category.keys(): - if v.concretetype in types and category[v] == 'R': - category[v] = 'P' - - if op.opname in MALLOCS: - category[op.result] = 'W' - - block.operations = newoperations - # - for link in block.exits: - newoperations = [] - for i, v in enumerate(link.args): - link.args[i] = renamings_get(v) - if newoperations: - # must put them in a fresh block along the link - annotator = stmtransformer.translator.annotator - newblock = insert_empty_block(annotator, link, - newoperations) + for bt in block_transformers.values(): + bt.patch_now() From noreply at buildbot.pypy.org Wed Aug 28 15:13:46 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 15:13:46 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: implement rgc.no_release_gil Message-ID: <20130828131346.421F81C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66392:ff994d51e7d4 Date: 2013-08-28 15:09 +0200 http://bitbucket.org/pypy/pypy/changeset/ff994d51e7d4/ Log: implement rgc.no_release_gil diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -46,6 +46,32 @@ return (op.opname in LL_OPERATIONS and LL_OPERATIONS[op.opname].canmallocgc) +class GilAnalyzer(graphanalyze.BoolGraphAnalyzer): + + def analyze_direct_call(self, graph, seen=None): + try: + func = graph.func + except AttributeError: + pass + else: + if getattr(func, '_gctransformer_hint_close_stack_', False): + return True + if getattr(func, '_transaction_break_', False): + return True + + return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, + seen) + def analyze_external_call(self, op, seen=None): + funcobj = op.args[0].value._obj + if getattr(funcobj, 'transactionsafe', False): + return False + else: + return False + + def analyze_simple_operation(self, op, graphinfo): + return False + + def find_initializing_stores(collect_analyzer, graph): from rpython.flowspace.model import mkentrymap entrymap = mkentrymap(graph) @@ -251,6 +277,9 @@ self.collect_analyzer = CollectAnalyzer(self.translator) self.collect_analyzer.analyze_all() + self.gil_analyzer = GilAnalyzer(self.translator) + self.gil_analyzer.analyze_all() + s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc) @@ -639,6 +668,27 @@ # causes it to return True raise Exception("'no_collect' function can trigger collection:" " %s\n%s" % (func, err.getvalue())) + + if func and getattr(func, '_no_release_gil_', False): + if self.gil_analyzer.analyze_direct_call(graph): + # 'no_release_gil' function can release the gil + import cStringIO + err = cStringIO.StringIO() + import sys + prev = 
sys.stdout + try: + sys.stdout = err + ca = GilAnalyzer(self.translator) + ca.verbose = True + ca.analyze_direct_call(graph) # print the "traceback" here + sys.stdout = prev + except: + sys.stdout = prev + # ^^^ for the dump of which operation in which graph actually + # causes it to return True + raise Exception("'no_release_gil' function can release the GIL:" + " %s\n%s" % (func, err.getvalue())) + if self.write_barrier_ptr: self.clean_sets = ( diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -5,7 +5,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gc.semispace import SemiSpaceGC from rpython.memory.gctransform.framework import (CollectAnalyzer, - find_initializing_stores, find_clean_setarrayitems) + find_initializing_stores, find_clean_setarrayitems, GilAnalyzer) from rpython.memory.gctransform.shadowstack import ( ShadowStackFrameworkGCTransformer) from rpython.memory.gctransform.test.test_transform import rtype @@ -100,6 +100,21 @@ gg = graphof(t, g) assert CollectAnalyzer(t).analyze_direct_call(gg) +def test_canrelease_external(): + for ths in ['auto', True, False]: + for sbxs in [True, False]: + fext = rffi.llexternal('fext2', [], lltype.Void, + threadsafe=ths, sandboxsafe=sbxs) + def g(): + fext() + t = rtype(g, []) + gg = graphof(t, g) + + releases = (ths == 'auto' and not sbxs) or ths is True + assert releases == GilAnalyzer(t).analyze_direct_call(gg) + return + + def test_no_collect(gc="minimark"): from rpython.rlib import rgc from rpython.translator.c.genc import CStandaloneBuilder @@ -125,6 +140,60 @@ def test_no_collect_stm(): test_no_collect("stmgc") +def test_no_release_gil(gc="minimark"): + from rpython.rlib import rgc + from rpython.translator.c.genc import CStandaloneBuilder + + @rgc.no_release_gil + def g(): + return 1 + + assert g._dont_inline_ + assert g._no_release_gil_ + + def entrypoint(argv): + return g() + 2 + + t = rtype(entrypoint, [s_list_of_strings]) + if gc == "stmgc": + t.config.translation.stm = True + t.config.translation.gc = gc + cbuild = CStandaloneBuilder(t, entrypoint, t.config, + gcpolicy=FrameworkGcPolicy2) + db = cbuild.generate_graphs_for_llinterp() + +def test_no_release_gil_stm(): + test_no_release_gil("stmgc") + +def test_no_release_gil_detect(gc="minimark"): + from rpython.rlib import rgc + from rpython.translator.c.genc import CStandaloneBuilder + + fext1 = rffi.llexternal('fext1', [], lltype.Void, threadsafe=True) + @rgc.no_release_gil + def g(): + fext1() + return 1 + + assert g._dont_inline_ + assert g._no_release_gil_ + + def entrypoint(argv): + return g() + 2 + + t = rtype(entrypoint, [s_list_of_strings]) + if gc == "stmgc": + t.config.translation.stm = True + t.config.translation.gc = gc + cbuild = CStandaloneBuilder(t, entrypoint, t.config, + gcpolicy=FrameworkGcPolicy2) + f = py.test.raises(Exception, cbuild.generate_graphs_for_llinterp) + expected = "'no_release_gil' function can release the GIL: Author: Remi Meier Branch: stmgc-c4 Changeset: r66393:d7a1bf94c7a6 Date: 2013-08-28 15:10 +0200 http://bitbucket.org/pypy/pypy/changeset/d7a1bf94c7a6/ Log: threadlocal_base() should not be threadsafe (and release the gil) diff --git a/rpython/jit/backend/x86/stmtlocal.py b/rpython/jit/backend/x86/stmtlocal.py --- a/rpython/jit/backend/x86/stmtlocal.py +++ b/rpython/jit/backend/x86/stmtlocal.py @@ -22,7 +22,9 @@ threadlocal_base = 
rffi.llexternal( 'pypy__threadlocal_base', [], lltype.Signed, - compilation_info=eci) + compilation_info=eci, + threadsafe=False, + transactionsafe=True) def tl_segment_prefix(mc): From noreply at buildbot.pypy.org Wed Aug 28 15:13:48 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 15:13:48 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: import stmgc Message-ID: <20130828131348.E6B291C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66394:5fb41d870c59 Date: 2013-08-28 15:11 +0200 http://bitbucket.org/pypy/pypy/changeset/5fb41d870c59/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -589,6 +589,7 @@ assert(!(L->h_tid & GCFLAG_STUB)); assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); L->h_tid &= ~(GCFLAG_VISITED | + GCFLAG_MARKED | GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL | GCFLAG_PUBLIC_TO_PRIVATE | diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -58,6 +58,8 @@ stub->h_original = (revision_t)obj; } + STUB_THREAD(stub) = d->public_descriptor; + result = (intptr_t)stub; spinlock_release(d->public_descriptor->collection_lock); stm_register_integer_address(result); diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -cdd017855adc+ +aa8b51f1033d+ From noreply at buildbot.pypy.org Wed Aug 28 15:13:50 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 15:13:50 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: in-progress fixes to test_stm_integration Message-ID: <20130828131350.1D2631C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66395:671127330eac Date: 2013-08-28 15:12 +0200 http://bitbucket.org/pypy/pypy/changeset/671127330eac/ Log: in-progress fixes to test_stm_integration diff --git a/rpython/jit/backend/x86/test/test_stm_integration.py b/rpython/jit/backend/x86/test/test_stm_integration.py --- a/rpython/jit/backend/x86/test/test_stm_integration.py +++ b/rpython/jit/backend/x86/test/test_stm_integration.py @@ -191,7 +191,7 @@ print "malloc:", size, tid if size > sys.maxint / 2: # for testing exception - return lltype.nullptr(llmemory.GCREF.TO) + raise Exception() entries = size + StmGC.GCHDRSIZE TP = rffi.CArray(lltype.Char) @@ -740,7 +740,7 @@ looptoken = JitCellToken() looptoken.outermost_jitdriver_sd = FakeJitDriverSD() c_loop = cpu.compile_loop(inputargs, ops, looptoken) - + print "\n".join(map(str,c_loop[1])) ARGS = [lltype.Signed] * 10 RES = lltype.Signed @@ -753,18 +753,23 @@ not_forced = ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=BasicFailDescr(1)) not_forced.setfailargs([]) + no_exception = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, + descr=BasicFailDescr(2)) + no_exception.setfailargs([]) ops = [ResOperation(rop.CALL_ASSEMBLER, [i1], i2, descr=looptoken), not_forced, + no_exception, ResOperation(rop.FINISH, [i1], None, descr=finaldescr), ] othertoken = JitCellToken() cpu.done_with_this_frame_descr_int = BasicFinalDescr() - loop = cpu.compile_loop([], ops, othertoken) + c_loop = cpu.compile_loop([], ops, othertoken) + print "\n".join(map(str,c_loop[1])) deadframe = cpu.execute_token(othertoken) frame = rffi.cast(JITFRAMEPTR, 
deadframe) - frame_adr = rffi.cast(lltype.Signed, frame.jf_descr) - assert frame_adr != id(finaldescr) + descr = rffi.cast(lltype.Signed, frame.jf_descr) + assert descr != id(finaldescr) def test_write_barrier_on_spilled(self): From noreply at buildbot.pypy.org Wed Aug 28 16:01:21 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 16:01:21 +0200 (CEST) Subject: [pypy-commit] pypy jitframe-offset: (fijal, antocuni, rguillebert) Small refactoring in the x86 backend Message-ID: <20130828140121.268981C07BB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jitframe-offset Changeset: r66396:0868aba1db4d Date: 2013-08-28 14:59 +0100 http://bitbucket.org/pypy/pypy/changeset/0868aba1db4d/ Log: (fijal, antocuni, rguillebert) Small refactoring in the x86 backend to have one way to read the position in jitframe diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -33,6 +33,9 @@ def get_position(self): raise NotImplementedError # only for stack + def get_jitframe_position(self): + raise NotImplementedError + class RegisterLocation(AssemblerLocation): _immutable_ = True width = WORD diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -46,6 +46,26 @@ self.fcond = fcond self.offset = offset + def compute_gcmap(self, gcmap, failargs, fail_locs, frame_depth): + # note that this is an old version that should not be here in + # the first place. Implement get_jitframe_position on ARM locations + # in order to make it work, then kill this function + input_i = 0 + for i in range(len(failargs)): + arg = failargs[i] + if arg is None: + continue + loc = fail_locs[input_i] + input_i += 1 + if arg.type == REF: + loc = fail_locs[i] + if loc.is_core_reg(): + val = self.cpu.all_reg_indexes[loc.value] + else: + val = loc.get_position() + self.cpu.JITFRAME_FIXED_SIZE + gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) + return gcmap + class ResOpAssembler(BaseAssembler): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -36,22 +36,12 @@ self.is_guard_not_forced = is_guard_not_forced def compute_gcmap(self, gcmap, failargs, fail_locs, frame_depth): - # note that regalloc has a very similar compute, but - # one that does iteration over all bindings, so slightly different, - # eh - input_i = 0 for i in range(len(failargs)): arg = failargs[i] if arg is None: continue - loc = fail_locs[input_i] - input_i += 1 if arg.type == REF: - loc = fail_locs[i] - if loc.is_core_reg(): - val = self.cpu.all_reg_indexes[loc.value] - else: - val = loc.get_position() + self.cpu.JITFRAME_FIXED_SIZE + val = fail_locs[i].get_jitframe_position() gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) return gcmap diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1772,17 +1772,13 @@ regs = gpr_reg_mgr_cls.all_regs for gpr in regs: if gpr not in ignored_regs: - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + v = gpr.get_jitframe_position() mc.MOV_br(v * WORD + base_ofs, gpr.value) if withfloats: - if IS_X86_64: - coeff = 1 - else: - coeff = 2 # Push all XMM regs - ofs = 
len(gpr_reg_mgr_cls.all_regs) - for i in range(len(xmm_reg_mgr_cls.all_regs)): - mc.MOVSD_bx((ofs + i * coeff) * WORD + base_ofs, i) + for reg in xmm_reg_mgr_cls.all_regs: + v = reg.get_jitframe_position() + mc.MOVSD_bx(v * WORD + base_ofs, reg.value) def _pop_all_regs_from_frame(self, mc, ignored_regs, withfloats, callee_only=False): diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -899,12 +899,12 @@ if box.type == REF and self.rm.is_still_alive(box): assert not noregs assert isinstance(loc, RegLoc) - val = gpr_reg_mgr_cls.all_reg_indexes[loc.value] + val = loc.get_jitframe_position() gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) for box, loc in self.fm.bindings.iteritems(): if box.type == REF and self.rm.is_still_alive(box): assert isinstance(loc, FrameLoc) - val = loc.position + JITFRAME_FIXED_SIZE + val = loc.get_jitframe_position() gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) return gcmap diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -1,5 +1,6 @@ from rpython.jit.metainterp.history import ConstInt from rpython.jit.backend.x86 import rx86 +from rpython.jit.backend.x86.arch import JITFRAME_FIXED_SIZE from rpython.rlib.unroll import unrolling_iterable from rpython.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64 from rpython.tool.sourcetools import func_with_new_name @@ -51,6 +52,9 @@ def get_position(self): raise NotImplementedError # only for stack + def get_jitframe_position(self): + raise NotImplementedError + class RawEbpLoc(AssemblerLocation): """ The same as stack location, but does not know it's position. Mostly usable for raw frame access @@ -112,7 +116,7 @@ class FrameLoc(RawEbpLoc): _immutable_ = True - + def __init__(self, position, ebp_offset, type): # _getregkey() returns self.value; the value returned must not # conflict with RegLoc._getregkey(). It doesn't a bit by chance, @@ -128,6 +132,9 @@ def get_position(self): return self.position + def get_jitframe_position(self): + return self.position + JITFRAME_FIXED_SIZE + class RegLoc(AssemblerLocation): _immutable_ = True def __init__(self, regnum, is_xmm): @@ -172,6 +179,18 @@ def is_core_reg(self): return True + def get_jitframe_position(self): + from rpython.jit.backend.x86 import regalloc + + if self.is_xmm: + ofs = len(regalloc.gpr_reg_mgr_cls.all_regs) + if IS_X86_64: + return ofs + self.value + else: + return ofs + 2 * self.value + else: + return regalloc.gpr_reg_mgr_cls.all_reg_indexes[self.value] + class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True @@ -342,7 +361,7 @@ # we actually do: # mov r11, 0xDEADBEEFDEADBEEF # mov rax, [r11] -# +# # NB: You can use the scratch register as a temporary register in # assembler.py, but care must be taken when doing so. 
A call to a method in # LocationCodeBuilder could clobber the scratch register when certain @@ -638,7 +657,7 @@ CVTTSD2SI = _binaryop('CVTTSD2SI') CVTSD2SS = _binaryop('CVTSD2SS') CVTSS2SD = _binaryop('CVTSS2SD') - + SQRTSD = _binaryop('SQRTSD') ANDPD = _binaryop('ANDPD') From noreply at buildbot.pypy.org Wed Aug 28 16:34:11 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 16:34:11 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add support for instantiate_call to rgc.no_release_gil Message-ID: <20130828143411.8AF651C1147@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66397:a47c7b9b052e Date: 2013-08-28 16:31 +0200 http://bitbucket.org/pypy/pypy/changeset/a47c7b9b052e/ Log: add support for instantiate_call to rgc.no_release_gil diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -59,8 +59,9 @@ if getattr(func, '_transaction_break_', False): return True - return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, - seen) + return graphanalyze.BoolGraphAnalyzer.analyze_direct_call( + self, graph, seen) + def analyze_external_call(self, op, seen=None): funcobj = op.args[0].value._obj if getattr(funcobj, 'transactionsafe', False): @@ -68,6 +69,9 @@ else: return False + def analyze_instantiate_call(self, seen=None): + return False + def analyze_simple_operation(self, op, graphinfo): return False diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -112,7 +112,22 @@ releases = (ths == 'auto' and not sbxs) or ths is True assert releases == GilAnalyzer(t).analyze_direct_call(gg) - return + +def test_canrelease_instantiate(): + class O: + pass + class A(O): + pass + class B(O): + pass + + classes = [A, B] + def g(i): + classes[i]() + + t = rtype(g, [int]) + gg = graphof(t, g) + assert not GilAnalyzer(t).analyze_direct_call(gg) def test_no_collect(gc="minimark"): diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,6 +1,6 @@ from rpython.translator.simplify import get_graph, get_funcobj from rpython.tool.algo.unionfind import UnionFind -from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem import lltype, rclass class GraphAnalyzer(object): @@ -64,6 +64,9 @@ result, self.analyze_direct_call(graph, seen)) return result + def analyze_instantiate_call(self, seen=None): + return self.top_result() + def analyze_external_method(self, op, TYPE, meth): return self.top_result() @@ -75,7 +78,7 @@ def compute_graph_info(self, graph): return None - def analyze(self, op, seen=None, graphinfo=None): + def analyze(self, op, seen=None, graphinfo=None, block=None): if op.opname == "direct_call": try: graph = get_graph(op.args[0], self.translator) @@ -96,6 +99,18 @@ elif op.opname == "indirect_call": graphs = op.args[-1].value if graphs is None: + if block is not None: + v_func = op.args[0] + for op1 in block.operations: + if (v_func is op1.result and + op1.opname == 'getfield' and + op1.args[0].concretetype == rclass.CLASSTYPE and + op1.args[1].value == 'instantiate'): + x = self.analyze_instantiate_call(seen) + if self.verbose and x: + 
self.dump_info('analyze_instantiate(%s): %r' % ( + graphs, x)) + return x if self.verbose: self.dump_info('%s to unknown' % (op,)) return self.top_result() @@ -141,7 +156,7 @@ for op in block.operations: result = self.add_to_result( result, - self.analyze(op, seen, graphinfo) + self.analyze(op, seen, graphinfo, block=block) ) if self.is_top_result(result): break @@ -179,7 +194,7 @@ graphs = self.translator.graphs for graph in graphs: for block, op in graph.iterblockops(): - self.analyze(op) + self.analyze(op, block=block) class Dependency(object): From noreply at buildbot.pypy.org Wed Aug 28 16:34:12 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 16:34:12 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add rgc.no_release_gil to places that may access the prebuilt assembler object Message-ID: <20130828143412.D31031C1147@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66398:c7cebe44256d Date: 2013-08-28 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/c7cebe44256d/ Log: add rgc.no_release_gil to places that may access the prebuilt assembler object diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -29,6 +29,7 @@ from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.jit.backend.arm import callbuilder +from rpython.rtyper.lltypesystem.lloperation import llop class AssemblerARM(ResOpAssembler): @@ -1463,7 +1464,9 @@ def not_implemented(msg): - os.write(2, '[ARM/asm] %s\n' % msg) + msg = '[ARM/asm] %s\n' % msg + if we_are_translated(): + llop.debug_print(lltype.Void, msg) raise NotImplementedError(msg) diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -2,6 +2,8 @@ from rpython.jit.metainterp.history import Const, Box, REF, JitCellToken from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.jit.metainterp.resoperation import rop +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop try: from collections import OrderedDict @@ -759,5 +761,7 @@ def not_implemented(msg): - os.write(2, '[llsupport/regalloc] %s\n' % msg) + msg = '[llsupport/regalloc] %s\n' % msg + if we_are_translated(): + llop.debug_print(lltype.Void, msg) raise NotImplementedError(msg) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -512,6 +512,7 @@ else: descr.set_b_slowpath(withcards + 2 * withfloats, rawstart) + @rgc.no_release_gil def assemble_loop(self, loopname, inputargs, operations, looptoken, log, logger=None): '''adds the following attributes to looptoken: @@ -591,6 +592,7 @@ return AsmInfo(ops_offset, rawstart + looppos, size_excluding_failure_stuff - looppos), operations + @rgc.no_release_gil def assemble_bridge(self, faildescr, inputargs, operations, original_loop_token, log, logger=None): if not we_are_translated(): @@ -2818,7 +2820,9 @@ return AddressLoc(ImmedLoc(addr), imm0, 0, 0) def not_implemented(msg): - os.write(2, '[x86/asm] %s\n' % msg) + msg = '[x86/asm] %s\n' % msg + if we_are_translated(): + llop.debug_print(lltype.Void, msg) raise NotImplementedError(msg) class 
BridgeAlreadyCompiled(Exception): diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -29,6 +29,7 @@ from rpython.jit.backend.x86.arch import IS_X86_32, IS_X86_64 from rpython.jit.backend.x86 import rx86 from rpython.rlib.rarithmetic import r_longlong, r_uint +from rpython.rtyper.lltypesystem.lloperation import llop class X86RegisterManager(RegisterManager): @@ -1390,7 +1391,9 @@ return base_ofs + WORD * (position + JITFRAME_FIXED_SIZE) def not_implemented(msg): - os.write(2, '[x86/regalloc] %s\n' % msg) + msg = '[x86/regalloc] %s\n' % msg + if we_are_translated(): + llop.debug_print(lltype.Void, msg) raise NotImplementedError(msg) # xxx hack: set a default value for TargetToken._ll_loop_code. diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -1,6 +1,7 @@ import py from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER +from rpython.rlib import rgc from rpython.jit.backend.x86.assembler import Assembler386 from rpython.jit.backend.x86.regalloc import gpr_reg_mgr_cls, xmm_reg_mgr_cls from rpython.jit.backend.x86.profagent import ProfileAgent @@ -65,10 +66,12 @@ assert self.assembler is not None return RegAlloc(self.assembler, False) + @rgc.no_release_gil def setup_once(self): self.profile_agent.startup() self.assembler.setup_once() + @rgc.no_release_gil def finish_once(self): self.assembler.finish_once() self.profile_agent.shutdown() From noreply at buildbot.pypy.org Wed Aug 28 16:59:44 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 28 Aug 2013 16:59:44 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: Split pypy.interpreter.buffer.Buffer into an interp-level part and an app-level wrapper. Message-ID: <20130828145944.DD7611C0149@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r66399:072e215b9c7b Date: 2013-08-28 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/072e215b9c7b/ Log: Split pypy.interpreter.buffer.Buffer into an interp-level part and an app-level wrapper. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -196,6 +196,9 @@ def immutable_unique_id(self, space): return None + def buffer_w(self, space): + self._typed_unwrap_error(space, "buffer") + def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1256,10 +1259,7 @@ 'to unsigned int')) def buffer_w(self, w_obj): - # returns a Buffer instance - from pypy.interpreter.buffer import Buffer - w_buffer = self.buffer(w_obj) - return self.interp_w(Buffer, w_buffer) + return w_obj.buffer_w(self) def rwbuffer_w(self, w_obj): # returns a RWBuffer instance @@ -1598,7 +1598,6 @@ ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, ['__delete__']), ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -1,30 +1,12 @@ """ Buffer protocol support. """ +from pypy.interpreter.error import OperationError +from rpython.rlib.objectmodel import import_from_mixin -# The implementation of the buffer protocol. 
The basic idea is that we -# can ask any app-level object for a 'buffer' view on it, by calling its -# __buffer__() special method. It should return a wrapped instance of a -# subclass of the Buffer class defined below. Note that __buffer__() is -# a PyPy-only extension to the Python language, made necessary by the -# fact that it's not natural in PyPy to hack an interp-level-only -# interface. -# In normal usage, the convenience method space.buffer_w() should be -# used to get directly a Buffer instance. Doing so also gives you for -# free the typecheck that __buffer__() really returned a wrapped Buffer. - -import operator -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash, import_from_mixin -from rpython.rlib.rstring import StringBuilder - - -class Buffer(W_Root): - """Abstract base class for memory views.""" +class Buffer(object): + """Abstract base class for buffers.""" __slots__ = () # no extra slot here @@ -47,91 +29,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") - # __________ app-level support __________ - - def descr_len(self, space): - return space.wrap(self.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - return space.wrap(self.getitem(start)) - res = self.getslice(start, stop, step, size) - return space.wrap(res) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - if not isinstance(self, RWBuffer): - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - if len(newstring) != 1: - msg = 'buffer[index]=x: x must be a single character' - raise OperationError(space.w_TypeError, space.wrap(msg)) - char = newstring[0] # annotator hint - self.setitem(start, char) - elif step == 1: - if len(newstring) != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_ValueError, space.wrap(msg)) - self.setslice(start, newstring) - else: - raise OperationError(space.w_ValueError, - space.wrap("buffer object does not support" - " slicing with a step")) - - def descr__buffer__(self, space): - return space.wrap(self) - - def descr_str(self, space): - return space.wrap(self.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrap(self.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrap(self.as_str()) - # use the __mul__ method instead of space.mul() so that we - # return NotImplemented instead of raising a 
TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if isinstance(self, RWBuffer): - info = 'read-write buffer' - else: - info = 'read-only buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.getlength())) - class RWBuffer(Buffer): - """Abstract base class for read-write memory views.""" + """Abstract base class for read-write buffers.""" __slots__ = () # no extra slot here @@ -145,72 +45,6 @@ self.setitem(start + i, string[i]) - at unwrap_spec(offset=int, size=int) -def descr_buffer__new__(space, w_subtype, w_object, offset=0, size=-1): - # w_subtype can only be exactly 'buffer' for now - if not space.is_w(w_subtype, space.gettypefor(Buffer)): - raise OperationError(space.w_TypeError, - space.wrap("argument 1 must be 'buffer'")) - - if space.isinstance_w(w_object, space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer - w_buffer = space.wrap(StringBuffer(builder.build())) - else: - w_buffer = space.buffer(w_object) - - buffer = space.interp_w(Buffer, w_buffer) # type-check - if offset == 0 and size == -1: - return w_buffer - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - if isinstance(buffer, RWBuffer): - buffer = RWSubBuffer(buffer, offset, size) - else: - buffer = SubBuffer(buffer, offset, size) - return space.wrap(buffer) - - -Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). -""", - __new__ = interp2app(descr_buffer__new__), - __len__ = interp2app(Buffer.descr_len), - __getitem__ = interp2app(Buffer.descr_getitem), - __setitem__ = interp2app(Buffer.descr_setitem), - __buffer__ = interp2app(Buffer.descr__buffer__), - __str__ = interp2app(Buffer.descr_str), - __add__ = interp2app(Buffer.descr_add), - __eq__ = interp2app(Buffer.descr_eq), - __ne__ = interp2app(Buffer.descr_ne), - __lt__ = interp2app(Buffer.descr_lt), - __le__ = interp2app(Buffer.descr_le), - __gt__ = interp2app(Buffer.descr_gt), - __ge__ = interp2app(Buffer.descr_ge), - __hash__ = interp2app(Buffer.descr_hash), - __mul__ = interp2app(Buffer.descr_mul), - __rmul__ = interp2app(Buffer.descr_mul), - __repr__ = interp2app(Buffer.descr_repr), -) -Buffer.typedef.acceptable_as_base_class = False # ____________________________________________________________ diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -1,14 +1,168 @@ """ Implementation of the 'buffer' and 'memoryview' types. 
""" +import operator + +from pypy.interpreter import buffer from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import buffer +from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError -import operator +from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.rstring import StringBuilder -W_Buffer = buffer.Buffer # actually implemented in pypy.interpreter.buffer + +def _buffer_setitem(space, buf, w_index, newstring): + start, stop, step, size = space.decode_index4(w_index, buf.getlength()) + if step == 0: # index only + if len(newstring) != 1: + msg = 'buffer[index]=x: x must be a single character' + raise OperationError(space.w_TypeError, space.wrap(msg)) + char = newstring[0] # annotator hint + buf.setitem(start, char) + elif step == 1: + if len(newstring) != size: + msg = "right operand length must match slice length" + raise OperationError(space.w_ValueError, space.wrap(msg)) + buf.setslice(start, newstring) + else: + raise OperationError(space.w_ValueError, + space.wrap("buffer object does not support" + " slicing with a step")) + + +class W_Buffer(W_Root): + """Implement the built-in 'buffer' type as a thin wrapper around + an interp-level buffer. + """ + + def __init__(self, buf): + self.buf = buf + + def buffer_w(self, space): + return self.buf + + @staticmethod + @unwrap_spec(offset=int, size=int) + def descr_new(space, w_subtype, w_object, offset=0, size=-1): + if space.isinstance_w(w_object, space.w_unicode): + # unicode objects support the old buffer interface + # but not the new buffer interface (change in python 2.7) + from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE + unistr = space.unicode_w(w_object) + builder = StringBuilder(len(unistr) * UNICODE_SIZE) + for unich in unistr: + pack_unichar(unich, builder) + from pypy.interpreter.buffer import StringBuffer + buf = StringBuffer(builder.build()) + else: + buf = space.buffer_w(w_object) + + if offset == 0 and size == -1: + return W_Buffer(buf) + # handle buffer slices + if offset < 0: + raise OperationError(space.w_ValueError, + space.wrap("offset must be zero or positive")) + if size < -1: + raise OperationError(space.w_ValueError, + space.wrap("size must be zero or positive")) + if isinstance(buf, buffer.RWBuffer): + buf = buffer.RWSubBuffer(buf, offset, size) + else: + buf = buffer.SubBuffer(buf, offset, size) + return W_Buffer(buf) + + def descr_len(self, space): + return space.wrap(self.buf.getlength()) + + def descr_getitem(self, space, w_index): + start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + if step == 0: # index only + return space.wrap(self.buf.getitem(start)) + res = self.buf.getslice(start, stop, step, size) + return space.wrap(res) + + @unwrap_spec(newstring='bufferstr') + def descr_setitem(self, space, w_index, newstring): + if not isinstance(self.buf, buffer.RWBuffer): + raise OperationError(space.w_TypeError, + space.wrap("buffer is read-only")) + _buffer_setitem(space, self.buf, w_index, newstring) + + def descr_str(self, space): + return space.wrap(self.buf.as_str()) + + @unwrap_spec(other='bufferstr') + def descr_add(self, space, other): + return space.wrap(self.buf.as_str() + other) + + def _make_descr__cmp(name): + def descr__cmp(self, space, w_other): + if not isinstance(w_other, W_Buffer): + return space.w_NotImplemented + # xxx not the most efficient 
implementation + str1 = self.buf.as_str() + str2 = w_other.buf.as_str() + return space.wrap(getattr(operator, name)(str1, str2)) + descr__cmp.func_name = name + return descr__cmp + + descr_eq = _make_descr__cmp('eq') + descr_ne = _make_descr__cmp('ne') + descr_lt = _make_descr__cmp('lt') + descr_le = _make_descr__cmp('le') + descr_gt = _make_descr__cmp('gt') + descr_ge = _make_descr__cmp('ge') + + def descr_hash(self, space): + return space.wrap(compute_hash(self.buf.as_str())) + + def descr_mul(self, space, w_times): + # xxx not the most efficient implementation + w_string = space.wrap(self.buf.as_str()) + # use the __mul__ method instead of space.mul() so that we + # return NotImplemented instead of raising a TypeError + return space.call_method(w_string, '__mul__', w_times) + + def descr_repr(self, space): + if isinstance(self.buf, buffer.RWBuffer): + info = 'read-write buffer' + else: + info = 'read-only buffer' + addrstring = self.getaddrstring(space) + + return space.wrap("<%s for 0x%s, size %d>" % + (info, addrstring, self.buf.getlength())) + +W_Buffer.typedef = TypeDef( + "buffer", + __doc__ = """\ +buffer(object [, offset[, size]]) + +Create a new buffer object which references the given object. +The buffer will reference a slice of the target object from the +start of the object (or at the specified offset). The slice will +extend to the end of the target object (or with the specified size). +""", + __new__ = interp2app(W_Buffer.descr_new), + __len__ = interp2app(W_Buffer.descr_len), + __getitem__ = interp2app(W_Buffer.descr_getitem), + __setitem__ = interp2app(W_Buffer.descr_setitem), + __str__ = interp2app(W_Buffer.descr_str), + __add__ = interp2app(W_Buffer.descr_add), + __eq__ = interp2app(W_Buffer.descr_eq), + __ne__ = interp2app(W_Buffer.descr_ne), + __lt__ = interp2app(W_Buffer.descr_lt), + __le__ = interp2app(W_Buffer.descr_le), + __gt__ = interp2app(W_Buffer.descr_gt), + __ge__ = interp2app(W_Buffer.descr_ge), + __hash__ = interp2app(W_Buffer.descr_hash), + __mul__ = interp2app(W_Buffer.descr_mul), + __rmul__ = interp2app(W_Buffer.descr_mul), + __repr__ = interp2app(W_Buffer.descr_repr), +) +W_Buffer.typedef.acceptable_as_base_class = False class W_MemoryView(W_Root): @@ -17,9 +171,13 @@ """ def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) self.buf = buf + @staticmethod + def descr_new(space, w_subtype, w_object): + w_memoryview = W_MemoryView(space.buffer_w(w_object)) + return w_memoryview + def _make_descr__cmp(name): def descr__cmp(self, space, w_other): if isinstance(w_other, W_MemoryView): @@ -29,14 +187,14 @@ return space.wrap(getattr(operator, name)(str1, str2)) try: - w_buf = space.buffer(w_other) + buf = space.buffer_w(w_other) except OperationError, e: if not e.match(space, space.w_TypeError): raise return space.w_NotImplemented else: str1 = self.as_str() - str2 = space.buffer_w(w_buf).as_str() + str2 = buf.as_str() return space.wrap(getattr(operator, name)(str1, str2)) descr__cmp.func_name = name return descr__cmp @@ -98,15 +256,13 @@ @unwrap_spec(newstring='bufferstr') def descr_setitem(self, space, w_index, newstring): - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf.descr_setitem(space, w_index, newstring) - else: + if not isinstance(self.buf, buffer.RWBuffer): raise OperationError(space.w_TypeError, space.wrap("cannot modify read-only memory")) + _buffer_setitem(space, self.buf, w_index, newstring) def descr_len(self, space): - return self.buf.descr_len(space) + return space.wrap(self.buf.getlength()) def w_get_format(self, 
space): return space.wrap("B") @@ -130,18 +286,12 @@ # I've never seen anyone filling this field return space.w_None - -def descr_new(space, w_subtype, w_object): - memoryview = W_MemoryView(space.buffer(w_object)) - return space.wrap(memoryview) - W_MemoryView.typedef = TypeDef( "memoryview", __doc__ = """\ Create a new memoryview object which references the given object. """, - __new__ = interp2app(descr_new), - __buffer__ = interp2app(W_MemoryView.descr_buffer), + __new__ = interp2app(W_MemoryView.descr_new), __eq__ = interp2app(W_MemoryView.descr_eq), __ge__ = interp2app(W_MemoryView.descr_ge), __getitem__ = interp2app(W_MemoryView.descr_getitem), diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -4,6 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.rtyper.lltypesystem import rffi @@ -38,35 +39,19 @@ raw_cdata[i] = string[i] -class MiniBuffer(W_Root): - # a different subclass of W_Root for the MiniBuffer, because we - # want a slightly different (simplified) API at the level of Python. +# Override the typedef to narrow down the interface that's exposed to app-level +class MiniBuffer(W_Buffer): def __init__(self, buffer, keepalive=None): - self.buffer = buffer + W_Buffer. __init__(self, buffer) self.keepalive = keepalive - def descr_len(self, space): - return self.buffer.descr_len(space) - - def descr_getitem(self, space, w_index): - return self.buffer.descr_getitem(space, w_index) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - self.buffer.descr_setitem(space, w_index, newstring) - - def descr__buffer__(self, space): - return self.buffer.descr__buffer__(space) - - MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), - __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -206,7 +206,6 @@ __setitem__ = interp2app(W_ArrayInstance.descr_setitem), __getitem__ = interp2app(W_ArrayInstance.descr_getitem), __len__ = interp2app(W_ArrayInstance.getlength), - __buffer__ = interp2app(W_ArrayInstance.descr_buffer), buffer = GetSetProperty(W_ArrayInstance.getbuffer), shape = interp_attrproperty('shape', W_ArrayInstance), free = interp2app(W_ArrayInstance.free), @@ -230,7 +229,6 @@ __setitem__ = interp2app(W_ArrayInstance.descr_setitem), __getitem__ = interp2app(W_ArrayInstance.descr_getitem), __len__ = interp2app(W_ArrayInstance.getlength), - __buffer__ = interp2app(W_ArrayInstance.descr_buffer), buffer = GetSetProperty(W_ArrayInstance.getbuffer), shape = interp_attrproperty('shape', W_ArrayInstance), byptr = interp2app(W_ArrayInstance.byptr), diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -355,9 +355,9 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = 
lltype.nullptr(rffi.VOIDP.TO) - def descr_buffer(self, space): + def buffer_w(self, space): from pypy.module._rawffi.buffer import RawFFIBuffer - return space.wrap(RawFFIBuffer(self)) + return RawFFIBuffer(self) def getrawsize(self): raise NotImplementedError("abstract base class") diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -367,7 +367,6 @@ __repr__ = interp2app(W_StructureInstance.descr_repr), __getattr__ = interp2app(W_StructureInstance.getattr), __setattr__ = interp2app(W_StructureInstance.setattr), - __buffer__ = interp2app(W_StructureInstance.descr_buffer), buffer = GetSetProperty(W_StructureInstance.getbuffer), free = interp2app(W_StructureInstance.free), shape = interp_attrproperty('shape', W_StructureInstance), @@ -389,7 +388,6 @@ __repr__ = interp2app(W_StructureInstance.descr_repr), __getattr__ = interp2app(W_StructureInstance.getattr), __setattr__ = interp2app(W_StructureInstance.setattr), - __buffer__ = interp2app(W_StructureInstance.descr_buffer), buffer = GetSetProperty(W_StructureInstance.getbuffer), shape = interp_attrproperty('shape', W_StructureInstance), byptr = interp2app(W_StructureInstance.byptr), diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -126,6 +126,9 @@ self.len = 0 self.allocated = 0 + def buffer_w(self, space): + return ArrayBuffer(self) + def descr_append(self, space, w_x): """ append(x) @@ -454,9 +457,6 @@ # Misc methods - def descr_buffer(self, space): - return space.wrap(ArrayBuffer(self)) - def descr_repr(self, space): if self.len == 0: return space.wrap("array('%s')" % self.typecode) @@ -500,7 +500,6 @@ __radd__ = interp2app(W_ArrayBase.descr_radd), __rmul__ = interp2app(W_ArrayBase.descr_rmul), - __buffer__ = interp2app(W_ArrayBase.descr_buffer), __repr__ = interp2app(W_ArrayBase.descr_repr), itemsize = GetSetProperty(descr_itemsize), diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -17,6 +17,10 @@ self.space = space self.mmap = mmap_obj + def buffer_w(self, space): + self.check_valid() + return MMapBuffer(self.space, self.mmap) + def close(self): self.mmap.close() @@ -194,10 +198,6 @@ self.mmap.setitem(start, value[i]) start += step - def descr_buffer(self): - self.check_valid() - return self.space.wrap(MMapBuffer(self.space, self.mmap)) - if rmmap._POSIX: @unwrap_spec(fileno=int, length=int, flags=int, @@ -254,7 +254,6 @@ __len__ = interp2app(W_MMap.__len__), __getitem__ = interp2app(W_MMap.descr_getitem), __setitem__ = interp2app(W_MMap.descr_setitem), - __buffer__ = interp2app(W_MMap.descr_buffer), ) constants = rmmap.constants diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -28,6 +28,9 @@ """ representation for debugging purposes """ return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) + def buffer_w(w_self, space): + return BytearrayBuffer(w_self.data) + registerimplementation(W_BytearrayObject) init_signature = Signature(['source', 'encoding', 'errors'], None, None) @@ -708,9 +711,5 @@ def setitem(self, index, char): self.data[index] = char -def buffer__Bytearray(space, self): - b = BytearrayBuffer(self.data) - return space.wrap(b) - from pypy.objspace.std import 
bytearraytype register_all(vars(), bytearraytype) diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -107,12 +107,11 @@ else: # If object supports the buffer interface try: - w_buffer = space.buffer(w_value) + buf = space.buffer_w(w_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - buf = space.interp_w(Buffer, w_buffer) value, w_longval = string_to_int_or_long(space, buf.as_str()) ok = True diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -66,6 +66,9 @@ def str_w(w_self, space): return w_self._value + def buffer_w(w_self, space): + return StringBuffer(w_self._value) + def listview_str(w_self): return _create_list_from_string(w_self._value) @@ -958,9 +961,6 @@ formatter = newformat.str_formatter(space, spec) return formatter.format_string(w_string._value) -def buffer__String(space, w_string): - return space.wrap(StringBuffer(w_string._value)) - # register all methods from pypy.objspace.std import stringtype register_all(vars(), stringtype) From noreply at buildbot.pypy.org Wed Aug 28 17:37:27 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 17:37:27 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: (fijal, arigo, antocuni, rguillebert) in-progress, start fighting with Message-ID: <20130828153727.C49041C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66400:f43bf844019b Date: 2013-08-28 15:03 +0100 http://bitbucket.org/pypy/pypy/changeset/f43bf844019b/ Log: (fijal, arigo, antocuni, rguillebert) in-progress, start fighting with resume diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -706,13 +706,6 @@ if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i - if op.is_guard(): - for arg in op.getfailargs(): - if arg is None: # hole - continue - assert isinstance(arg, Box) - if arg not in last_used: - last_used[arg] = i # longevity = {} for arg in produced: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -911,15 +911,14 @@ oopspecindex = effectinfo.oopspecindex genop_math_list[oopspecindex](self, op, arglocs, resloc) - def regalloc_perform_with_guard(self, op, guard_op, faillocs, + def regalloc_perform_with_guard(self, op, guard_op, arglocs, resloc, frame_depth): faildescr = guard_op.getdescr() assert isinstance(faildescr, AbstractFailDescr) - failargs = guard_op.getfailargs() guard_opnum = guard_op.getopnum() guard_token = self.implement_guard_recovery(guard_opnum, - faildescr, failargs, - faillocs, frame_depth) + faildescr, + frame_depth) if op is None: dispatch_opnum = guard_opnum else: @@ -930,9 +929,9 @@ # must be added by the genop_guard_list[]() assert guard_token is self.pending_guard_tokens[-1] - def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc, + def regalloc_perform_guard(self, guard_op, arglocs, resloc, frame_depth): - self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs, + self.regalloc_perform_with_guard(None, guard_op, arglocs, resloc, frame_depth) def load_effective_addr(self, sizereg, baseofs, scale, 
result, frm=imm0): @@ -1712,14 +1711,15 @@ # self.implement_guard(guard_token, 'NE') - def implement_guard_recovery(self, guard_opnum, faildescr, failargs, - fail_locs, frame_depth): + def implement_guard_recovery(self, guard_opnum, faildescr, + frame_depth): exc = (guard_opnum == rop.GUARD_EXCEPTION or guard_opnum == rop.GUARD_NO_EXCEPTION or guard_opnum == rop.GUARD_NOT_FORCED) is_guard_not_invalidated = guard_opnum == rop.GUARD_NOT_INVALIDATED is_guard_not_forced = guard_opnum == rop.GUARD_NOT_FORCED gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + XXX return GuardToken(self.cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -7,6 +7,7 @@ from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) from rpython.jit.backend.llsupport.gcmap import allocate_gcmap +from rpython.jit.backend.llsupport.resumebuilder import ResumeBuilder from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempBox, compute_vars_longevity, is_comparison_or_ovf_op) from rpython.jit.backend.x86 import rx86 @@ -22,7 +23,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop, ResOperation +from rpython.jit.metainterp.resoperation import rop from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint @@ -124,6 +125,7 @@ def __init__(self, assembler, translate_support_code=False): assert isinstance(translate_support_code, bool) + self.resumebuilder = ResumeBuilder(self) # variables that have place in register self.assembler = assembler self.translate_support_code = translate_support_code @@ -276,30 +278,23 @@ self.assembler.dump('%s <- %s(%s)' % (result_loc, op, arglocs)) self.assembler.regalloc_perform_math(op, arglocs, result_loc) - def locs_for_fail(self, guard_op): - return [self.loc(v) for v in guard_op.getfailargs()] - def perform_with_guard(self, op, guard_op, arglocs, result_loc): - faillocs = self.locs_for_fail(guard_op) self.rm.position += 1 self.xrm.position += 1 - self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs, + self.assembler.regalloc_perform_with_guard(op, guard_op, arglocs, result_loc, self.fm.get_frame_depth()) - self.possibly_free_vars(guard_op.getfailargs()) def perform_guard(self, guard_op, arglocs, result_loc): - faillocs = self.locs_for_fail(guard_op) if not we_are_translated(): if result_loc is not None: self.assembler.dump('%s <- %s(%s)' % (result_loc, guard_op, arglocs)) else: self.assembler.dump('%s(%s)' % (guard_op, arglocs)) - self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs, + self.assembler.regalloc_perform_guard(guard_op, arglocs, result_loc, self.fm.get_frame_depth()) - self.possibly_free_vars(guard_op.getfailargs()) def perform_discard(self, op, arglocs): if not we_are_translated(): @@ -314,6 +309,10 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.xrm.position = i + if op.is_resume(): + self.resumebuilder.process(op) + i += 1 + continue if op.has_no_side_effect() and op.result not in self.longevity: i += 1 
self.possibly_free_vars_for_op(op) @@ -1333,6 +1332,7 @@ # self._compute_hint_frame_locations_from_descr(descr) def consider_guard_not_forced_2(self, op): + xxx self.rm.before_call(op.getfailargs(), save_all_regs=True) fail_locs = [self.loc(v) for v in op.getfailargs()] self.assembler.store_force_descr(op, fail_locs, diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -51,6 +51,7 @@ def get_position(self): raise NotImplementedError # only for stack + class RawEbpLoc(AssemblerLocation): """ The same as stack location, but does not know it's position. Mostly usable for raw frame access @@ -112,7 +113,7 @@ class FrameLoc(RawEbpLoc): _immutable_ = True - + def __init__(self, position, ebp_offset, type): # _getregkey() returns self.value; the value returned must not # conflict with RegLoc._getregkey(). It doesn't a bit by chance, @@ -342,7 +343,7 @@ # we actually do: # mov r11, 0xDEADBEEFDEADBEEF # mov rax, [r11] -# +# # NB: You can use the scratch register as a temporary register in # assembler.py, but care must be taken when doing so. A call to a method in # LocationCodeBuilder could clobber the scratch register when certain @@ -638,7 +639,7 @@ CVTTSD2SI = _binaryop('CVTTSD2SI') CVTSD2SS = _binaryop('CVTSD2SS') CVTSS2SD = _binaryop('CVTSS2SD') - + SQRTSD = _binaryop('SQRTSD') ANDPD = _binaryop('ANDPD') diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -289,7 +289,8 @@ for key, value in rop.__dict__.items(): if not key.startswith('_'): if (rop._FINAL_FIRST <= value <= rop._FINAL_LAST or - rop._GUARD_FIRST <= value <= rop._GUARD_LAST): + rop._GUARD_FIRST <= value <= rop._GUARD_LAST or + rop._RESUME_FIRST <= value <= rop._RESUME_LAST): continue # find which list to store the operation in, based on num_args num_args = resoperation.oparity[value] diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -51,15 +51,6 @@ def numargs(self): raise NotImplementedError - # methods implemented by GuardResOp - # --------------------------------- - - def getfailargs(self): - return None - - def setfailargs(self, fail_args): - raise NotImplementedError - # methods implemented by ResOpWithDescr # ------------------------------------- @@ -145,10 +136,15 @@ self.getopnum() == rop.GUARD_NO_OVERFLOW) def is_always_pure(self): - return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST + return (rop._ALWAYS_PURE_FIRST <= self.getopnum() + <= rop._ALWAYS_PURE_LAST) def has_no_side_effect(self): - return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST + return (rop._NOSIDEEFFECT_FIRST <= self.getopnum() + <= rop._NOSIDEEFFECT_LAST) + + def is_resume(self): + return rop._RESUME_FIRST <= self.getopnum() <= rop._RESUME_LAST def can_raise(self): return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST @@ -211,24 +207,7 @@ class GuardResOp(ResOpWithDescr): - - _fail_args = None - - def getfailargs(self): - return self._fail_args - - def setfailargs(self, fail_args): - self._fail_args = fail_args - - def copy_and_change(self, opnum, args=None, result=None, descr=None): - newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr) - newop.setfailargs(self.getfailargs()) - return newop - - def clone(self): - 
newop = AbstractResOp.clone(self) - newop.setfailargs(self.getfailargs()) - return newop + pass # ============ # arity mixins @@ -489,8 +468,11 @@ 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', 'MARK_OPAQUE_PTR/1b', - # this one has no *visible* side effect, since the virtualizable - # must be forced, however we need to execute it anyway + '_RESUME_FIRST', + 'ENTER_FRAME/1d', + 'LEAVE_FRAME/0', + 'RESUME_PUT/3', + '_RESUME_LAST', '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'SETARRAYITEM_GC/3d', diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -49,7 +49,7 @@ return newop -def default_fail_descr(model, opnum, fail_args=None): +def default_fail_descr(model, opnum): if opnum == rop.FINISH: return model.BasicFinalDescr() return model.BasicFailDescr() @@ -217,38 +217,22 @@ if endnum == -1: raise ParseError("invalid line: %s" % line) args, descr = self.parse_args(opname, line[num + 1:endnum]) + if '[' in line: + raise Exception("failargs are dead") if rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST: - i = line.find('[', endnum) + 1 - j = line.find(']', i) - if (i <= 0 or j <= 0) and not self.nonstrict: - raise ParseError("missing fail_args for guard operation") - fail_args = [] - if i < j: - for arg in line[i:j].split(','): - arg = arg.strip() - if arg == 'None': - fail_arg = None - else: - try: - fail_arg = self.vars[arg] - except KeyError: - raise ParseError( - "Unknown var in fail_args: %s" % arg) - fail_args.append(fail_arg) if descr is None and self.invent_fail_descr: - descr = self.invent_fail_descr(self.model, opnum, fail_args) + descr = self.invent_fail_descr(self.model, opnum) if hasattr(descr, '_oparser_uses_descr_of_guard'): - descr._oparser_uses_descr_of_guard(self, fail_args) + descr._oparser_uses_descr_of_guard(self) else: - fail_args = None if opnum == rop.FINISH: if descr is None and self.invent_fail_descr: - descr = self.invent_fail_descr(self.model, opnum, fail_args) + descr = self.invent_fail_descr(self.model, opnum) elif opnum == rop.JUMP: if descr is None and self.invent_fail_descr: descr = self.original_jitcell_token - return opnum, args, descr, fail_args + return opnum, args, descr def create_op(self, opnum, args, result, descr): if opnum == ESCAPE_OP.OPNUM: @@ -268,21 +252,17 @@ res, op = line.split("=", 1) res = res.strip() op = op.strip() - opnum, args, descr, fail_args = self.parse_op(op) + opnum, args, descr = self.parse_op(op) if res in self.vars: raise ParseError("Double assign to var %s in line: %s" % (res, line)) rvar = self.box_for_var(res) self.vars[res] = rvar res = self.create_op(opnum, args, rvar, descr) - if fail_args is not None: - res.setfailargs(fail_args) return res def parse_op_no_result(self, line): - opnum, args, descr, fail_args = self.parse_op(line) + opnum, args, descr = self.parse_op(line) res = self.create_op(opnum, args, None, descr) - if fail_args is not None: - res.setfailargs(fail_args) return res def parse_next_op(self, line): diff --git a/rpython/jit/tool/test/test_oparser.py b/rpython/jit/tool/test/test_oparser.py --- a/rpython/jit/tool/test/test_oparser.py +++ b/rpython/jit/tool/test/test_oparser.py @@ -33,14 +33,13 @@ def test_const_ptr_subops(self): x = """ [p0] - guard_class(p0, ConstClass(vtable)) [] + guard_class(p0, ConstClass(vtable)) """ S = lltype.Struct('S') vtable = lltype.nullptr(S) loop = self.parse(x, None, locals()) assert len(loop.operations) == 1 assert 
loop.operations[0].getdescr() - assert loop.operations[0].getfailargs() == [] def test_descr(self): class Xyz(AbstractDescr): @@ -57,7 +56,7 @@ def test_after_fail(self): x = """ [i0] - guard_value(i0, 3) [] + guard_value(i0, 3) i1 = int_add(1, 2) """ loop = self.parse(x, None, {}) @@ -174,7 +173,7 @@ i4 = int_add(i0, 2) i6 = int_sub(i1, 1) i8 = int_gt(i6, 3) - guard_true(i8, descr=) [i4, i6] + guard_true(i8, descr=) debug_merge_point('(no jitdriver.get_printable_location!)', 0) jump(i6, i4, descr=) ''' @@ -195,14 +194,6 @@ loop = self.parse(x) assert loop.operations[0].getopname() == 'new' - def test_no_fail_args(self): - x = ''' - [i0] - guard_true(i0, descr=) - ''' - loop = self.parse(x, nonstrict=True) - assert loop.operations[0].getfailargs() == [] - def test_no_inputargs(self): x = ''' i2 = int_add(i0, i1) From noreply at buildbot.pypy.org Wed Aug 28 17:37:29 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 17:37:29 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: merge jitframe-offset Message-ID: <20130828153729.31BAE1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66401:ea101e9bbee4 Date: 2013-08-28 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/ea101e9bbee4/ Log: merge jitframe-offset diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -88,13 +88,27 @@ w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) return loop.getitem_filter(w_res, self, arr) - def setitem_filter(self, space, idx, val): + def setitem_filter(self, space, idx, value): + from pypy.module.micronumpy.interp_boxes import Box + val = value if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): raise OperationError(space.w_ValueError, space.wrap("boolean index array should have 1 dimension")) if idx.get_size() > self.get_size(): raise OperationError(space.w_ValueError, space.wrap("index out of range for array")) + idx_iter = idx.create_iter(self.get_shape()) + size = loop.count_all_true_iter(idx_iter, self.get_shape(), idx.get_dtype()) + if len(val.get_shape()) > 0 and val.get_shape()[0] > 1 and size > val.get_shape()[0]: + raise OperationError(space.w_ValueError, space.wrap("NumPy boolean array indexing assignment " + "cannot assign %d input values to " + "the %d output values where the mask is true" % (val.get_shape()[0],size))) + if val.get_shape() == [1]: + box = val.descr_getitem(space, space.wrap(0)) + assert isinstance(box, Box) + val = W_NDimArray(scalar.Scalar(val.get_dtype(), box)) + elif val.get_shape() == [0]: + val.implementation.dtype = self.implementation.dtype loop.setitem_filter(self, idx, val) def _prepare_array_index(self, space, w_index): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -318,23 +318,27 @@ lefti.next() return result -count_all_true_driver = jit.JitDriver(name = 'numpy_count', - greens = ['shapelen', 'dtype'], - reds = 'auto') def count_all_true(arr): - s = 0 if arr.is_scalar(): return arr.get_dtype().itemtype.bool(arr.get_scalar_value()) iter = arr.create_iter() - shapelen = len(arr.get_shape()) - dtype = arr.get_dtype() + return count_all_true_iter(iter, arr.get_shape(), arr.get_dtype()) + +count_all_true_iter_driver = jit.JitDriver(name = 'numpy_count', + greens = ['shapelen', 'dtype'], + reds = 'auto') 
+def count_all_true_iter(iter, shape, dtype): + s = 0 + shapelen = len(shape) + dtype = dtype while not iter.done(): - count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + count_all_true_iter_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) s += iter.getitem_bool() iter.next() return s + getitem_filter_driver = jit.JitDriver(name = 'numpy_getitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2354,6 +2354,12 @@ def test_array_indexing_bool_specialcases(self): from numpypy import arange, array a = arange(6) + try: + a[a < 3] = [1, 2] + assert False, "Should not work" + except ValueError: + pass + a = arange(6) a[a > 3] = array([15]) assert (a == [0, 1, 2, 3, 15, 15]).all() a = arange(6).reshape(3, 2) diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -5,7 +5,7 @@ from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, - AnnotatorError, gather_error, ErrorWrapper) + AnnotatorError, gather_error, ErrorWrapper, source_lines) from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform @@ -383,8 +383,8 @@ try: unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)] except annmodel.UnionError, e: - e.args = e.args + ( - ErrorWrapper(gather_error(self, graph, block, None)),) + # Add source code to the UnionError + e.source = '\n'.join(source_lines(graph, block, None, long=True)) raise # if the merged cells changed, we must redo the analysis if unions != oldcells: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -243,14 +243,16 @@ if t2 is int: if int2.nonneg == False: - raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1 + raise UnionError(int1, int2, "RPython cannot prove that these " + \ + "integers are of the same signedness") knowntype = t1 elif t1 is int: if int1.nonneg == False: - raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2 + raise UnionError(int1, int2, "RPython cannot prove that these " + \ + "integers are of the same signedness") knowntype = t2 else: - raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2) + raise UnionError(int1, int2) return SomeInteger(nonneg=int1.nonneg and int2.nonneg, knowntype=knowntype) @@ -551,9 +553,9 @@ def union((tup1, tup2)): if len(tup1.items) != len(tup2.items): - raise UnionError("cannot take the union of a tuple of length %d " - "and a tuple of length %d" % (len(tup1.items), - len(tup2.items))) + raise UnionError(tup1, tup2, "RPython cannot unify tuples of " + "different length: %d versus %d" % \ + (len(tup1.items), len(tup2.items))) else: unions = [unionof(x,y) for x,y in zip(tup1.items, tup2.items)] return SomeTuple(items = unions) @@ -726,7 +728,8 @@ else: basedef = ins1.classdef.commonbase(ins2.classdef) if basedef is None: - raise UnionError(ins1, ins2) + raise UnionError(ins1, ins2, "RPython cannot unify instances " + "with no common base class") flags = ins1.flags if flags: flags = flags.copy() @@ -768,7 +771,8 @@ def 
union((iter1, iter2)): s_cont = unionof(iter1.s_container, iter2.s_container) if iter1.variant != iter2.variant: - raise UnionError("merging incompatible iterators variants") + raise UnionError(iter1, iter2, + "RPython cannot unify incompatible iterator variants") return SomeIterator(s_cont, *iter1.variant) @@ -778,8 +782,7 @@ if (bltn1.analyser != bltn2.analyser or bltn1.methodname != bltn2.methodname or bltn1.s_self is None or bltn2.s_self is None): - raise UnionError("cannot merge two different builtin functions " - "or methods:\n %r\n %r" % (bltn1, bltn2)) + raise UnionError(bltn1, bltn2) s_self = unionof(bltn1.s_self, bltn2.s_self) return SomeBuiltin(bltn1.analyser, s_self, methodname=bltn1.methodname) @@ -976,8 +979,8 @@ class __extend__(pairtype(SomeAddress, SomeObject)): def union((s_addr, s_obj)): - raise UnionError, "union of address and anything else makes no sense" + raise UnionError(s_addr, s_obj) class __extend__(pairtype(SomeObject, SomeAddress)): def union((s_obj, s_addr)): - raise UnionError, "union of address and anything else makes no sense" + raise UnionError(s_obj, s_addr) diff --git a/rpython/annotator/listdef.py b/rpython/annotator/listdef.py --- a/rpython/annotator/listdef.py +++ b/rpython/annotator/listdef.py @@ -58,7 +58,7 @@ def merge(self, other): if self is not other: if getattr(TLS, 'no_side_effects_in_union', 0): - raise UnionError("merging list/dict items") + raise UnionError(self, other) if other.dont_change_any_more: if self.dont_change_any_more: diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -680,6 +680,33 @@ """Signals an suspicious attempt at taking the union of deeply incompatible SomeXxx instances.""" + def __init__(self, s_obj1, s_obj2, msg=None): + """ + This exception expresses the fact that s_obj1 and s_obj2 cannot be unified. + The msg paramter is appended to a generic message. This can be used to + give the user a little more information. + """ + self.s_obj1 = s_obj1 + self.s_obj2 = s_obj2 + self.msg = msg + self.source = None + + def __str__(self): + s = "\n\n" + + if self.msg is not None: + s += "%s\n\n" % self.msg + + s += "Offending annotations:\n" + s += "%s\n%s\n\n" % (self.s_obj1, self.s_obj2) + + if self.source is not None: + s += self.source + + return s + + def __repr__(self): + return str(self) def unionof(*somevalues): "The most precise SomeValue instance that contains all the values." 
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4023,6 +4023,81 @@ a = self.RPythonAnnotator() assert not a.build_types(fn, [int]).nonneg + def test_unionerror_attrs(self): + def f(x): + if x < 10: + return 1 + else: + return "bbb" + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + the_exc = exc.value + s_objs = set([type(the_exc.s_obj1), type(the_exc.s_obj2)]) + + assert s_objs == set([annmodel.SomeInteger, annmodel.SomeString]) + assert the_exc.msg == None # Check that this is a generic UnionError + + def test_unionerror_tuple_size(self): + def f(x): + if x < 10: + return (1, ) + else: + return (1, 2) + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == "RPython cannot unify tuples of different length: 2 versus 1" + + def test_unionerror_signedness(self): + def f(x): + if x < 10: + return r_uint(99) + else: + return -1 + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot prove that these integers are of " + "the same signedness") + + def test_unionerror_instance(self): + class A(object): pass + class B(object): pass + + def f(x): + if x < 10: + return A() + else: + return B() + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot unify instances with no common base class") + + def test_unionerror_iters(self): + + def f(x): + d = { 1 : "a", 2 : "b" } + if x < 10: + return d.iterkeys() + else: + return d.itervalues() + a = self.RPythonAnnotator() + + with py.test.raises(annmodel.UnionError) as exc: + a.build_types(f, [int]) + + assert exc.value.msg == ("RPython cannot unify incompatible iterator variants") + def g(n): return [0, 1, 2, n] diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -24,8 +24,9 @@ self.frame = frame def __str__(self): - msg = ['-+' * 30] + msg = ["\n"] msg += map(str, self.args) + msg += [""] msg += source_lines(self.frame.graph, None, offset=self.frame.last_instr) return "\n".join(msg) @@ -293,7 +294,7 @@ _unsupported_ops = [ ('BINARY_POWER', "a ** b"), - ('BUILD_CLASS', 'creating new classes'), + ('BUILD_CLASS', 'defining classes inside functions'), ('EXEC_STMT', 'exec statement'), ('STOP_CODE', '???'), ('STORE_NAME', 'modifying globals'), diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -33,6 +33,9 @@ def get_position(self): raise NotImplementedError # only for stack + def get_jitframe_position(self): + raise NotImplementedError + class RegisterLocation(AssemblerLocation): _immutable_ = True width = WORD diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -46,6 +46,26 @@ self.fcond = fcond self.offset = offset + def compute_gcmap(self, gcmap, failargs, fail_locs, frame_depth): + # note that this is an old version that should not be here in + # the first place. 
Implement get_jitframe_position on ARM locations + # in order to make it work, then kill this function + input_i = 0 + for i in range(len(failargs)): + arg = failargs[i] + if arg is None: + continue + loc = fail_locs[input_i] + input_i += 1 + if arg.type == REF: + loc = fail_locs[i] + if loc.is_core_reg(): + val = self.cpu.all_reg_indexes[loc.value] + else: + val = loc.get_position() + self.cpu.JITFRAME_FIXED_SIZE + gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) + return gcmap + class ResOpAssembler(BaseAssembler): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -36,22 +36,12 @@ self.is_guard_not_forced = is_guard_not_forced def compute_gcmap(self, gcmap, failargs, fail_locs, frame_depth): - # note that regalloc has a very similar compute, but - # one that does iteration over all bindings, so slightly different, - # eh - input_i = 0 for i in range(len(failargs)): arg = failargs[i] if arg is None: continue - loc = fail_locs[input_i] - input_i += 1 if arg.type == REF: - loc = fail_locs[i] - if loc.is_core_reg(): - val = self.cpu.all_reg_indexes[loc.value] - else: - val = loc.get_position() + self.cpu.JITFRAME_FIXED_SIZE + val = fail_locs[i].get_jitframe_position() gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) return gcmap diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1772,17 +1772,13 @@ regs = gpr_reg_mgr_cls.all_regs for gpr in regs: if gpr not in ignored_regs: - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + v = gpr.get_jitframe_position() mc.MOV_br(v * WORD + base_ofs, gpr.value) if withfloats: - if IS_X86_64: - coeff = 1 - else: - coeff = 2 # Push all XMM regs - ofs = len(gpr_reg_mgr_cls.all_regs) - for i in range(len(xmm_reg_mgr_cls.all_regs)): - mc.MOVSD_bx((ofs + i * coeff) * WORD + base_ofs, i) + for reg in xmm_reg_mgr_cls.all_regs: + v = reg.get_jitframe_position() + mc.MOVSD_bx(v * WORD + base_ofs, reg.value) def _pop_all_regs_from_frame(self, mc, ignored_regs, withfloats, callee_only=False): diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -898,12 +898,12 @@ if box.type == REF and self.rm.is_still_alive(box): assert not noregs assert isinstance(loc, RegLoc) - val = gpr_reg_mgr_cls.all_reg_indexes[loc.value] + val = loc.get_jitframe_position() gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) for box, loc in self.fm.bindings.iteritems(): if box.type == REF and self.rm.is_still_alive(box): assert isinstance(loc, FrameLoc) - val = loc.position + JITFRAME_FIXED_SIZE + val = loc.get_jitframe_position() gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) return gcmap diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -1,5 +1,6 @@ from rpython.jit.metainterp.history import ConstInt from rpython.jit.backend.x86 import rx86 +from rpython.jit.backend.x86.arch import JITFRAME_FIXED_SIZE from rpython.rlib.unroll import unrolling_iterable from rpython.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64 from rpython.tool.sourcetools import func_with_new_name @@ -51,6 +52,8 @@ def get_position(self): raise NotImplementedError # only 
for stack + def get_jitframe_position(self): + raise NotImplementedError class RawEbpLoc(AssemblerLocation): """ The same as stack location, but does not know it's position. @@ -129,6 +132,9 @@ def get_position(self): return self.position + def get_jitframe_position(self): + return self.position + JITFRAME_FIXED_SIZE + class RegLoc(AssemblerLocation): _immutable_ = True def __init__(self, regnum, is_xmm): @@ -173,6 +179,18 @@ def is_core_reg(self): return True + def get_jitframe_position(self): + from rpython.jit.backend.x86 import regalloc + + if self.is_xmm: + ofs = len(regalloc.gpr_reg_mgr_cls.all_regs) + if IS_X86_64: + return ofs + self.value + else: + return ofs + 2 * self.value + else: + return regalloc.gpr_reg_mgr_cls.all_reg_indexes[self.value] + class ImmediateAssemblerLocation(AssemblerLocation): _immutable_ = True diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -90,7 +90,7 @@ format_simple_call(annotator, oper, msg) else: oper = None - msg.append(" " + str(oper)) + msg.append(" %s\n" % str(oper)) msg += source_lines(graph, block, operindex, long=True) if oper is not None: if SHOW_ANNOTATIONS: @@ -106,7 +106,7 @@ def format_blocked_annotation_error(annotator, blocked_blocks): text = [] for block, (graph, index) in blocked_blocks.items(): - text.append('-+' * 30) + text.append('\n') text.append("Blocked block -- operation cannot succeed") text.append(gather_error(annotator, graph, block, index)) return '\n'.join(text) diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -246,17 +246,19 @@ tb = None if got_error: import traceback - errmsg = ["Error:\n"] + stacktrace_errmsg = ["Error:\n"] exc, val, tb = sys.exc_info() - errmsg.extend([" %s" % line for line in traceback.format_exception(exc, val, tb)]) + stacktrace_errmsg.extend([" %s" % line for line in traceback.format_tb(tb)]) + summary_errmsg = traceback.format_exception_only(exc, val) block = getattr(val, '__annotator_block', None) if block: class FileLike: def write(self, s): - errmsg.append(" %s" % s) - errmsg.append("Processing block:\n") + summary_errmsg.append(" %s" % s) + summary_errmsg.append("Processing block:\n") t.about(block, FileLike()) - log.ERROR(''.join(errmsg)) + log.info(''.join(stacktrace_errmsg)) + log.ERROR(''.join(summary_errmsg)) else: log.event('Done.') From noreply at buildbot.pypy.org Wed Aug 28 17:37:30 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 17:37:30 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: (fijal, arigo, antocuni, rguillebert) Commit in-progress of resume Message-ID: <20130828153730.678FB1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66402:73b0937bd931 Date: 2013-08-28 15:57 +0100 http://bitbucket.org/pypy/pypy/changeset/73b0937bd931/ Log: (fijal, arigo, antocuni, rguillebert) Commit in-progress of resume refactoring diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -22,15 +22,13 @@ class GuardToken(object): - def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, + def __init__(self, cpu, gcmap, faildescr, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr 
= faildescr - self.failargs = failargs - self.fail_locs = fail_locs - self.gcmap = self.compute_gcmap(gcmap, failargs, - fail_locs, frame_depth) + #self.gcmap = self.compute_gcmap(gcmap, failargs, + # fail_locs, frame_depth) self.exc = exc self.is_guard_not_invalidated = is_guard_not_invalidated self.is_guard_not_forced = is_guard_not_forced @@ -157,27 +155,7 @@ target = self.failure_recovery_code[exc + 2 * withfloats] fail_descr = cast_instance_to_gcref(guardtok.faildescr) fail_descr = rffi.cast(lltype.Signed, fail_descr) - base_ofs = self.cpu.get_baseofs_of_frame_field() - positions = [0] * len(guardtok.fail_locs) - for i, loc in enumerate(guardtok.fail_locs): - if loc is None: - positions[i] = -1 - elif loc.is_stack(): - positions[i] = loc.value - base_ofs - else: - assert loc is not self.cpu.frame_reg # for now - if self.cpu.IS_64_BIT: - coeff = 1 - else: - coeff = 2 - if loc.is_float(): - v = len(self.cpu.gen_regs) + loc.value * coeff - else: - v = self.cpu.all_reg_indexes[loc.value] - positions[i] = v * WORD - # write down the positions of locs - guardtok.faildescr.rd_locs = positions - # we want the descr to keep alive + # we want the descr to keep loop alive guardtok.faildescr.rd_loop_token = self.current_clt return fail_descr, target diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -2,6 +2,7 @@ from rpython.jit.metainterp.history import Const, Box, REF, JitCellToken from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.jit.metainterp.resoperation import rop +from rpython.jit.backend.llsupport.resumebuilder import LivenessAnalyzer try: from collections import OrderedDict @@ -689,6 +690,22 @@ produced = {} last_used = {} last_real_usage = {} + liveness_analyzer = LivenessAnalyzer() + for position, op in enumerate(operations): + if op.getopnum() == rop.ENTER_FRAME: + liveness_analyzer.enter_frame(op.getdescr()) + elif op.getopnum() == rop.LEAVE_FRAME: + liveness_analyzer.leave_frame() + elif op.getopnum() == rop.RESUME_PUT: + liveness_analyzer.put(op.getarg(0), op.getarg(1).getint(), + op.getarg(2).getint()) + elif op.is_guard(): + framestack = liveness_analyzer.get_live_info() + for frame in framestack: + for item in frame: + if item is not None: + last_used[item] = position + for i in range(len(operations)-1, -1, -1): op = operations[i] if op.result: @@ -703,6 +720,8 @@ continue if arg not in last_used: last_used[arg] = i + else: + last_used[arg] = max(last_used[arg], i) if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/llsupport/resumebuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -0,0 +1,58 @@ + +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.history import ConstInt +from rpython.jit.codewriter.jitcode import JitCode + +class LivenessAnalyzer(object): + def __init__(self): + self.framestack = [] + + def enter_frame(self, jitcode): + assert isinstance(jitcode, JitCode) + self.framestack.append([None] * jitcode.num_regs()) + + def put(self, value, depth, position): + # - depth - 1 can be expressed as ~depth (haha) + self.framestack[- depth - 1][position] = value + + def get_live_info(self): + return self.framestack + + def leave_frame(self): + self.framestack.pop() + +class 
ResumeBuilder(object): + def __init__(self, regalloc): + self.framestack = [] + self.newops = [] + self.regalloc = regalloc + + def process(self, op): + oplist[op.getopnum()](self, op) + + def process_enter_frame(self, op): + self.framestack.append(None) + self.newops.append(op) + + def process_resume_put(self, op): + v = op.getarg(0) + loc = self.regalloc.loc(v) + pos = loc.get_jitframe_position() + self.newops.append(op.copy_and_change(rop.RESUME_PUT, + args=[ConstInt(pos), + op.getarg(1), + op.getarg(2)])) + + def process_leave_frame(self, op): + self.framestack.pop() + self.newops.append(op) + + def not_implemented_op(self, op): + print "Not implemented", op.getopname() + raise NotImplementedError(op.getopname()) + +oplist = [ResumeBuilder.not_implemented_op] * rop._LAST +for name, value in ResumeBuilder.__dict__.iteritems(): + if name.startswith('process_'): + num = getattr(rop, name[len('process_'):].upper()) + oplist[num] = value diff --git a/rpython/jit/backend/llsupport/test/test_regalloc.py b/rpython/jit/backend/llsupport/test/test_regalloc.py --- a/rpython/jit/backend/llsupport/test/test_regalloc.py +++ b/rpython/jit/backend/llsupport/test/test_regalloc.py @@ -3,6 +3,7 @@ BoxPtr from rpython.jit.backend.llsupport.regalloc import FrameManager, LinkedList from rpython.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan +from rpython.jit.backend.llsupport.regalloc import compute_vars_longevity def newboxes(*values): return [BoxInt(v) for v in values] @@ -68,7 +69,7 @@ class MockAsm(object): def __init__(self): self.moves = [] - + def regalloc_mov(self, from_loc, to_loc): self.moves.append((from_loc, to_loc)) @@ -98,7 +99,7 @@ rm._check_invariants() assert len(rm.free_regs) == 4 assert len(rm.reg_bindings) == 0 - + def test_register_exhaustion(self): boxes, longevity = boxes_and_longevity(5) rm = RegisterManager(longevity) @@ -114,7 +115,7 @@ class XRegisterManager(RegisterManager): no_lower_byte_regs = [r2, r3] - + rm = XRegisterManager(longevity) rm.next_instruction() loc0 = rm.try_allocate_reg(b0, need_lower_byte=True) @@ -149,7 +150,7 @@ class XRegisterManager(RegisterManager): no_lower_byte_regs = [r2, r3] - + rm = XRegisterManager(longevity, frame_manager=fm, assembler=MockAsm()) @@ -172,7 +173,7 @@ assert isinstance(loc, FakeReg) assert loc not in [r2, r3] rm._check_invariants() - + def test_make_sure_var_in_reg(self): boxes, longevity = boxes_and_longevity(5) fm = TFrameManager() @@ -186,7 +187,7 @@ loc = rm.make_sure_var_in_reg(b0) assert isinstance(loc, FakeReg) rm._check_invariants() - + def test_force_result_in_reg_1(self): b0, b1 = newboxes(0, 0) longevity = {b0: (0, 1), b1: (1, 3)} @@ -341,7 +342,7 @@ rm.after_call(boxes[-1]) assert len(rm.reg_bindings) == 1 rm._check_invariants() - + def test_different_frame_width(self): class XRegisterManager(RegisterManager): @@ -358,7 +359,7 @@ xrm.loc(f0) rm.loc(b0) assert fm.get_frame_depth() == 3 - + def test_spilling(self): b0, b1, b2, b3, b4, b5 = newboxes(0, 1, 2, 3, 4, 5) longevity = {b0: (0, 3), b1: (0, 3), b3: (0, 5), b2: (0, 2), b4: (1, 4), b5: (1, 3)} @@ -592,3 +593,31 @@ assert fm.get_loc_index(floc) == 0 for box in fm.bindings.keys(): fm.mark_as_free(box) + +def test_vars_longevity(): + from rpython.jit.tool.oparser import parse + from rpython.jit.codewriter.jitcode import JitCode + + class MockJitcode(JitCode): + def __init__(self, no): + self.no = no + + def num_regs(self): + return self.no + + loop = parse(""" + [i0, i1] + enter_frame(0, descr=jitcode) + resume_put(i0, 0, 1) + guard_true(1) + i5 = 
int_add(1, 2) + resume_put(i1, 0, 1) + i4 = int_add(1, 2) + guard_false(1) + i3 = int_add(1, 2) + leave_frame() + """, namespace={'jitcode': MockJitcode(3)}) + longevity, _ = compute_vars_longevity(loop.inputargs, loop.operations) + assert longevity[loop.inputargs[0]] == (0, 2) + assert longevity[loop.inputargs[1]] == (0, 6) + diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1719,9 +1719,8 @@ is_guard_not_invalidated = guard_opnum == rop.GUARD_NOT_INVALIDATED is_guard_not_forced = guard_opnum == rop.GUARD_NOT_FORCED gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) - XXX - return GuardToken(self.cpu, gcmap, faildescr, failargs, - fail_locs, exc, frame_depth, + return GuardToken(self.cpu, gcmap, faildescr, + exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced) def generate_propagate_error_64(self): diff --git a/rpython/jit/codewriter/jitcode.py b/rpython/jit/codewriter/jitcode.py --- a/rpython/jit/codewriter/jitcode.py +++ b/rpython/jit/codewriter/jitcode.py @@ -47,6 +47,9 @@ def num_regs_f(self): return ord(self.c_num_regs_f) + def num_regs(self): + return self.num_regs_i() + self.num_regs_r() + self.num_regs_f() + def has_liveness_info(self, pc): return pc in self.liveness From noreply at buildbot.pypy.org Wed Aug 28 17:37:31 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 17:37:31 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: (fijal, arigo, antocuni, rguillebert) Make the first test pass about Message-ID: <20130828153731.E70C41C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66403:20e57f63b0cc Date: 2013-08-28 16:36 +0100 http://bitbucket.org/pypy/pypy/changeset/20e57f63b0cc/ Log: (fijal, arigo, antocuni, rguillebert) Make the first test pass about the resume logic diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -22,27 +22,17 @@ class GuardToken(object): - def __init__(self, cpu, gcmap, faildescr, exc, + def __init__(self, cpu, gcmap, faildescr, has_floats, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced): assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu + self.has_floats = has_floats self.faildescr = faildescr - #self.gcmap = self.compute_gcmap(gcmap, failargs, - # fail_locs, frame_depth) self.exc = exc + self.gcmap = gcmap self.is_guard_not_invalidated = is_guard_not_invalidated self.is_guard_not_forced = is_guard_not_forced - def compute_gcmap(self, gcmap, failargs, fail_locs, frame_depth): - for i in range(len(failargs)): - arg = failargs[i] - if arg is None: - continue - if arg.type == REF: - val = fail_locs[i].get_jitframe_position() - gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) - return gcmap - class BaseAssembler(object): """ Base class for Assembler generator in real backends @@ -146,11 +136,7 @@ return locs def store_info_on_descr(self, startspos, guardtok): - withfloats = False - for box in guardtok.failargs: - if box is not None and box.type == FLOAT: - withfloats = True - break + withfloats = guardtok.has_floats exc = guardtok.exc target = self.failure_recovery_code[exc + 2 * withfloats] fail_descr = cast_instance_to_gcref(guardtok.faildescr) diff --git a/rpython/jit/backend/llsupport/resumebuilder.py 
b/rpython/jit/backend/llsupport/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -47,6 +47,9 @@ self.framestack.pop() self.newops.append(op) + def get_position(self): + return len(self.newops) + def not_implemented_op(self, op): print "Not implemented", op.getopname() raise NotImplementedError(op.getopname()) diff --git a/rpython/jit/backend/llsupport/test/test_resume.py b/rpython/jit/backend/llsupport/test/test_resume.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/test/test_resume.py @@ -0,0 +1,42 @@ + +from rpython.jit.metainterp.history import JitCellToken +from rpython.jit.codewriter.jitcode import JitCode +from rpython.jit.tool.oparser import parse +from rpython.jit.metainterp.optimizeopt.util import equaloplists + +class MockJitCode(JitCode): + def __init__(self, no): + self.no = no + + def num_regs(self): + return self.no + + def __repr__(self): + return 'MockJitCode(%d)' % self.no + +class ResumeTest(object): + def setup_method(self, meth): + self.cpu = self.CPUClass(None, None) + self.cpu.setup_once() + + def test_simple(self): + jitcode = MockJitCode(3) + loop = parse(""" + [i0] + enter_frame(-1, descr=jitcode) + resume_put(i0, 0, 2) + guard_true(i0) + leave_frame() + """, namespace={'jitcode': jitcode}) + looptoken = JitCellToken() + self.cpu.compile_loop(None, loop.inputargs, loop.operations, + looptoken) + descr = loop.operations[2].getdescr() + assert descr.rd_bytecode_position == 2 + expected_resume = parse(""" + [] + enter_frame(-1, descr=jitcode) + resume_put(28, 0, 2) + leave_frame() + """, namespace={'jitcode': jitcode}).operations + equaloplists(descr.rd_loop_token.rd_bytecode, expected_resume) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1718,8 +1718,11 @@ guard_opnum == rop.GUARD_NOT_FORCED) is_guard_not_invalidated = guard_opnum == rop.GUARD_NOT_INVALIDATED is_guard_not_forced = guard_opnum == rop.GUARD_NOT_FORCED - gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + gcmap = self._regalloc.get_gcmap() + pos = self._regalloc.resumebuilder.get_position() + faildescr.rd_bytecode_position = pos return GuardToken(self.cpu, gcmap, faildescr, + self._regalloc.uses_floats(), exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -334,6 +334,7 @@ self.assembler.mc.mark_op(None) # end of the loop for arg in inputargs: self.possibly_free_var(arg) + self.assembler.current_clt.rd_bytecode = self.resumebuilder.newops def flush_loop(self): # rare case: if the loop is too short, or if we are just after @@ -907,6 +908,14 @@ gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) return gcmap + def uses_floats(self): + if self.xrm.reg_bindings: + return True + for box in self.fm.bindings: + if box.type == FLOAT: + return True + return False + def consider_setfield_gc(self, op): ofs, size, _ = unpack_fielddescr(op.getdescr()) ofs_loc = imm(ofs) diff --git a/rpython/jit/backend/x86/test/test_resume.py b/rpython/jit/backend/x86/test/test_resume.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/test/test_resume.py @@ -0,0 +1,8 @@ + +from rpython.jit.backend.x86.test.test_basic import Jit386Mixin +from 
rpython.jit.backend.llsupport.test.test_resume import ResumeTest + +class TestResumeX86(Jit386Mixin, ResumeTest): + # for the individual tests see + # ====> ../../llsupport/test/test_resume.py + pass diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -158,24 +158,6 @@ remap[op2.result] = op1.result if op1.getopnum() not in (rop.JUMP, rop.LABEL): # xxx obscure assert op1.getdescr() == op2.getdescr() - if op1.getfailargs() or op2.getfailargs(): - assert len(op1.getfailargs()) == len(op2.getfailargs()) - if strict_fail_args: - for x, y in zip(op1.getfailargs(), op2.getfailargs()): - if x is None: - assert remap.get(y, y) is None - else: - assert x.same_box(remap.get(y, y)) - else: - fail_args1 = set(op1.getfailargs()) - fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()]) - for x in fail_args1: - for y in fail_args2: - if x.same_box(y): - fail_args2.remove(y) - break - else: - assert False assert len(oplist1) == len(oplist2) print '-'*totwidth return True From noreply at buildbot.pypy.org Wed Aug 28 18:07:01 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 18:07:01 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: (fijal, antocuni) Start adding resume2.py infrastructure + tests Message-ID: <20130828160701.B03D31C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66404:8b3e2a4c9b4b Date: 2013-08-28 17:06 +0100 http://bitbucket.org/pypy/pypy/changeset/8b3e2a4c9b4b/ Log: (fijal, antocuni) Start adding resume2.py infrastructure + tests diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/llsupport/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -59,3 +59,4 @@ if name.startswith('process_'): num = getattr(rop, name[len('process_'):].upper()) oplist[num] = value + diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/resume2.py @@ -0,0 +1,56 @@ + +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.history import BoxInt +from rpython.jit.codewriter.jitcode import JitCode + +class AbstractResumeReader(object): + def __init__(self, metainterp, deadframe): + self.metainterp = metainterp + self.deadframe = deadframe + + def rebuild(self, faildescr): + bytecode = faildescr.rd_loop.rd_bytecode + pos = faildescr.rd_bytecode_position + self.interpret_until(bytecode, pos) + + def interpret_until(self, bytecode, until): + pos = 0 + while pos < until: + op = bytecode[pos] + if op.getopnum() == rop.ENTER_FRAME: + descr = op.getdescr() + assert isinstance(descr, JitCode) + self.enter_frame(op.getarg(0).getint(), descr) + elif op.getopnum() == rop.LEAVE_FRAME: + xxx + elif op.getopnum() == rop.RESUME_PUT: + self.put(op.getarg(0).getint(), op.getarg(1).getint(), + op.getarg(2).getint()) + else: + xxx + pos += 1 + +class DirectResumeReader(AbstractResumeReader): + pass + +class BoxResumeReader(AbstractResumeReader): + def enter_frame(self, pc, jitcode): + if pc != -1: + self.metainterp.framestack[-1].pc = pc + self.metainterp.newframe(jitcode) + + def put(self, jitframe_index, depth, frontend_position): + jitcode = self.metainterp.framestack[-1].jitcode + cpu = self.metainterp.cpu + frame = self.metainterp.framestack[- depth - 1] + if frontend_position < 
jitcode.num_regs_i(): + box = BoxInt(cpu.get_int_value(self.deadframe, jitframe_index)) + frame.registers_i[frontend_position] = box + elif frontend_position < (jitcode.num_regs_r() + jitcode.num_regs_i()): + xxx + else: + assert frontend_position < jitcode.num_regs() + xxx + +def rebuild_from_resumedata(metainterp, deadframe, faildescr): + BoxResumeReader(metainterp, deadframe).rebuild(faildescr) diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -0,0 +1,50 @@ + +from rpython.jit.tool.oparser import parse +from rpython.jit.codewriter.jitcode import JitCode +from rpython.jit.metainterp.history import AbstractDescr +from rpython.jit.metainterp.resume2 import rebuild_from_resumedata + +class Descr(AbstractDescr): + pass + +class MockLoop(object): + pass + +class Frame(object): + def __init__(self, jitcode): + self.jitcode = jitcode + self.registers_i = [None] * jitcode.num_regs_i() + +class MockMetaInterp(object): + def __init__(self): + self.framestack = [] + + def newframe(self, jitcode): + self.framestack.append(Frame(jitcode)) + +class MockCPU(object): + def get_int_value(self, frame, index): + assert frame == "myframe" + assert index == 10 + return 13 + +class TestResumeDirect(object): + def test_direct_resume_reader(self): + jitcode = JitCode("jitcode") + jitcode.setup(num_regs_i=13) + resume_loop = parse(""" + [] + enter_frame(-1, descr=jitcode1) + resume_put(10, 0, 1) + leave_frame() + """, namespace={'jitcode1': jitcode}) + descr = Descr() + descr.rd_loop = MockLoop() + descr.rd_loop.rd_bytecode = resume_loop.operations + descr.rd_bytecode_position = 2 + metainterp = MockMetaInterp() + metainterp.cpu = MockCPU() + rebuild_from_resumedata(metainterp, "myframe", descr) + assert len(metainterp.framestack) == 1 + f = metainterp.framestack[-1] + assert f.registers_i[1].getint() == 13 From noreply at buildbot.pypy.org Wed Aug 28 18:14:12 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 18:14:12 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: move rgc.no_release_gil to backendopt Message-ID: <20130828161412.776ED1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66405:27b6cc609d16 Date: 2013-08-28 17:55 +0200 http://bitbucket.org/pypy/pypy/changeset/27b6cc609d16/ Log: move rgc.no_release_gil to backendopt diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -46,34 +46,6 @@ return (op.opname in LL_OPERATIONS and LL_OPERATIONS[op.opname].canmallocgc) -class GilAnalyzer(graphanalyze.BoolGraphAnalyzer): - - def analyze_direct_call(self, graph, seen=None): - try: - func = graph.func - except AttributeError: - pass - else: - if getattr(func, '_gctransformer_hint_close_stack_', False): - return True - if getattr(func, '_transaction_break_', False): - return True - - return graphanalyze.BoolGraphAnalyzer.analyze_direct_call( - self, graph, seen) - - def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj - if getattr(funcobj, 'transactionsafe', False): - return False - else: - return False - - def analyze_instantiate_call(self, seen=None): - return False - - def analyze_simple_operation(self, op, graphinfo): - return False def find_initializing_stores(collect_analyzer, graph): @@ -281,9 +253,6 @@ self.collect_analyzer = 
CollectAnalyzer(self.translator) self.collect_analyzer.analyze_all() - self.gil_analyzer = GilAnalyzer(self.translator) - self.gil_analyzer.analyze_all() - s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass) r_gc = self.translator.rtyper.getrepr(s_gc) self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc) @@ -673,27 +642,6 @@ raise Exception("'no_collect' function can trigger collection:" " %s\n%s" % (func, err.getvalue())) - if func and getattr(func, '_no_release_gil_', False): - if self.gil_analyzer.analyze_direct_call(graph): - # 'no_release_gil' function can release the gil - import cStringIO - err = cStringIO.StringIO() - import sys - prev = sys.stdout - try: - sys.stdout = err - ca = GilAnalyzer(self.translator) - ca.verbose = True - ca.analyze_direct_call(graph) # print the "traceback" here - sys.stdout = prev - except: - sys.stdout = prev - # ^^^ for the dump of which operation in which graph actually - # causes it to return True - raise Exception("'no_release_gil' function can release the GIL:" - " %s\n%s" % (func, err.getvalue())) - - if self.write_barrier_ptr: self.clean_sets = ( find_initializing_stores(self.collect_analyzer, graph)) diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -5,7 +5,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gc.semispace import SemiSpaceGC from rpython.memory.gctransform.framework import (CollectAnalyzer, - find_initializing_stores, find_clean_setarrayitems, GilAnalyzer) + find_initializing_stores, find_clean_setarrayitems) from rpython.memory.gctransform.shadowstack import ( ShadowStackFrameworkGCTransformer) from rpython.memory.gctransform.test.test_transform import rtype @@ -99,36 +99,6 @@ t = rtype(g, []) gg = graphof(t, g) assert CollectAnalyzer(t).analyze_direct_call(gg) - -def test_canrelease_external(): - for ths in ['auto', True, False]: - for sbxs in [True, False]: - fext = rffi.llexternal('fext2', [], lltype.Void, - threadsafe=ths, sandboxsafe=sbxs) - def g(): - fext() - t = rtype(g, []) - gg = graphof(t, g) - - releases = (ths == 'auto' and not sbxs) or ths is True - assert releases == GilAnalyzer(t).analyze_direct_call(gg) - -def test_canrelease_instantiate(): - class O: - pass - class A(O): - pass - class B(O): - pass - - classes = [A, B] - def g(i): - classes[i]() - - t = rtype(g, [int]) - gg = graphof(t, g) - assert not GilAnalyzer(t).analyze_direct_call(gg) - def test_no_collect(gc="minimark"): from rpython.rlib import rgc @@ -155,60 +125,6 @@ def test_no_collect_stm(): test_no_collect("stmgc") -def test_no_release_gil(gc="minimark"): - from rpython.rlib import rgc - from rpython.translator.c.genc import CStandaloneBuilder - - @rgc.no_release_gil - def g(): - return 1 - - assert g._dont_inline_ - assert g._no_release_gil_ - - def entrypoint(argv): - return g() + 2 - - t = rtype(entrypoint, [s_list_of_strings]) - if gc == "stmgc": - t.config.translation.stm = True - t.config.translation.gc = gc - cbuild = CStandaloneBuilder(t, entrypoint, t.config, - gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs_for_llinterp() - -def test_no_release_gil_stm(): - test_no_release_gil("stmgc") - -def test_no_release_gil_detect(gc="minimark"): - from rpython.rlib import rgc - from rpython.translator.c.genc import CStandaloneBuilder - - fext1 = rffi.llexternal('fext1', [], lltype.Void, threadsafe=True) - 
@rgc.no_release_gil - def g(): - fext1() - return 1 - - assert g._dont_inline_ - assert g._no_release_gil_ - - def entrypoint(argv): - return g() + 2 - - t = rtype(entrypoint, [s_list_of_strings]) - if gc == "stmgc": - t.config.translation.stm = True - t.config.translation.gc = gc - cbuild = CStandaloneBuilder(t, entrypoint, t.config, - gcpolicy=FrameworkGcPolicy2) - f = py.test.raises(Exception, cbuild.generate_graphs_for_llinterp) - expected = "'no_release_gil' function can release the GIL: Author: Remi Meier Branch: stmgc-c4 Changeset: r66406:c63bfbc93cec Date: 2013-08-28 17:56 +0200 http://bitbucket.org/pypy/pypy/changeset/c63bfbc93cec/ Log: add comment diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py --- a/rpython/translator/backendopt/gilanalysis.py +++ b/rpython/translator/backendopt/gilanalysis.py @@ -1,5 +1,9 @@ from rpython.translator.backendopt import graphanalyze +# This is not an optimization. It checks for possible releases of the +# GIL in all graphs starting from rgc.no_release_gil. + + class GilAnalyzer(graphanalyze.BoolGraphAnalyzer): def analyze_direct_call(self, graph, seen=None): From noreply at buildbot.pypy.org Wed Aug 28 18:14:14 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 Aug 2013 18:14:14 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: prefix debug lines with the thread number during testing Message-ID: <20130828161414.EBB6D1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r66407:dc4ed04113de Date: 2013-08-28 18:13 +0200 http://bitbucket.org/pypy/pypy/changeset/dc4ed04113de/ Log: prefix debug lines with the thread number during testing diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -1,4 +1,4 @@ -import sys, time +import sys, time, thread from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import is_valid_int @@ -76,10 +76,16 @@ _log = None # patched from tests to be an object of class DebugLog # or compatible +_thread_numbering = {} +def _get_thread_num(): + thid = thread.get_ident() + if thid not in _thread_numbering: + _thread_numbering[thid] = len(_thread_numbering) + return _thread_numbering[thid] + def debug_print(*args): - for arg in args: - print >> sys.stderr, arg, - print >> sys.stderr + msg = " ".join(map(str, args)) + sys.stderr.write("%s# %s\n" % (_get_thread_num(), msg)) if _log is not None: _log.debug_print(*args) @@ -108,15 +114,17 @@ def debug_start(category): c = int(time.clock() * 100) - print >> sys.stderr, '%s[%x] {%s%s' % (_start_colors_1, c, - category, _stop_colors) + sys.stderr.write('%s%s# [%x] {%s%s\n' % (_start_colors_1, + _get_thread_num(), c, + category, _stop_colors)) if _log is not None: _log.debug_start(category) def debug_stop(category): c = int(time.clock() * 100) - print >> sys.stderr, '%s[%x] %s}%s' % (_start_colors_2, c, - category, _stop_colors) + sys.stderr.write('%s%s# [%x] %s}%s\n' % (_start_colors_2, + _get_thread_num(), c, + category, _stop_colors)) if _log is not None: _log.debug_stop(category) From noreply at buildbot.pypy.org Wed Aug 28 18:21:07 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 18:21:07 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: rename test Message-ID: <20130828162107.8B28A1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66408:ae17909428e1 Date: 
2013-08-28 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/ae17909428e1/ Log: rename test diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -29,7 +29,7 @@ return 13 class TestResumeDirect(object): - def test_direct_resume_reader(self): + def test_box_resume_reader(self): jitcode = JitCode("jitcode") jitcode.setup(num_regs_i=13) resume_loop = parse(""" @@ -48,3 +48,4 @@ assert len(metainterp.framestack) == 1 f = metainterp.framestack[-1] assert f.registers_i[1].getint() == 13 + From noreply at buildbot.pypy.org Wed Aug 28 18:21:08 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 18:21:08 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: (fijal, antocuni) Write more tests Message-ID: <20130828162108.B29651C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66409:d90df29ed710 Date: 2013-08-28 17:20 +0100 http://bitbucket.org/pypy/pypy/changeset/d90df29ed710/ Log: (fijal, antocuni) Write more tests diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -22,7 +22,7 @@ assert isinstance(descr, JitCode) self.enter_frame(op.getarg(0).getint(), descr) elif op.getopnum() == rop.LEAVE_FRAME: - xxx + self.leave_frame() elif op.getopnum() == rop.RESUME_PUT: self.put(op.getarg(0).getint(), op.getarg(1).getint(), op.getarg(2).getint()) @@ -30,27 +30,33 @@ xxx pos += 1 -class DirectResumeReader(AbstractResumeReader): - pass - -class BoxResumeReader(AbstractResumeReader): def enter_frame(self, pc, jitcode): if pc != -1: self.metainterp.framestack[-1].pc = pc self.metainterp.newframe(jitcode) + def leave_frame(self): + self.metainterp.popframe() + def put(self, jitframe_index, depth, frontend_position): jitcode = self.metainterp.framestack[-1].jitcode cpu = self.metainterp.cpu frame = self.metainterp.framestack[- depth - 1] if frontend_position < jitcode.num_regs_i(): - box = BoxInt(cpu.get_int_value(self.deadframe, jitframe_index)) - frame.registers_i[frontend_position] = box + self.write_int(frame, frontend_position, + cpu.get_int_value(self.deadframe, jitframe_index)) elif frontend_position < (jitcode.num_regs_r() + jitcode.num_regs_i()): xxx else: assert frontend_position < jitcode.num_regs() xxx +class DirectResumeReader(AbstractResumeReader): + pass + +class BoxResumeReader(AbstractResumeReader): + def write_int(self, frame, pos, value): + frame.registers_i[pos] = BoxInt(value) + def rebuild_from_resumedata(metainterp, deadframe, faildescr): BoxResumeReader(metainterp, deadframe).rebuild(faildescr) diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -15,6 +15,9 @@ self.jitcode = jitcode self.registers_i = [None] * jitcode.num_regs_i() + def num_nonempty_regs(self): + return len(filter(bool, self.registers_i)) + class MockMetaInterp(object): def __init__(self): self.framestack = [] @@ -22,11 +25,13 @@ def newframe(self, jitcode): self.framestack.append(Frame(jitcode)) + def popframe(self): + self.framestack.pop() + class MockCPU(object): def get_int_value(self, frame, index): assert frame == "myframe" - assert index == 10 - return 13 + return index + 3 class TestResumeDirect(object): def 
test_box_resume_reader(self): @@ -49,3 +54,45 @@ f = metainterp.framestack[-1] assert f.registers_i[1].getint() == 13 + def test_nested_call(self): + jitcode1 = JitCode("jitcode") + jitcode1.setup(num_regs_i=13) + jitcode2 = JitCode("jitcode2") + jitcode2.setup(num_regs_i=9) + resume_loop = parse(""" + [] + enter_frame(-1, descr=jitcode1) + resume_put(11, 0, 2) + enter_frame(12, descr=jitcode2) + resume_put(12, 0, 3) + resume_put(8, 1, 4) + leave_frame() + resume_put(10, 0, 1) + leave_frame() + """, namespace={'jitcode1': jitcode1, 'jitcode2': jitcode2}) + metainterp = MockMetaInterp() + metainterp.cpu = MockCPU() + descr = Descr() + descr.rd_loop = MockLoop() + descr.rd_loop.rd_bytecode = resume_loop.operations + descr.rd_bytecode_position = 5 + rebuild_from_resumedata(metainterp, "myframe", descr) + assert len(metainterp.framestack) == 2 + f = metainterp.framestack[-1] + f2 = metainterp.framestack[0] + assert f.num_nonempty_regs() == 1 + assert f2.num_nonempty_regs() == 2 + assert f.registers_i[3].getint() == 12 + 3 + assert f2.registers_i[4].getint() == 8 + 3 + assert f2.registers_i[2].getint() == 11 + 3 + + descr.rd_bytecode_position = 7 + metainterp.framestack = [] + rebuild_from_resumedata(metainterp, "myframe", descr) + assert len(metainterp.framestack) == 1 + f = metainterp.framestack[-1] + assert f.num_nonempty_regs() == 3 + assert f.registers_i[1].getint() == 10 + 3 + assert f.registers_i[2].getint() == 11 + 3 + assert f.registers_i[4].getint() == 8 + 3 + From noreply at buildbot.pypy.org Wed Aug 28 18:24:56 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 18:24:56 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: (fijal, antocuni) kill the failargs support from logger Message-ID: <20130828162456.8FDC31C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66410:07794a0a5cb2 Date: 2013-08-28 17:23 +0100 http://bitbucket.org/pypy/pypy/changeset/07794a0a5cb2/ Log: (fijal, antocuni) kill the failargs support from logger diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -147,12 +147,7 @@ args += ', descr=' + r else: args = "descr=" + r - if is_guard and op.getfailargs() is not None: - fail_args = ' [' + ", ".join([self.repr_of_arg(arg) - for arg in op.getfailargs()]) + ']' - else: - fail_args = '' - return s_offset + res + op.getopname() + '(' + args + ')' + fail_args + return s_offset + res + op.getopname() + '(' + args + ')' def _log_inputarg_setup_ops(self, op): target_token = op.getdescr() diff --git a/rpython/jit/metainterp/test/test_logger.py b/rpython/jit/metainterp/test/test_logger.py --- a/rpython/jit/metainterp/test/test_logger.py +++ b/rpython/jit/metainterp/test/test_logger.py @@ -100,7 +100,7 @@ inp = ''' [i0] i1 = int_add(i0, 1) - guard_true(i0) [i0, i1] + guard_true(i0) finish(i1) ''' self.reparse(inp) @@ -108,7 +108,7 @@ def test_guard_not_invalidated(self): inp = ''' [] - guard_not_invalidated(descr=descr) [] + guard_not_invalidated(descr=descr) finish(descr=finaldescr) ''' loop = pure_parse(inp, namespace={'descr': Descr(), @@ -117,15 +117,6 @@ output = logger.log_loop(loop, {'descr': Descr()}) assert 'guard_not_invalidated(descr=' in output - def test_guard_w_hole(self): - inp = ''' - [i0] - i1 = int_add(i0, 1) - guard_true(i0) [i0, None, i1] - finish(i1) - ''' - self.reparse(inp) - def test_debug_merge_point(self): inp = ''' [] @@ -161,12 +152,12 @@ namespace = {'fdescr': 
BasicFailDescr()} inp = ''' [i0] - guard_true(i0, descr=fdescr) [i0] + guard_true(i0, descr=fdescr) ''' loop = pure_parse(inp, namespace=namespace) logger = Logger(self.make_metainterp_sd(), guard_number=True) output = logger.log_loop(loop) - assert re.match("guard_true\(i0, descr=\) \[i0\]", output.splitlines()[-1]) + assert re.match("guard_true\(i0, descr=\)", output.splitlines()[-1]) pure_parse(output) logger = Logger(self.make_metainterp_sd(), guard_number=False) From noreply at buildbot.pypy.org Wed Aug 28 18:26:15 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 18:26:15 +0200 (CEST) Subject: [pypy-commit] pypy improve-errors-again: (Ronan, Edd) Move formatting of annotations into its own function. Message-ID: <20130828162615.5D20B1C0149@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: improve-errors-again Changeset: r66411:c39e6e158893 Date: 2013-08-28 11:18 +0100 http://bitbucket.org/pypy/pypy/changeset/c39e6e158893/ Log: (Ronan, Edd) Move formatting of annotations into its own function. diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -94,15 +94,21 @@ msg += source_lines(graph, block, operindex, long=True) if oper is not None: if SHOW_ANNOTATIONS: - msg.append("Known variable annotations:") - for arg in oper.args + [oper.result]: - if isinstance(arg, Variable): - try: - msg.append(" " + str(arg) + " = " + str(annotator.binding(arg))) - except KeyError: - pass + msg += format_annotations(annotator, oper) + msg += [''] return "\n".join(msg) +def format_annotations(annotator, oper): + msg = [] + msg.append("Known variable annotations:") + for arg in oper.args + [oper.result]: + if isinstance(arg, Variable): + try: + msg.append(" " + str(arg) + " = " + str(annotator.binding(arg))) + except KeyError: + pass + return msg + def format_blocked_annotation_error(annotator, blocked_blocks): text = [] for block, (graph, index) in blocked_blocks.items(): From noreply at buildbot.pypy.org Wed Aug 28 18:26:16 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 18:26:16 +0200 (CEST) Subject: [pypy-commit] pypy improve-errors-again: (Ronan, Edd) Move AnnotatorError to a place that makes more sense. Message-ID: <20130828162616.B50BB1C0149@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: improve-errors-again Changeset: r66412:79c2e7e9bdd8 Date: 2013-08-28 12:31 +0100 http://bitbucket.org/pypy/pypy/changeset/79c2e7e9bdd8/ Log: (Ronan, Edd) Move AnnotatorError to a place that makes more sense. diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -5,7 +5,7 @@ from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, - AnnotatorError, gather_error, ErrorWrapper, source_lines) + gather_error, ErrorWrapper, source_lines) from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform @@ -18,7 +18,6 @@ FAIL = object() - class RPythonAnnotator(object): """Block annotator for RPython. 
See description in doc/translation.txt.""" @@ -221,7 +220,7 @@ text = format_blocked_annotation_error(self, self.blocked_blocks) #raise SystemExit() - raise AnnotatorError(text) + raise annmodel.AnnotatorError(text) for graph in newgraphs: v = graph.getreturnvar() if v not in self.bindings: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -20,7 +20,7 @@ from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.rlib import rarithmetic -from rpython.tool.error import AnnotatorError +from rpython.annotator.model import AnnotatorError # convenience only! def immutablevalue(x): diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -676,6 +676,10 @@ # ____________________________________________________________ + +class AnnotatorError(Exception): + pass + class UnionError(Exception): """Signals an suspicious attempt at taking the union of deeply incompatible SomeXxx instances.""" diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -2,6 +2,7 @@ from rpython.annotator.model import * from rpython.annotator.listdef import ListDef +from rpython.translator.translator import TranslationContext listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()])) @@ -174,6 +175,28 @@ assert f2.contains(f1) assert f1.contains(f2) +def compile_function(function, annotation=[]): + t = TranslationContext() + t.buildannotator().build_types(function, annotation) + +class AAA(object): + pass + +def test_blocked_inference1(): + def blocked_inference(): + return AAA().m() + + py.test.raises(AnnotatorError, compile_function, blocked_inference) + +def test_blocked_inference2(): + def blocked_inference(): + a = AAA() + b = a.x + return b + + py.test.raises(AnnotatorError, compile_function, blocked_inference) + + if __name__ == '__main__': for name, value in globals().items(): if name.startswith('test_'): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -14,7 +14,7 @@ from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? -from rpython.tool.error import AnnotatorError +from rpython.annotator.model import AnnotatorError # convenience only! 
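A minimal sketch of the pattern the relocated tests earlier in this changeset rely on, assuming a checkout that already contains it (so AnnotatorError lives in rpython.annotator.model): a blocked inference now surfaces as one catchable exception type instead of a raw text dump. The helper name annotate() below is illustrative; the test file itself calls the same recipe compile_function().

    import py
    from rpython.annotator.model import AnnotatorError
    from rpython.translator.translator import TranslationContext

    def annotate(function, argtypes=[]):
        # same recipe as compile_function() in test_model.py above
        t = TranslationContext()
        t.buildannotator().build_types(function, argtypes)

    class AAA(object):
        pass

    def blocked():
        return AAA().m()      # no such method: inference gets blocked

    # the blocked block is now reported as an AnnotatorError
    # (UnionError is a subclass, so it is caught by the same clause)
    py.test.raises(AnnotatorError, annotate, blocked)
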
def immutablevalue(x): diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -68,9 +68,6 @@ lines = source_lines1(graph, *args, **kwds) return ['In %r:' % (graph,)] + lines -class AnnotatorError(Exception): - pass - class NoSuchAttrError(Exception): pass @@ -79,7 +76,7 @@ self.msg = msg def __repr__(self): - return '<%s>' % (self.msg,) + return '%s' % (self.msg,) def gather_error(annotator, graph, block, operindex): msg = [""] diff --git a/rpython/tool/test/test_error.py b/rpython/tool/test/test_error.py --- a/rpython/tool/test/test_error.py +++ b/rpython/tool/test/test_error.py @@ -3,33 +3,10 @@ """ from rpython.translator.translator import TranslationContext -from rpython.tool.error import AnnotatorError from rpython.annotator.model import UnionError import py - -def compile_function(function, annotation=[]): - t = TranslationContext() - t.buildannotator().build_types(function, annotation) - -class AAA(object): - pass - -def test_blocked_inference1(): - def blocked_inference(): - return AAA().m() - - py.test.raises(AnnotatorError, compile_function, blocked_inference) - -def test_blocked_inference2(): - def blocked_inference(): - a = AAA() - b = a.x - return b - - py.test.raises(AnnotatorError, compile_function, blocked_inference) - def test_someobject(): def someobject_degeneration(n): if n == 3: From noreply at buildbot.pypy.org Wed Aug 28 18:26:17 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 18:26:17 +0200 (CEST) Subject: [pypy-commit] pypy improve-errors-again: (Edd, Ronan) Begin refactoring UnionError/AnnotatorError. Message-ID: <20130828162617.DE5261C0149@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: improve-errors-again Changeset: r66413:34e2f4edb458 Date: 2013-08-28 12:43 +0100 http://bitbucket.org/pypy/pypy/changeset/34e2f4edb458/ Log: (Edd, Ronan) Begin refactoring UnionError/AnnotatorError. diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -602,6 +602,10 @@ raise BlockedInference(self, op, opindex) try: resultcell = consider_meth(*argcells) + except annmodel.AnnotatorError as e: # note that UnionError is a subclass + graph = self.bookkeeper.position_key[0] + e.source = '\n'.join(source_lines(graph, block, opindex, long=True)) + raise except Exception, e: graph = self.bookkeeper.position_key[0] e.args = e.args + ( diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -678,9 +678,19 @@ class AnnotatorError(Exception): - pass + def __init__(self, msg=None): + self.msg = msg + self.source = None -class UnionError(Exception): + def __str__(self): + s = "\n\n%s" % self.msg + if self.source is not None: + s += "\n\n" + s += self.source + + return s + +class UnionError(AnnotatorError): """Signals an suspicious attempt at taking the union of deeply incompatible SomeXxx instances.""" @@ -690,25 +700,16 @@ The msg paramter is appended to a generic message. This can be used to give the user a little more information. 
""" + s = "" + if msg is not None: + s += "%s\n\n" % msg + s += "Offending annotations:\n" + s += " %s\n %s" % (s_obj1, s_obj2) self.s_obj1 = s_obj1 self.s_obj2 = s_obj2 - self.msg = msg + self.msg = s self.source = None - def __str__(self): - s = "\n\n" - - if self.msg is not None: - s += "%s\n\n" % self.msg - - s += "Offending annotations:\n" - s += "%s\n%s\n\n" % (self.s_obj1, self.s_obj2) - - if self.source is not None: - s += self.source - - return s - def __repr__(self): return str(self) diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -109,7 +109,6 @@ def format_blocked_annotation_error(annotator, blocked_blocks): text = [] for block, (graph, index) in blocked_blocks.items(): - text.append('\n') text.append("Blocked block -- operation cannot succeed") text.append(gather_error(annotator, graph, block, index)) return '\n'.join(text) From noreply at buildbot.pypy.org Wed Aug 28 18:26:19 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 18:26:19 +0200 (CEST) Subject: [pypy-commit] pypy improve-errors-again: (Edd, Ronan) Change any RPython error that the user should see to a subclass of AnnotatorError. Message-ID: <20130828162619.4A7311C0149@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: improve-errors-again Changeset: r66414:c7946783093d Date: 2013-08-28 15:00 +0100 http://bitbucket.org/pypy/pypy/changeset/c7946783093d/ Log: (Edd, Ronan) Change any RPython error that the user should see to a subclass of AnnotatorError. Builtin exceptions should only be used to report errors with RPython itself and not the user program. In doing so we remove ErrorWrapper (yay) diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -5,7 +5,7 @@ from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, - gather_error, ErrorWrapper, source_lines) + gather_error, source_lines) from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform @@ -136,9 +136,7 @@ checkgraph(flowgraph) nbarg = len(flowgraph.getargs()) - if len(inputcells) != nbarg: - raise TypeError("%s expects %d args, got %d" %( - flowgraph, nbarg, len(inputcells))) + assert len(inputcells) == nbarg # wrong number of args # register the entry point self.addpendinggraph(flowgraph, inputcells) @@ -159,7 +157,7 @@ else: return object else: - raise TypeError, ("Variable or Constant instance expected, " + raise TypeError("Variable or Constant instance expected, " "got %r" % (variable,)) def getuserclassdefinitions(self): @@ -243,7 +241,7 @@ # return annmodel.s_ImpossibleValue return self.bookkeeper.immutableconstant(arg) else: - raise TypeError, 'Variable or Constant expected, got %r' % (arg,) + raise TypeError('Variable or Constant expected, got %r' % (arg,)) def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) @@ -604,12 +602,7 @@ resultcell = consider_meth(*argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] - e.source = '\n'.join(source_lines(graph, block, opindex, long=True)) - raise - except Exception, e: - graph = self.bookkeeper.position_key[0] - e.args = e.args + ( - ErrorWrapper(gather_error(self, graph, block, opindex)),) + e.source = gather_error(self, graph, block, 
opindex) raise if resultcell is None: resultcell = self.noreturnvalue(op) diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -115,9 +115,7 @@ elif num_args > co_argcount: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) - # if a **kwargs argument is needed, explode - if signature.has_kwarg(): - raise TypeError("Keyword arguments as **kwargs is not supported by RPython") + assert not signature.has_kwarg() # XXX should not happen? # handle keyword arguments num_remainingkwds = 0 diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -457,7 +457,7 @@ class __extend__(pairtype(SomeString, SomeUnicodeString), pairtype(SomeUnicodeString, SomeString)): def mod((str, unistring)): - raise NotImplementedError( + raise AnnotatorError( "string formatting mixing strings and unicode not supported") @@ -471,7 +471,7 @@ if (is_unicode and isinstance(s_item, (SomeChar, SomeString)) or is_string and isinstance(s_item, (SomeUnicodeCodePoint, SomeUnicodeString))): - raise NotImplementedError( + raise AnnotatorError( "string formatting mixing strings and unicode not supported") getbookkeeper().count('strformat', s_string, s_tuple) no_nul = s_string.no_nul diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -337,7 +337,7 @@ return SomeAddress() def unicodedata_decimal(s_uchr): - raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" + raise TypeError("unicodedate.decimal() calls should not happen at interp-level") def test(*args): return s_Bool diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -2,7 +2,7 @@ Type inference for user-defined classes. """ from rpython.annotator.model import SomePBC, s_ImpossibleValue, unionof -from rpython.annotator.model import SomeInteger, SomeTuple, SomeString +from rpython.annotator.model import SomeInteger, SomeTuple, SomeString, AnnotatorError from rpython.annotator import description @@ -429,7 +429,8 @@ result.extend(slots) return result -class NoSuchAttrError(Exception): +#class NoSuchAttrError(Exception): +class NoSuchAttrError(AnnotatorError): """Raised when an attribute is found on a class where __slots__ or _attrs_ forbits it.""" diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -6,6 +6,7 @@ from rpython.annotator.argument import rawshape, ArgErr from rpython.tool.sourcetools import valid_identifier, func_with_new_name from rpython.tool.pairtype import extendabletype +from rpython.annotator.model import AnnotatorError class CallFamily(object): """A family of Desc objects that could be called from common call sites. 
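The annrpython.py hunk earlier in this changeset catches annmodel.AnnotatorError (UnionError included), stores the gather_error() output on e.source and re-raises. A self-contained sketch of that enrich-and-re-raise idiom follows; the class and helpers here are stand-ins for illustration, not the real annotator code.

    class AnnotatorError(Exception):
        # stand-in for rpython.annotator.model.AnnotatorError
        def __init__(self, msg=None):
            self.msg = msg
            self.source = None            # filled in where source info is known

        def __str__(self):
            s = "\n\n%s" % self.msg
            if self.source is not None:
                s += "\n\n" + self.source
            return s

    def consider_op(op):
        # stands in for consider_meth(*argcells) inside flowin()
        raise AnnotatorError("signature mismatch: f() takes 1 argument, got 2")

    def flowin(op, source_lines):
        try:
            consider_op(op)
        except AnnotatorError as e:       # note: UnionError is a subclass
            e.source = source_lines(op)   # attach the offending source lines
            raise

    try:
        flowin("v0 = simple_call(f, a, b)",
               lambda op: "In <graph of f>:\n    %s" % op)
    except AnnotatorError as e:
        assert "signature mismatch" in str(e)
        assert "In <graph of f>" in str(e)    # the report now carries the source
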
@@ -261,7 +262,7 @@ try: inputcells = args.match_signature(signature, defs_s) except ArgErr, e: - raise TypeError("signature mismatch: %s() %s" % + raise AnnotatorError("signature mismatch: %s() %s" % (self.name, e.getmsg())) return inputcells @@ -678,7 +679,7 @@ value = value.__get__(42) classdef = None # don't bind elif isinstance(value, classmethod): - raise AssertionError("classmethods are not supported") + raise AnnotatorError("classmethods are not supported") s_value = self.bookkeeper.immutablevalue(value) if classdef is not None: s_value = s_value.bind_callables_under(classdef, name) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -228,7 +228,7 @@ def f(): return X().meth() a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) def test_methodcall1(self): a = self.RPythonAnnotator() @@ -3381,22 +3381,22 @@ return '%s' % unichr(x) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s' % (unichr(x) * 3) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x)) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x) * 15) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def test_strformatting_tuple(self): @@ -3434,7 +3434,7 @@ return [1, 2, 3][s:e] a = self.RPythonAnnotator() - py.test.raises(TypeError, "a.build_types(f, [int, int])") + py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(x): @@ -4038,7 +4038,6 @@ s_objs = set([type(the_exc.s_obj1), type(the_exc.s_obj2)]) assert s_objs == set([annmodel.SomeInteger, annmodel.SomeString]) - assert the_exc.msg == None # Check that this is a generic UnionError def test_unionerror_tuple_size(self): def f(x): @@ -4051,7 +4050,7 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == "RPython cannot unify tuples of different length: 2 versus 1" + assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg def test_unionerror_signedness(self): def f(x): @@ -4064,8 +4063,8 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == ("RPython cannot prove that these integers are of " - "the same signedness") + assert ("RPython cannot prove that these integers are of the " + "same signedness" in exc.value.msg) def test_unionerror_instance(self): class A(object): pass @@ -4081,7 +4080,8 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == ("RPython cannot unify instances with no common base class") + assert ("RPython cannot unify instances with no common base class" + in exc.value.msg) def test_unionerror_iters(self): @@ -4096,7 +4096,8 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert 
exc.value.msg == ("RPython cannot unify incompatible iterator variants") + assert ("RPython cannot unify incompatible iterator variants" in + exc.value.msg) def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -77,10 +77,6 @@ new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1], {'c': 5, 'd': 7}) - sig = Signature(['a', 'b', 'c'], None, 'kw') - py.test.raises(TypeError, args.match_signature, sig, [2, 3]) - def test_rawshape(self): space = DummySpace() args = make_arguments_for_translation(space, [1,2,3]) diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -84,8 +84,7 @@ return obj.is_true() def hash(obj): - raise TypeError, ("cannot use hash() in RPython; " - "see objectmodel.compute_xxx()") + raise AnnotatorError("cannot use hash() in RPython") def str(obj): getbookkeeper().count('str', obj) @@ -341,10 +340,10 @@ def check_negative_slice(s_start, s_stop): if isinstance(s_start, SomeInteger) and not s_start.nonneg: - raise TypeError("slicing: not proven to have non-negative start") + raise AnnotatorError("slicing: not proven to have non-negative start") if isinstance(s_stop, SomeInteger) and not s_stop.nonneg and \ getattr(s_stop, 'const', 0) != -1: - raise TypeError("slicing: not proven to have non-negative stop") + raise AnnotatorError("slicing: not proven to have non-negative stop") class __extend__(SomeDict): @@ -529,10 +528,10 @@ class __extend__(SomeUnicodeString): def method_encode(uni, s_enc): if not s_enc.is_constant(): - raise TypeError("Non-constant encoding not supported") + raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): - raise TypeError("Encoding %s not supported for unicode" % (enc,)) + raise AnnotatorError("Encoding %s not supported for unicode" % (enc,)) return SomeString() method_encode.can_only_throw = [UnicodeEncodeError] @@ -562,10 +561,10 @@ def method_decode(str, s_enc): if not s_enc.is_constant(): - raise TypeError("Non-constant encoding not supported") + raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): - raise TypeError("Encoding %s not supported for strings" % (enc,)) + raise AnnotatorError("Encoding %s not supported for strings" % (enc,)) return SomeUnicodeString() method_decode.can_only_throw = [UnicodeDecodeError] diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -68,16 +68,6 @@ lines = source_lines1(graph, *args, **kwds) return ['In %r:' % (graph,)] + lines -class NoSuchAttrError(Exception): - pass - -class ErrorWrapper(object): - def __init__(self, msg): - self.msg = msg - - def __repr__(self): - return '%s' % (self.msg,) - def gather_error(annotator, graph, block, operindex): msg = [""] From noreply at buildbot.pypy.org Wed Aug 28 18:26:20 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 18:26:20 +0200 (CEST) Subject: [pypy-commit] pypy improve-errors-again: (Ronan, Edd) Improve an error message. 
Message-ID: <20130828162620.733F71C0149@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: improve-errors-again Changeset: r66415:3f413263db37 Date: 2013-08-28 15:40 +0100 http://bitbucket.org/pypy/pypy/changeset/3f413263db37/ Log: (Ronan, Edd) Improve an error message. diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -728,7 +728,7 @@ def setattr(pbc, s_attr, s_value): if not pbc.isNone(): - raise AnnotatorError("setattr on %r" % pbc) + raise AnnotatorError("Cannot modify attribute of a pre-built constant") def call(pbc, args): bookkeeper = getbookkeeper() From noreply at buildbot.pypy.org Wed Aug 28 18:26:21 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 18:26:21 +0200 (CEST) Subject: [pypy-commit] pypy improve-errors-again: (Ronan, Edd) Correctly report a couple of user-level errors. Message-ID: <20130828162621.A21E71C0149@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: improve-errors-again Changeset: r66416:bd95e658ba19 Date: 2013-08-28 16:53 +0100 http://bitbucket.org/pypy/pypy/changeset/bd95e658ba19/ Log: (Ronan, Edd) Correctly report a couple of user-level errors. It wasn't obvious from the error message what the actual problem in the user's program was. diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4099,6 +4099,25 @@ assert ("RPython cannot unify incompatible iterator variants" in exc.value.msg) + def test_variable_getattr(self): + class A(object): pass + def f(y): + a = A() + return getattr(a, y) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.AnnotatorError) as exc: + a.build_types(f, [str]) + assert ("variable argument to getattr" in exc.value.msg) + + def test_bad_call(self): + def f(x): + return x() + a = self.RPythonAnnotator() + with py.test.raises(annmodel.AnnotatorError) as exc: + a.build_types(f, [str]) + assert ("Cannot prove that the object is callable" in exc.value.msg) + + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -157,9 +157,7 @@ return obj.call(getbookkeeper().build_args("call_args", args_s)) def call(obj, args, implicit_init=False): - #raise Exception, "cannot follow call_args%r" % ((obj, args),) - getbookkeeper().warning("cannot follow call(%r, %r)" % (obj, args)) - return SomeObject() + raise AnnotatorError("Cannot prove that the object is callable") def op_contains(obj, s_element): return s_Bool @@ -652,7 +650,7 @@ if s_attr.is_constant() and isinstance(s_attr.const, str): attr = s_attr.const return ins._true_getattr(attr) - return SomeObject() + raise AnnotatorError("A variable argument to getattr is not RPython") getattr.can_only_throw = [] def setattr(ins, s_attr, s_value): @@ -750,7 +748,8 @@ # whose length is the constant 0; so let's tentatively answer 0. return immutablevalue(0) else: - return SomeObject() # len() on a pbc? no chance + # This should probably never happen + raise AnnotatorError("Cannot call len on a pbc") # annotation of low-level types from rpython.annotator.model import SomePtr, SomeLLADTMeth From noreply at buildbot.pypy.org Wed Aug 28 18:26:22 2013 From: noreply at buildbot.pypy.org (vext01) Date: Wed, 28 Aug 2013 18:26:22 +0200 (CEST) Subject: [pypy-commit] pypy improve-errors-again: Oops, left a comment. 
Message-ID: <20130828162622.D4AFF1C0149@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: improve-errors-again Changeset: r66417:a0b3bd524f32 Date: 2013-08-28 17:22 +0100 http://bitbucket.org/pypy/pypy/changeset/a0b3bd524f32/ Log: Oops, left a comment. diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -429,7 +429,6 @@ result.extend(slots) return result -#class NoSuchAttrError(Exception): class NoSuchAttrError(AnnotatorError): """Raised when an attribute is found on a class where __slots__ or _attrs_ forbits it.""" From noreply at buildbot.pypy.org Wed Aug 28 18:26:24 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 28 Aug 2013 18:26:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in vext01/pypy/improve-errors-again (pull request #184) Message-ID: <20130828162624.111A71C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r66418:4f1cd13cc351 Date: 2013-08-28 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/4f1cd13cc351/ Log: Merged in vext01/pypy/improve-errors-again (pull request #184) More improvements and refactorings of error messages. diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -5,7 +5,7 @@ from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, - AnnotatorError, gather_error, ErrorWrapper, source_lines) + gather_error, source_lines) from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform @@ -18,7 +18,6 @@ FAIL = object() - class RPythonAnnotator(object): """Block annotator for RPython. 
See description in doc/translation.txt.""" @@ -137,9 +136,7 @@ checkgraph(flowgraph) nbarg = len(flowgraph.getargs()) - if len(inputcells) != nbarg: - raise TypeError("%s expects %d args, got %d" %( - flowgraph, nbarg, len(inputcells))) + assert len(inputcells) == nbarg # wrong number of args # register the entry point self.addpendinggraph(flowgraph, inputcells) @@ -160,7 +157,7 @@ else: return object else: - raise TypeError, ("Variable or Constant instance expected, " + raise TypeError("Variable or Constant instance expected, " "got %r" % (variable,)) def getuserclassdefinitions(self): @@ -221,7 +218,7 @@ text = format_blocked_annotation_error(self, self.blocked_blocks) #raise SystemExit() - raise AnnotatorError(text) + raise annmodel.AnnotatorError(text) for graph in newgraphs: v = graph.getreturnvar() if v not in self.bindings: @@ -244,7 +241,7 @@ # return annmodel.s_ImpossibleValue return self.bookkeeper.immutableconstant(arg) else: - raise TypeError, 'Variable or Constant expected, got %r' % (arg,) + raise TypeError('Variable or Constant expected, got %r' % (arg,)) def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) @@ -603,10 +600,9 @@ raise BlockedInference(self, op, opindex) try: resultcell = consider_meth(*argcells) - except Exception, e: + except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] - e.args = e.args + ( - ErrorWrapper(gather_error(self, graph, block, opindex)),) + e.source = gather_error(self, graph, block, opindex) raise if resultcell is None: resultcell = self.noreturnvalue(op) diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -115,9 +115,7 @@ elif num_args > co_argcount: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) - # if a **kwargs argument is needed, explode - if signature.has_kwarg(): - raise TypeError("Keyword arguments as **kwargs is not supported by RPython") + assert not signature.has_kwarg() # XXX should not happen? # handle keyword arguments num_remainingkwds = 0 diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -20,7 +20,7 @@ from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.rlib import rarithmetic -from rpython.tool.error import AnnotatorError +from rpython.annotator.model import AnnotatorError # convenience only! 
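The model.py hunk a little further down in this merge turns UnionError into an AnnotatorError subclass whose message embeds both offending annotations. A stripped-down rendering of that idea, using plain strings where the real code passes SomeXxx annotation instances:

    class UnionError(Exception):
        # mock of the reworked class, for illustration only
        def __init__(self, s_obj1, s_obj2, msg=None):
            s = ""
            if msg is not None:
                s += "%s\n\n" % msg
            s += "Offending annotations:\n"
            s += "  %s\n  %s" % (s_obj1, s_obj2)
            self.msg = s
            self.s_obj1 = s_obj1
            self.s_obj2 = s_obj2

    try:
        raise UnionError("SomeInteger(unsigned=True)", "SomeInteger()",
                         "RPython cannot prove that these integers are of "
                         "the same signedness")
    except UnionError as e:
        assert "Offending annotations" in e.msg
        assert "same signedness" in e.msg
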
def immutablevalue(x): @@ -457,7 +457,7 @@ class __extend__(pairtype(SomeString, SomeUnicodeString), pairtype(SomeUnicodeString, SomeString)): def mod((str, unistring)): - raise NotImplementedError( + raise AnnotatorError( "string formatting mixing strings and unicode not supported") @@ -471,7 +471,7 @@ if (is_unicode and isinstance(s_item, (SomeChar, SomeString)) or is_string and isinstance(s_item, (SomeUnicodeCodePoint, SomeUnicodeString))): - raise NotImplementedError( + raise AnnotatorError( "string formatting mixing strings and unicode not supported") getbookkeeper().count('strformat', s_string, s_tuple) no_nul = s_string.no_nul diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -337,7 +337,7 @@ return SomeAddress() def unicodedata_decimal(s_uchr): - raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" + raise TypeError("unicodedate.decimal() calls should not happen at interp-level") def test(*args): return s_Bool diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -2,7 +2,7 @@ Type inference for user-defined classes. """ from rpython.annotator.model import SomePBC, s_ImpossibleValue, unionof -from rpython.annotator.model import SomeInteger, SomeTuple, SomeString +from rpython.annotator.model import SomeInteger, SomeTuple, SomeString, AnnotatorError from rpython.annotator import description @@ -429,7 +429,7 @@ result.extend(slots) return result -class NoSuchAttrError(Exception): +class NoSuchAttrError(AnnotatorError): """Raised when an attribute is found on a class where __slots__ or _attrs_ forbits it.""" diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -6,6 +6,7 @@ from rpython.annotator.argument import rawshape, ArgErr from rpython.tool.sourcetools import valid_identifier, func_with_new_name from rpython.tool.pairtype import extendabletype +from rpython.annotator.model import AnnotatorError class CallFamily(object): """A family of Desc objects that could be called from common call sites. @@ -261,7 +262,7 @@ try: inputcells = args.match_signature(signature, defs_s) except ArgErr, e: - raise TypeError("signature mismatch: %s() %s" % + raise AnnotatorError("signature mismatch: %s() %s" % (self.name, e.getmsg())) return inputcells @@ -678,7 +679,7 @@ value = value.__get__(42) classdef = None # don't bind elif isinstance(value, classmethod): - raise AssertionError("classmethods are not supported") + raise AnnotatorError("classmethods are not supported") s_value = self.bookkeeper.immutablevalue(value) if classdef is not None: s_value = s_value.bind_callables_under(classdef, name) diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -676,7 +676,21 @@ # ____________________________________________________________ -class UnionError(Exception): + +class AnnotatorError(Exception): + def __init__(self, msg=None): + self.msg = msg + self.source = None + + def __str__(self): + s = "\n\n%s" % self.msg + if self.source is not None: + s += "\n\n" + s += self.source + + return s + +class UnionError(AnnotatorError): """Signals an suspicious attempt at taking the union of deeply incompatible SomeXxx instances.""" @@ -686,25 +700,16 @@ The msg paramter is appended to a generic message. 
This can be used to give the user a little more information. """ + s = "" + if msg is not None: + s += "%s\n\n" % msg + s += "Offending annotations:\n" + s += " %s\n %s" % (s_obj1, s_obj2) self.s_obj1 = s_obj1 self.s_obj2 = s_obj2 - self.msg = msg + self.msg = s self.source = None - def __str__(self): - s = "\n\n" - - if self.msg is not None: - s += "%s\n\n" % self.msg - - s += "Offending annotations:\n" - s += "%s\n%s\n\n" % (self.s_obj1, self.s_obj2) - - if self.source is not None: - s += self.source - - return s - def __repr__(self): return str(self) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -228,7 +228,7 @@ def f(): return X().meth() a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) def test_methodcall1(self): a = self.RPythonAnnotator() @@ -3381,22 +3381,22 @@ return '%s' % unichr(x) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s' % (unichr(x) * 3) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x)) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x) * 15) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def test_strformatting_tuple(self): @@ -3434,7 +3434,7 @@ return [1, 2, 3][s:e] a = self.RPythonAnnotator() - py.test.raises(TypeError, "a.build_types(f, [int, int])") + py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(x): @@ -4038,7 +4038,6 @@ s_objs = set([type(the_exc.s_obj1), type(the_exc.s_obj2)]) assert s_objs == set([annmodel.SomeInteger, annmodel.SomeString]) - assert the_exc.msg == None # Check that this is a generic UnionError def test_unionerror_tuple_size(self): def f(x): @@ -4051,7 +4050,7 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == "RPython cannot unify tuples of different length: 2 versus 1" + assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg def test_unionerror_signedness(self): def f(x): @@ -4064,8 +4063,8 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == ("RPython cannot prove that these integers are of " - "the same signedness") + assert ("RPython cannot prove that these integers are of the " + "same signedness" in exc.value.msg) def test_unionerror_instance(self): class A(object): pass @@ -4081,7 +4080,8 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == ("RPython cannot unify instances with no common base class") + assert ("RPython cannot unify instances with no common base class" + in exc.value.msg) def test_unionerror_iters(self): @@ -4096,7 +4096,27 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == ("RPython cannot unify 
incompatible iterator variants") + assert ("RPython cannot unify incompatible iterator variants" in + exc.value.msg) + + def test_variable_getattr(self): + class A(object): pass + def f(y): + a = A() + return getattr(a, y) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.AnnotatorError) as exc: + a.build_types(f, [str]) + assert ("variable argument to getattr" in exc.value.msg) + + def test_bad_call(self): + def f(x): + return x() + a = self.RPythonAnnotator() + with py.test.raises(annmodel.AnnotatorError) as exc: + a.build_types(f, [str]) + assert ("Cannot prove that the object is callable" in exc.value.msg) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -77,10 +77,6 @@ new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1], {'c': 5, 'd': 7}) - sig = Signature(['a', 'b', 'c'], None, 'kw') - py.test.raises(TypeError, args.match_signature, sig, [2, 3]) - def test_rawshape(self): space = DummySpace() args = make_arguments_for_translation(space, [1,2,3]) diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -2,6 +2,7 @@ from rpython.annotator.model import * from rpython.annotator.listdef import ListDef +from rpython.translator.translator import TranslationContext listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()])) @@ -174,6 +175,28 @@ assert f2.contains(f1) assert f1.contains(f2) +def compile_function(function, annotation=[]): + t = TranslationContext() + t.buildannotator().build_types(function, annotation) + +class AAA(object): + pass + +def test_blocked_inference1(): + def blocked_inference(): + return AAA().m() + + py.test.raises(AnnotatorError, compile_function, blocked_inference) + +def test_blocked_inference2(): + def blocked_inference(): + a = AAA() + b = a.x + return b + + py.test.raises(AnnotatorError, compile_function, blocked_inference) + + if __name__ == '__main__': for name, value in globals().items(): if name.startswith('test_'): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -14,7 +14,7 @@ from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? -from rpython.tool.error import AnnotatorError +from rpython.annotator.model import AnnotatorError # convenience only! 
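The test additions earlier in this merge diff show what the branch buys at the user level: getattr() with a non-constant attribute name, and calling something the annotator cannot prove callable, now raise AnnotatorError with a readable message. A condensed, standalone form of those two tests follows; it assumes a tree containing these changes, is meant to run under pytest, and uses RPythonAnnotator directly where the original suite goes through its test base class.

    import py
    from rpython.annotator import model as annmodel
    from rpython.annotator.annrpython import RPythonAnnotator

    class A(object):
        pass

    def variable_getattr(y):
        return getattr(A(), y)        # attribute name only known at run time

    def bad_call(x):
        return x()                    # a string is not callable

    def test_user_level_errors():
        a = RPythonAnnotator()
        with py.test.raises(annmodel.AnnotatorError) as exc:
            a.build_types(variable_getattr, [str])
        assert "variable argument to getattr" in exc.value.msg

        a = RPythonAnnotator()
        with py.test.raises(annmodel.AnnotatorError) as exc:
            a.build_types(bad_call, [str])
        assert "Cannot prove that the object is callable" in exc.value.msg
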
def immutablevalue(x): @@ -84,8 +84,7 @@ return obj.is_true() def hash(obj): - raise TypeError, ("cannot use hash() in RPython; " - "see objectmodel.compute_xxx()") + raise AnnotatorError("cannot use hash() in RPython") def str(obj): getbookkeeper().count('str', obj) @@ -158,9 +157,7 @@ return obj.call(getbookkeeper().build_args("call_args", args_s)) def call(obj, args, implicit_init=False): - #raise Exception, "cannot follow call_args%r" % ((obj, args),) - getbookkeeper().warning("cannot follow call(%r, %r)" % (obj, args)) - return SomeObject() + raise AnnotatorError("Cannot prove that the object is callable") def op_contains(obj, s_element): return s_Bool @@ -341,10 +338,10 @@ def check_negative_slice(s_start, s_stop): if isinstance(s_start, SomeInteger) and not s_start.nonneg: - raise TypeError("slicing: not proven to have non-negative start") + raise AnnotatorError("slicing: not proven to have non-negative start") if isinstance(s_stop, SomeInteger) and not s_stop.nonneg and \ getattr(s_stop, 'const', 0) != -1: - raise TypeError("slicing: not proven to have non-negative stop") + raise AnnotatorError("slicing: not proven to have non-negative stop") class __extend__(SomeDict): @@ -529,10 +526,10 @@ class __extend__(SomeUnicodeString): def method_encode(uni, s_enc): if not s_enc.is_constant(): - raise TypeError("Non-constant encoding not supported") + raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): - raise TypeError("Encoding %s not supported for unicode" % (enc,)) + raise AnnotatorError("Encoding %s not supported for unicode" % (enc,)) return SomeString() method_encode.can_only_throw = [UnicodeEncodeError] @@ -562,10 +559,10 @@ def method_decode(str, s_enc): if not s_enc.is_constant(): - raise TypeError("Non-constant encoding not supported") + raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): - raise TypeError("Encoding %s not supported for strings" % (enc,)) + raise AnnotatorError("Encoding %s not supported for strings" % (enc,)) return SomeUnicodeString() method_decode.can_only_throw = [UnicodeDecodeError] @@ -653,7 +650,7 @@ if s_attr.is_constant() and isinstance(s_attr.const, str): attr = s_attr.const return ins._true_getattr(attr) - return SomeObject() + raise AnnotatorError("A variable argument to getattr is not RPython") getattr.can_only_throw = [] def setattr(ins, s_attr, s_value): @@ -729,7 +726,7 @@ def setattr(pbc, s_attr, s_value): if not pbc.isNone(): - raise AnnotatorError("setattr on %r" % pbc) + raise AnnotatorError("Cannot modify attribute of a pre-built constant") def call(pbc, args): bookkeeper = getbookkeeper() @@ -751,7 +748,8 @@ # whose length is the constant 0; so let's tentatively answer 0. return immutablevalue(0) else: - return SomeObject() # len() on a pbc? 
no chance + # This should probably never happen + raise AnnotatorError("Cannot call len on a pbc") # annotation of low-level types from rpython.annotator.model import SomePtr, SomeLLADTMeth diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -68,19 +68,6 @@ lines = source_lines1(graph, *args, **kwds) return ['In %r:' % (graph,)] + lines -class AnnotatorError(Exception): - pass - -class NoSuchAttrError(Exception): - pass - -class ErrorWrapper(object): - def __init__(self, msg): - self.msg = msg - - def __repr__(self): - return '<%s>' % (self.msg,) - def gather_error(annotator, graph, block, operindex): msg = [""] @@ -94,19 +81,24 @@ msg += source_lines(graph, block, operindex, long=True) if oper is not None: if SHOW_ANNOTATIONS: - msg.append("Known variable annotations:") - for arg in oper.args + [oper.result]: - if isinstance(arg, Variable): - try: - msg.append(" " + str(arg) + " = " + str(annotator.binding(arg))) - except KeyError: - pass + msg += format_annotations(annotator, oper) + msg += [''] return "\n".join(msg) +def format_annotations(annotator, oper): + msg = [] + msg.append("Known variable annotations:") + for arg in oper.args + [oper.result]: + if isinstance(arg, Variable): + try: + msg.append(" " + str(arg) + " = " + str(annotator.binding(arg))) + except KeyError: + pass + return msg + def format_blocked_annotation_error(annotator, blocked_blocks): text = [] for block, (graph, index) in blocked_blocks.items(): - text.append('\n') text.append("Blocked block -- operation cannot succeed") text.append(gather_error(annotator, graph, block, index)) return '\n'.join(text) diff --git a/rpython/tool/test/test_error.py b/rpython/tool/test/test_error.py --- a/rpython/tool/test/test_error.py +++ b/rpython/tool/test/test_error.py @@ -3,33 +3,10 @@ """ from rpython.translator.translator import TranslationContext -from rpython.tool.error import AnnotatorError from rpython.annotator.model import UnionError import py - -def compile_function(function, annotation=[]): - t = TranslationContext() - t.buildannotator().build_types(function, annotation) - -class AAA(object): - pass - -def test_blocked_inference1(): - def blocked_inference(): - return AAA().m() - - py.test.raises(AnnotatorError, compile_function, blocked_inference) - -def test_blocked_inference2(): - def blocked_inference(): - a = AAA() - b = a.x - return b - - py.test.raises(AnnotatorError, compile_function, blocked_inference) - def test_someobject(): def someobject_degeneration(n): if n == 3: From noreply at buildbot.pypy.org Wed Aug 28 19:07:29 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 19:07:29 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: (fijal, antocuni) start fixing the assembler backend tests Message-ID: <20130828170729.8A3E31C0189@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66419:480e2c71e31a Date: 2013-08-28 18:06 +0100 http://bitbucket.org/pypy/pypy/changeset/480e2c71e31a/ Log: (fijal, antocuni) start fixing the assembler backend tests diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -339,27 +339,17 @@ assert isinstance(res, history.AbstractFailDescr) return res - def _decode_pos(self, deadframe, index): - descr = self.get_latest_descr(deadframe) - if descr.final_descr: - assert index == 0 - return 0 - return descr.rd_locs[index] 
- - def get_int_value(self, deadframe, index): - pos = self._decode_pos(deadframe, index) + def get_int_value(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) return self.read_int_at_mem(deadframe, pos + ofs, WORD, 1) - def get_ref_value(self, deadframe, index): - pos = self._decode_pos(deadframe, index) + def get_ref_value(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) return self.read_ref_at_mem(deadframe, pos + ofs) - def get_float_value(self, deadframe, index): - pos = self._decode_pos(deadframe, index) + def get_float_value(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) return self.read_float_at_mem(deadframe, pos + ofs) diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -664,8 +664,7 @@ return False if operations[i + 1].getarg(0) is not op.result: return False - if (self.longevity[op.result][1] > i + 1 or - op.result in operations[i + 1].getfailargs()): + if self.longevity[op.result][1] > i + 1: return False return True diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -168,7 +168,6 @@ ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) @@ -177,31 +176,6 @@ res = self.cpu.get_int_value(deadframe, 0) assert res == 10 - def test_compile_with_holes_in_fail_args(self): - i0 = BoxInt() - i1 = BoxInt() - i2 = BoxInt() - i3 = BoxInt() - looptoken = JitCellToken() - targettoken = TargetToken() - operations = [ - ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), - ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), - ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), - ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.JUMP, [i1], None, descr=targettoken), - ] - inputargs = [i3] - operations[4].setfailargs([None, None, i1, None]) - - self.cpu.compile_loop(None, inputargs, operations, looptoken) - deadframe = self.cpu.execute_token(looptoken, 44) - fail = self.cpu.get_latest_descr(deadframe) - assert fail.identifier == 2 - res = self.cpu.get_int_value(deadframe, 2) - assert res == 10 - def test_backends_dont_keep_loops_alive(self): import weakref, gc self.cpu.dont_keepalive_stuff = True @@ -218,7 +192,6 @@ ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) wr_i1 = weakref.ref(i1) wr_guard = weakref.ref(operations[2]) self.cpu.compile_loop(None, inputargs, operations, looptoken) @@ -248,7 +221,6 @@ ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] - operations[3].setfailargs([i1]) self.cpu.compile_loop(None, inputargs, operations, looptoken) i1b = BoxInt() @@ -258,7 +230,6 @@ ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] - bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(None, faildescr1, [i1b], bridge, looptoken) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- 
a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -507,8 +507,6 @@ CNT_FLOAT = 0x60000000 def store_final_boxes(self, guard_op, boxes): - guard_op.setfailargs(boxes) - self.rd_count = len(boxes) self.guard_opnum = guard_op.getopnum() def make_a_counter_per_value(self, guard_value_op): diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -700,12 +700,6 @@ if hasattr(op.getdescr(), '_debug_suboperations'): ops = op.getdescr()._debug_suboperations TreeLoop.check_consistency_of_branch(ops, seen.copy()) - for box in op.getfailargs() or []: - if box is not None: - assert isinstance(box, Box) - assert box in seen - else: - assert op.getfailargs() is None box = op.result if box is not None: assert isinstance(box, Box) From noreply at buildbot.pypy.org Wed Aug 28 19:10:32 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 28 Aug 2013 19:10:32 +0200 (CEST) Subject: [pypy-commit] pypy jitframe-offset: merge default Message-ID: <20130828171032.91F9B1C07BB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jitframe-offset Changeset: r66420:dacc8ce3ed06 Date: 2013-08-28 18:09 +0100 http://bitbucket.org/pypy/pypy/changeset/dacc8ce3ed06/ Log: merge default diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -5,7 +5,7 @@ from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, - AnnotatorError, gather_error, ErrorWrapper, source_lines) + gather_error, source_lines) from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform @@ -18,7 +18,6 @@ FAIL = object() - class RPythonAnnotator(object): """Block annotator for RPython. 
See description in doc/translation.txt.""" @@ -137,9 +136,7 @@ checkgraph(flowgraph) nbarg = len(flowgraph.getargs()) - if len(inputcells) != nbarg: - raise TypeError("%s expects %d args, got %d" %( - flowgraph, nbarg, len(inputcells))) + assert len(inputcells) == nbarg # wrong number of args # register the entry point self.addpendinggraph(flowgraph, inputcells) @@ -160,7 +157,7 @@ else: return object else: - raise TypeError, ("Variable or Constant instance expected, " + raise TypeError("Variable or Constant instance expected, " "got %r" % (variable,)) def getuserclassdefinitions(self): @@ -221,7 +218,7 @@ text = format_blocked_annotation_error(self, self.blocked_blocks) #raise SystemExit() - raise AnnotatorError(text) + raise annmodel.AnnotatorError(text) for graph in newgraphs: v = graph.getreturnvar() if v not in self.bindings: @@ -244,7 +241,7 @@ # return annmodel.s_ImpossibleValue return self.bookkeeper.immutableconstant(arg) else: - raise TypeError, 'Variable or Constant expected, got %r' % (arg,) + raise TypeError('Variable or Constant expected, got %r' % (arg,)) def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) @@ -603,10 +600,9 @@ raise BlockedInference(self, op, opindex) try: resultcell = consider_meth(*argcells) - except Exception, e: + except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] - e.args = e.args + ( - ErrorWrapper(gather_error(self, graph, block, opindex)),) + e.source = gather_error(self, graph, block, opindex) raise if resultcell is None: resultcell = self.noreturnvalue(op) diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -115,9 +115,7 @@ elif num_args > co_argcount: raise ArgErrCount(num_args, num_kwds, signature, defaults_w, 0) - # if a **kwargs argument is needed, explode - if signature.has_kwarg(): - raise TypeError("Keyword arguments as **kwargs is not supported by RPython") + assert not signature.has_kwarg() # XXX should not happen? # handle keyword arguments num_remainingkwds = 0 diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -20,7 +20,7 @@ from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.rlib import rarithmetic -from rpython.tool.error import AnnotatorError +from rpython.annotator.model import AnnotatorError # convenience only! 
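# (Illustrative note, not part of this diff: after this merge AnnotatorError
#  carries a `msg` plus an optional `source` block; annrpython.py above fills
#  `source` via gather_error() when an operation fails, and str(exc) simply
#  concatenates the two.  A rough usage sketch, with `entry_point` standing in
#  for any RPython function:
#      try:
#          RPythonAnnotator().build_types(entry_point, [int])
#      except annmodel.AnnotatorError, e:
#          print str(e)   # the message, then "Known variable annotations: ..."
#  UnionError, now a subclass, prefills msg with the two offending
#  annotations, which is why the tests below check `... in exc.value.msg`
#  instead of comparing for exact equality.)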
def immutablevalue(x): @@ -457,7 +457,7 @@ class __extend__(pairtype(SomeString, SomeUnicodeString), pairtype(SomeUnicodeString, SomeString)): def mod((str, unistring)): - raise NotImplementedError( + raise AnnotatorError( "string formatting mixing strings and unicode not supported") @@ -471,7 +471,7 @@ if (is_unicode and isinstance(s_item, (SomeChar, SomeString)) or is_string and isinstance(s_item, (SomeUnicodeCodePoint, SomeUnicodeString))): - raise NotImplementedError( + raise AnnotatorError( "string formatting mixing strings and unicode not supported") getbookkeeper().count('strformat', s_string, s_tuple) no_nul = s_string.no_nul diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -337,7 +337,7 @@ return SomeAddress() def unicodedata_decimal(s_uchr): - raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" + raise TypeError("unicodedate.decimal() calls should not happen at interp-level") def test(*args): return s_Bool diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -2,7 +2,7 @@ Type inference for user-defined classes. """ from rpython.annotator.model import SomePBC, s_ImpossibleValue, unionof -from rpython.annotator.model import SomeInteger, SomeTuple, SomeString +from rpython.annotator.model import SomeInteger, SomeTuple, SomeString, AnnotatorError from rpython.annotator import description @@ -429,7 +429,7 @@ result.extend(slots) return result -class NoSuchAttrError(Exception): +class NoSuchAttrError(AnnotatorError): """Raised when an attribute is found on a class where __slots__ or _attrs_ forbits it.""" diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -6,6 +6,7 @@ from rpython.annotator.argument import rawshape, ArgErr from rpython.tool.sourcetools import valid_identifier, func_with_new_name from rpython.tool.pairtype import extendabletype +from rpython.annotator.model import AnnotatorError class CallFamily(object): """A family of Desc objects that could be called from common call sites. @@ -261,7 +262,7 @@ try: inputcells = args.match_signature(signature, defs_s) except ArgErr, e: - raise TypeError("signature mismatch: %s() %s" % + raise AnnotatorError("signature mismatch: %s() %s" % (self.name, e.getmsg())) return inputcells @@ -678,7 +679,7 @@ value = value.__get__(42) classdef = None # don't bind elif isinstance(value, classmethod): - raise AssertionError("classmethods are not supported") + raise AnnotatorError("classmethods are not supported") s_value = self.bookkeeper.immutablevalue(value) if classdef is not None: s_value = s_value.bind_callables_under(classdef, name) diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -676,7 +676,21 @@ # ____________________________________________________________ -class UnionError(Exception): + +class AnnotatorError(Exception): + def __init__(self, msg=None): + self.msg = msg + self.source = None + + def __str__(self): + s = "\n\n%s" % self.msg + if self.source is not None: + s += "\n\n" + s += self.source + + return s + +class UnionError(AnnotatorError): """Signals an suspicious attempt at taking the union of deeply incompatible SomeXxx instances.""" @@ -686,25 +700,16 @@ The msg paramter is appended to a generic message. 
This can be used to give the user a little more information. """ + s = "" + if msg is not None: + s += "%s\n\n" % msg + s += "Offending annotations:\n" + s += " %s\n %s" % (s_obj1, s_obj2) self.s_obj1 = s_obj1 self.s_obj2 = s_obj2 - self.msg = msg + self.msg = s self.source = None - def __str__(self): - s = "\n\n" - - if self.msg is not None: - s += "%s\n\n" % self.msg - - s += "Offending annotations:\n" - s += "%s\n%s\n\n" % (self.s_obj1, self.s_obj2) - - if self.source is not None: - s += self.source - - return s - def __repr__(self): return str(self) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -228,7 +228,7 @@ def f(): return X().meth() a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) def test_methodcall1(self): a = self.RPythonAnnotator() @@ -3381,22 +3381,22 @@ return '%s' % unichr(x) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s' % (unichr(x) * 3) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x)) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x) * 15) a = self.RPythonAnnotator() - py.test.raises(NotImplementedError, a.build_types, f, [int]) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) def test_strformatting_tuple(self): @@ -3434,7 +3434,7 @@ return [1, 2, 3][s:e] a = self.RPythonAnnotator() - py.test.raises(TypeError, "a.build_types(f, [int, int])") + py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(x): @@ -4038,7 +4038,6 @@ s_objs = set([type(the_exc.s_obj1), type(the_exc.s_obj2)]) assert s_objs == set([annmodel.SomeInteger, annmodel.SomeString]) - assert the_exc.msg == None # Check that this is a generic UnionError def test_unionerror_tuple_size(self): def f(x): @@ -4051,7 +4050,7 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == "RPython cannot unify tuples of different length: 2 versus 1" + assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg def test_unionerror_signedness(self): def f(x): @@ -4064,8 +4063,8 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == ("RPython cannot prove that these integers are of " - "the same signedness") + assert ("RPython cannot prove that these integers are of the " + "same signedness" in exc.value.msg) def test_unionerror_instance(self): class A(object): pass @@ -4081,7 +4080,8 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == ("RPython cannot unify instances with no common base class") + assert ("RPython cannot unify instances with no common base class" + in exc.value.msg) def test_unionerror_iters(self): @@ -4096,7 +4096,27 @@ with py.test.raises(annmodel.UnionError) as exc: a.build_types(f, [int]) - assert exc.value.msg == ("RPython cannot unify 
incompatible iterator variants") + assert ("RPython cannot unify incompatible iterator variants" in + exc.value.msg) + + def test_variable_getattr(self): + class A(object): pass + def f(y): + a = A() + return getattr(a, y) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.AnnotatorError) as exc: + a.build_types(f, [str]) + assert ("variable argument to getattr" in exc.value.msg) + + def test_bad_call(self): + def f(x): + return x() + a = self.RPythonAnnotator() + with py.test.raises(annmodel.AnnotatorError) as exc: + a.build_types(f, [str]) + assert ("Cannot prove that the object is callable" in exc.value.msg) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/test/test_argument.py b/rpython/annotator/test/test_argument.py --- a/rpython/annotator/test/test_argument.py +++ b/rpython/annotator/test/test_argument.py @@ -77,10 +77,6 @@ new_args = args.unmatch_signature(sig, data) assert args.unpack() == new_args.unpack() - args = make_arguments_for_translation(space, [1], {'c': 5, 'd': 7}) - sig = Signature(['a', 'b', 'c'], None, 'kw') - py.test.raises(TypeError, args.match_signature, sig, [2, 3]) - def test_rawshape(self): space = DummySpace() args = make_arguments_for_translation(space, [1,2,3]) diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -2,6 +2,7 @@ from rpython.annotator.model import * from rpython.annotator.listdef import ListDef +from rpython.translator.translator import TranslationContext listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()])) @@ -174,6 +175,28 @@ assert f2.contains(f1) assert f1.contains(f2) +def compile_function(function, annotation=[]): + t = TranslationContext() + t.buildannotator().build_types(function, annotation) + +class AAA(object): + pass + +def test_blocked_inference1(): + def blocked_inference(): + return AAA().m() + + py.test.raises(AnnotatorError, compile_function, blocked_inference) + +def test_blocked_inference2(): + def blocked_inference(): + a = AAA() + b = a.x + return b + + py.test.raises(AnnotatorError, compile_function, blocked_inference) + + if __name__ == '__main__': for name, value in globals().items(): if name.startswith('test_'): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -14,7 +14,7 @@ from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? -from rpython.tool.error import AnnotatorError +from rpython.annotator.model import AnnotatorError # convenience only! 
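# (Illustrative note, not part of this diff: the hunks below replace various
#  TypeErrors with AnnotatorErrors carrying clearer messages.  Examples of
#  RPython code that now fails annotation with such a message, assuming the
#  rules shown further down:
#      def f(obj, name):
#          return getattr(obj, name)   # non-constant attribute name
#      def g(s):
#          return s.decode('utf-16')   # only ascii/latin-1/utf-8 are accepted
#      def h(x):
#          return hash(x)              # hash() rejected; see objectmodel.compute_xxx()
#  )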
def immutablevalue(x): @@ -84,8 +84,7 @@ return obj.is_true() def hash(obj): - raise TypeError, ("cannot use hash() in RPython; " - "see objectmodel.compute_xxx()") + raise AnnotatorError("cannot use hash() in RPython") def str(obj): getbookkeeper().count('str', obj) @@ -158,9 +157,7 @@ return obj.call(getbookkeeper().build_args("call_args", args_s)) def call(obj, args, implicit_init=False): - #raise Exception, "cannot follow call_args%r" % ((obj, args),) - getbookkeeper().warning("cannot follow call(%r, %r)" % (obj, args)) - return SomeObject() + raise AnnotatorError("Cannot prove that the object is callable") def op_contains(obj, s_element): return s_Bool @@ -341,10 +338,10 @@ def check_negative_slice(s_start, s_stop): if isinstance(s_start, SomeInteger) and not s_start.nonneg: - raise TypeError("slicing: not proven to have non-negative start") + raise AnnotatorError("slicing: not proven to have non-negative start") if isinstance(s_stop, SomeInteger) and not s_stop.nonneg and \ getattr(s_stop, 'const', 0) != -1: - raise TypeError("slicing: not proven to have non-negative stop") + raise AnnotatorError("slicing: not proven to have non-negative stop") class __extend__(SomeDict): @@ -529,10 +526,10 @@ class __extend__(SomeUnicodeString): def method_encode(uni, s_enc): if not s_enc.is_constant(): - raise TypeError("Non-constant encoding not supported") + raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): - raise TypeError("Encoding %s not supported for unicode" % (enc,)) + raise AnnotatorError("Encoding %s not supported for unicode" % (enc,)) return SomeString() method_encode.can_only_throw = [UnicodeEncodeError] @@ -562,10 +559,10 @@ def method_decode(str, s_enc): if not s_enc.is_constant(): - raise TypeError("Non-constant encoding not supported") + raise AnnotatorError("Non-constant encoding not supported") enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): - raise TypeError("Encoding %s not supported for strings" % (enc,)) + raise AnnotatorError("Encoding %s not supported for strings" % (enc,)) return SomeUnicodeString() method_decode.can_only_throw = [UnicodeDecodeError] @@ -653,7 +650,7 @@ if s_attr.is_constant() and isinstance(s_attr.const, str): attr = s_attr.const return ins._true_getattr(attr) - return SomeObject() + raise AnnotatorError("A variable argument to getattr is not RPython") getattr.can_only_throw = [] def setattr(ins, s_attr, s_value): @@ -729,7 +726,7 @@ def setattr(pbc, s_attr, s_value): if not pbc.isNone(): - raise AnnotatorError("setattr on %r" % pbc) + raise AnnotatorError("Cannot modify attribute of a pre-built constant") def call(pbc, args): bookkeeper = getbookkeeper() @@ -751,7 +748,8 @@ # whose length is the constant 0; so let's tentatively answer 0. return immutablevalue(0) else: - return SomeObject() # len() on a pbc? 
no chance + # This should probably never happen + raise AnnotatorError("Cannot call len on a pbc") # annotation of low-level types from rpython.annotator.model import SomePtr, SomeLLADTMeth diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -68,19 +68,6 @@ lines = source_lines1(graph, *args, **kwds) return ['In %r:' % (graph,)] + lines -class AnnotatorError(Exception): - pass - -class NoSuchAttrError(Exception): - pass - -class ErrorWrapper(object): - def __init__(self, msg): - self.msg = msg - - def __repr__(self): - return '<%s>' % (self.msg,) - def gather_error(annotator, graph, block, operindex): msg = [""] @@ -94,19 +81,24 @@ msg += source_lines(graph, block, operindex, long=True) if oper is not None: if SHOW_ANNOTATIONS: - msg.append("Known variable annotations:") - for arg in oper.args + [oper.result]: - if isinstance(arg, Variable): - try: - msg.append(" " + str(arg) + " = " + str(annotator.binding(arg))) - except KeyError: - pass + msg += format_annotations(annotator, oper) + msg += [''] return "\n".join(msg) +def format_annotations(annotator, oper): + msg = [] + msg.append("Known variable annotations:") + for arg in oper.args + [oper.result]: + if isinstance(arg, Variable): + try: + msg.append(" " + str(arg) + " = " + str(annotator.binding(arg))) + except KeyError: + pass + return msg + def format_blocked_annotation_error(annotator, blocked_blocks): text = [] for block, (graph, index) in blocked_blocks.items(): - text.append('\n') text.append("Blocked block -- operation cannot succeed") text.append(gather_error(annotator, graph, block, index)) return '\n'.join(text) diff --git a/rpython/tool/test/test_error.py b/rpython/tool/test/test_error.py --- a/rpython/tool/test/test_error.py +++ b/rpython/tool/test/test_error.py @@ -3,33 +3,10 @@ """ from rpython.translator.translator import TranslationContext -from rpython.tool.error import AnnotatorError from rpython.annotator.model import UnionError import py - -def compile_function(function, annotation=[]): - t = TranslationContext() - t.buildannotator().build_types(function, annotation) - -class AAA(object): - pass - -def test_blocked_inference1(): - def blocked_inference(): - return AAA().m() - - py.test.raises(AnnotatorError, compile_function, blocked_inference) - -def test_blocked_inference2(): - def blocked_inference(): - a = AAA() - b = a.x - return b - - py.test.raises(AnnotatorError, compile_function, blocked_inference) - def test_someobject(): def someobject_degeneration(n): if n == 3: From noreply at buildbot.pypy.org Wed Aug 28 20:34:42 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 28 Aug 2013 20:34:42 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: Fix __pypy__.bytebuffer. Message-ID: <20130828183442.F30661C1152@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r66422:d73e50624b31 Date: 2013-08-28 17:06 +0100 http://bitbucket.org/pypy/pypy/changeset/d73e50624b31/ Log: Fix __pypy__.bytebuffer. 
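A note on the pattern shared by this and the next few refactor-buffer-api
changesets: an interp-level Buffer implementation (ByteBuffer, RawBuffer and
similar) is no longer handed to space.wrap() directly; it is wrapped
explicitly in the W_Buffer class from pypy.module.__builtin__.interp_memoryview.
A minimal sketch of that pattern, where return_to_applevel() is a hypothetical
helper and not code from the branch:

    from pypy.module.__builtin__.interp_memoryview import W_Buffer

    def return_to_applevel(space, raw_buffer):
        # previously the buffer was wrapped implicitly:
        #     return space.wrap(raw_buffer)
        # now the W_Root wrapper is constructed explicitly:
        return W_Buffer(raw_buffer)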
diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -4,6 +4,7 @@ from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.gateway import unwrap_spec +from pypy.module.__builtin__.interp_memoryview import W_Buffer class ByteBuffer(RWBuffer): @@ -23,4 +24,4 @@ @unwrap_spec(length=int) def bytebuffer(space, length): - return space.wrap(ByteBuffer(length)) + return W_Buffer(ByteBuffer(length)) From noreply at buildbot.pypy.org Wed Aug 28 20:34:44 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 28 Aug 2013 20:34:44 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: Fix _io module. Message-ID: <20130828183444.3D47F1C1160@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r66423:0abbdca2f517 Date: 2013-08-28 17:07 +0100 http://bitbucket.org/pypy/pypy/changeset/0abbdca2f517/ Log: Fix _io module. diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,6 +4,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.buffer import RWBuffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -521,7 +522,7 @@ def _raw_read(self, space, buffer, start, length): length = intmask(length) - w_buf = space.wrap(RawBuffer(buffer, start, length)) + w_buf = W_Buffer(RawBuffer(buffer, start, length)) while True: try: w_size = space.call_method(self.w_raw, "readinto", w_buf) From noreply at buildbot.pypy.org Wed Aug 28 20:34:41 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 28 Aug 2013 20:34:41 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: Fix pypy/interpreter/test/test_buffer.py. Message-ID: <20130828183441.A7CBB1C1148@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r66421:8f6362df0589 Date: 2013-08-28 16:58 +0100 http://bitbucket.org/pypy/pypy/changeset/8f6362df0589/ Log: Fix pypy/interpreter/test/test_buffer.py. 
diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,5 +1,5 @@ import py -from pypy.interpreter.buffer import Buffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.tool.udir import udir testdir = udir.ensure('test_buffer', dir=1) @@ -11,19 +11,17 @@ space = self.space w_hello = space.wrap('hello world') buf = space.buffer_w(w_hello) - assert isinstance(buf, Buffer) assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.wrap(buf)) is buf + assert space.buffer_w(W_Buffer(buf)) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.buffer(w_hello)) == 'hello world' + assert space.bufferstr_w(W_Buffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - space.raises_w(space.w_TypeError, space.buffer, space.wrap(5)) def test_file_write(self): space = self.space - w_buffer = space.buffer(space.wrap('hello world')) + w_buffer = W_Buffer(space.buffer_w(space.wrap('hello world'))) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') From noreply at buildbot.pypy.org Wed Aug 28 20:36:48 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 28 Aug 2013 20:36:48 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: cpyext fix #1 Message-ID: <20130828183648.174361C1148@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r66424:237e3312dceb Date: 2013-08-28 19:37 +0100 http://bitbucket.org/pypy/pypy/changeset/237e3312dceb/ Log: cpyext fix #1 diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -3,9 +3,10 @@ cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref -from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer +from pypy.interpreter.buffer import StringBuffer, SubBuffer from pypy.interpreter.error import OperationError from pypy.module.array.interp_array import ArrayBuffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer PyBufferObjectStruct = lltype.ForwardReference() @@ -24,7 +25,7 @@ @bootstrap_function def init_bufferobject(space): "Type description of PyBufferObject" - make_typedescr(space.gettypefor(Buffer).instancetypedef, + make_typedescr(space.gettypefor(W_Buffer).instancetypedef, basestruct=PyBufferObject.TO, attach=buffer_attach, dealloc=buffer_dealloc, diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -28,7 +28,7 @@ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) -from pypy.interpreter.buffer import Buffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.interpreter.error import OperationError from rpython.rlib.rstring import rsplit from rpython.rlib.objectmodel import specialize @@ -509,7 +509,7 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) - if 
space.is_w(w_type, space.gettypefor(Buffer)): + if space.is_w(w_type, space.gettypefor(W_Buffer)): setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, From noreply at buildbot.pypy.org Wed Aug 28 20:57:26 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 28 Aug 2013 20:57:26 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: Fix. Message-ID: <20130828185726.793D91C1147@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r66425:00d7bc2225d1 Date: 2013-08-28 19:57 +0100 http://bitbucket.org/pypy/pypy/changeset/00d7bc2225d1/ Log: Fix. diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -173,6 +173,9 @@ def __init__(self, buf): self.buf = buf + def buffer_w(self, space): + return self.buf + @staticmethod def descr_new(space, w_subtype, w_object): w_memoryview = W_MemoryView(space.buffer_w(w_object)) From noreply at buildbot.pypy.org Thu Aug 29 12:24:32 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 29 Aug 2013 12:24:32 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: random delete Message-ID: <20130829102432.C4CD01C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66426:607ca4f81d20 Date: 2013-08-29 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/607ca4f81d20/ Log: random delete diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -45,8 +45,6 @@ map(mapping, op.getarglist()), mapping(op.result), newdescr) - if op.getfailargs() is not None: - newop.setfailargs(map(mapping, op.getfailargs())) self.operations.append(newop) class WeakrefDescr(AbstractDescr): @@ -694,19 +692,11 @@ # ----------------------------------------------------- def fail_guard(self, descr, saved_data=None): - values = [] - for box in self.current_op.getfailargs(): - if box is not None: - value = self.env[box] - else: - value = None - values.append(value) if hasattr(descr, '_llgraph_bridge'): target = (descr._llgraph_bridge, -1) - values = [value for value in values if value is not None] - raise Jump(target, values) + raise Jump(target, self.frontend_env) else: - raise ExecutionFinished(LLDeadFrame(descr, values, + raise ExecutionFinished(LLDeadFrame(descr, self.frontend_env, self.last_exception, saved_data)) @@ -827,6 +817,9 @@ def execute_jump(self, descr, *args): raise Jump(descr._llgraph_target, args) + def execute_resume_put(self, descr, box, depth, position): + xxx + def _do_math_sqrt(self, value): import math y = support.cast_from_floatstorage(lltype.Float, value) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -164,6 +164,8 @@ ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.RESUME_PUT, [i2, ConstInt(0), ConstInt(0)], + None), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] @@ -2240,7 +2242,7 @@ for i in range(5): called = [] - + FUNC = self.FuncType([lltype.Signed] * i, lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), 
func_void) calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -486,17 +486,9 @@ _counter = 0 # on a GUARD_VALUE, there is one counter per value; _counters = None # they get stored in _counters then. - # this class also gets the following attributes stored by resume.py code - - # XXX move all of unused stuff to guard_op, now that we can have - # a separate class, so it does not survive that long - rd_snapshot = None - rd_frame_info_list = None - rd_numb = lltype.nullptr(NUMBERING) - rd_count = 0 - rd_consts = None - rd_virtuals = None - rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) + # the following attributes are used by the resume + rd_loop = None # keeping the loop alive + rd_bytecode_position = -1 # position in the generated bytecode CNT_BASE_MASK = 0x0FFFFFFF # the base counter value CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard @@ -628,32 +620,11 @@ self, inputargs, new_loop.operations, new_loop.original_jitcell_token) - def copy_all_attributes_into(self, res): - # XXX a bit ugly to have to list them all here - res.rd_snapshot = self.rd_snapshot - res.rd_frame_info_list = self.rd_frame_info_list - res.rd_numb = self.rd_numb - res.rd_consts = self.rd_consts - res.rd_virtuals = self.rd_virtuals - res.rd_pendingfields = self.rd_pendingfields - res.rd_count = self.rd_count - - def _clone_if_mutable(self): - res = ResumeGuardDescr() - self.copy_all_attributes_into(res) - return res - class ResumeGuardNotInvalidated(ResumeGuardDescr): - def _clone_if_mutable(self): - res = ResumeGuardNotInvalidated() - self.copy_all_attributes_into(res) - return res + pass class ResumeAtPositionDescr(ResumeGuardDescr): - def _clone_if_mutable(self): - res = ResumeAtPositionDescr() - self.copy_all_attributes_into(res) - return res + pass class AllVirtuals: llopaque = True @@ -736,12 +707,6 @@ hidden_all_virtuals = obj.hide(metainterp_sd.cpu) metainterp_sd.cpu.set_savedata_ref(deadframe, hidden_all_virtuals) - def _clone_if_mutable(self): - res = ResumeGuardForcedDescr(self.metainterp_sd, - self.jitdriver_sd) - self.copy_all_attributes_into(res) - return res - class AbstractResumeGuardCounters(object): # Completely custom algorithm for now: keep 5 pairs (value, counter), diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -546,14 +546,11 @@ self.metainterp_sd.profiler.count(jitprof.Counters.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.Counters.OPT_GUARDS) - pendingfields = self.pendingfields self.pendingfields = None if self.replaces_guard and op in self.replaces_guard: self.replace_op(self.replaces_guard[op], op) del self.replaces_guard[op] return - else: - op = self.store_final_boxes_in_guard(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True if op.result: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -53,9 +53,6 @@ self.copy_constants(self.registers_r, jitcode.constants_r, ConstPtr) self.copy_constants(self.registers_f, jitcode.constants_f, ConstFloat) self._result_argcode = 'v' - # for resume.py operation - self.parent_resumedata_snapshot = None - 
self.parent_resumedata_frame_info_list = None # counter for unrolling inlined loops self.unroll_iterations = 1 @@ -1047,7 +1044,7 @@ saved_pc = self.pc self.pc = orgpc resumedescr = compile.ResumeAtPositionDescr() - self.metainterp.capture_resumedata(resumedescr, orgpc) + #self.metainterp.capture_resumedata(resumedescr, orgpc) self.metainterp.reached_loop_header(greenboxes, redboxes, resumedescr) self.pc = saved_pc @@ -1791,7 +1788,7 @@ resumedescr = compile.ResumeGuardDescr() guard_op = self.history.record(opnum, moreargs, None, descr=resumedescr) - self.capture_resumedata(resumedescr, resumepc) + #self.capture_resumedata(resumedescr, resumepc) self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) # count self.attach_debug_info(guard_op) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -472,6 +472,9 @@ 'ENTER_FRAME/1d', 'LEAVE_FRAME/0', 'RESUME_PUT/3', + 'BACKEND_PUT/3', + # same as resume_put, but the first arg is backend-dependent, + # instead of a box '_RESUME_LAST', '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- From noreply at buildbot.pypy.org Thu Aug 29 12:24:34 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 29 Aug 2013 12:24:34 +0200 (CEST) Subject: [pypy-commit] pypy resume-refactor: (fijal, arigo) change resume_put to backend_put for backend operations Message-ID: <20130829102434.1E6691C0189@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r66427:539d5f8c5ee9 Date: 2013-08-29 11:23 +0100 http://bitbucket.org/pypy/pypy/changeset/539d5f8c5ee9/ Log: (fijal, arigo) change resume_put to backend_put for backend operations diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/llsupport/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -38,7 +38,7 @@ v = op.getarg(0) loc = self.regalloc.loc(v) pos = loc.get_jitframe_position() - self.newops.append(op.copy_and_change(rop.RESUME_PUT, + self.newops.append(op.copy_and_change(rop.BACKEND_PUT, args=[ConstInt(pos), op.getarg(1), op.getarg(2)])) diff --git a/rpython/jit/backend/llsupport/test/test_resume.py b/rpython/jit/backend/llsupport/test/test_resume.py --- a/rpython/jit/backend/llsupport/test/test_resume.py +++ b/rpython/jit/backend/llsupport/test/test_resume.py @@ -36,7 +36,7 @@ expected_resume = parse(""" [] enter_frame(-1, descr=jitcode) - resume_put(28, 0, 2) + backend_put(28, 0, 2) leave_frame() """, namespace={'jitcode': jitcode}).operations equaloplists(descr.rd_loop_token.rd_bytecode, expected_resume) diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -23,7 +23,7 @@ self.enter_frame(op.getarg(0).getint(), descr) elif op.getopnum() == rop.LEAVE_FRAME: self.leave_frame() - elif op.getopnum() == rop.RESUME_PUT: + elif op.getopnum() == rop.BACKEND_PUT: self.put(op.getarg(0).getint(), op.getarg(1).getint(), op.getarg(2).getint()) else: diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -40,7 +40,7 @@ resume_loop = parse(""" [] enter_frame(-1, descr=jitcode1) - resume_put(10, 0, 1) + backend_put(10, 0, 1) leave_frame() """, namespace={'jitcode1': jitcode}) 
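# (Illustrative sketch, not code from the changeset: taken together, the hunks
#  above describe a guard's resume state as a flat list of resume operations.
#  ENTER_FRAME/LEAVE_FRAME open and close virtual frames, and BACKEND_PUT is
#  RESUME_PUT with its first argument rewritten by the backend into a jitframe
#  position (loc.get_jitframe_position() in resumebuilder.py above).  A reader
#  would replay the bytecode roughly like this, up to the position recorded in
#  the guard descr:
#      for op in resume_ops[:rd_bytecode_position]:
#          if op.getopnum() == rop.ENTER_FRAME:
#              interp.enter_frame(op.getarg(0).getint(), op.getdescr())
#          elif op.getopnum() == rop.LEAVE_FRAME:
#              interp.leave_frame()
#          elif op.getopnum() == rop.BACKEND_PUT:
#              interp.put(op.getarg(0).getint(),   # backend location (jitframe pos)
#                         op.getarg(1).getint(),   # frame depth
#                         op.getarg(2).getint())   # position within that frame
#  which matches the small interpreter added in resume2.py above.)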
descr = Descr() @@ -62,12 +62,12 @@ resume_loop = parse(""" [] enter_frame(-1, descr=jitcode1) - resume_put(11, 0, 2) + backend_put(11, 0, 2) enter_frame(12, descr=jitcode2) - resume_put(12, 0, 3) - resume_put(8, 1, 4) + backend_put(12, 0, 3) + backend_put(8, 1, 4) leave_frame() - resume_put(10, 0, 1) + backend_put(10, 0, 1) leave_frame() """, namespace={'jitcode1': jitcode1, 'jitcode2': jitcode2}) metainterp = MockMetaInterp() From noreply at buildbot.pypy.org Thu Aug 29 13:34:51 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Aug 2013 13:34:51 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: merge default Message-ID: <20130829113451.0E5E81C01F5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r832:d2fce15a6351 Date: 2013-08-02 11:32 +0200 http://bitbucket.org/pypy/buildbot/changeset/d2fce15a6351/ Log: merge default diff --git a/bot2/pypybuildbot/arm_master.py b/bot2/pypybuildbot/arm_master.py --- a/bot2/pypybuildbot/arm_master.py +++ b/bot2/pypybuildbot/arm_master.py @@ -56,7 +56,8 @@ + crosstranslationjitargs), platform='linux-armhf-raring', interpreter='pypy', - prefix=['schroot', '-c', 'raring']) + prefix=['schroot', '-c', 'raring'], + trigger='JITLINUXARMHF_RARING_scheduler') pypyARMJITTranslatedTestFactory = pypybuilds.TranslatedTests( translationArgs=(crosstranslationargs @@ -89,6 +90,15 @@ app_tests=True, platform='linux-armhf-raspbian', ) +pypyARMHF_RARING_JITTranslatedTestFactory = pypybuilds.TranslatedTests( + translationArgs=(crosstranslationargs + + jit_translation_args + + crosstranslationjitargs), + lib_python=True, + pypyjit=True, + app_tests=True, + platform='linux-armhf-raring', + ) # APPLVLLINUXARM = "pypy-c-app-level-linux-armel" APPLVLLINUXARMHF_v7 = "pypy-c-app-level-linux-armhf-v7" @@ -97,6 +107,7 @@ JITLINUXARM = "pypy-c-jit-linux-armel" JITLINUXARMHF_v7 = "pypy-c-jit-linux-armhf-v7" JITLINUXARMHF_RASPBIAN = "pypy-c-jit-linux-armhf-raspbian" +JITLINUXARMHF_RARING = "pypy-c-jit-linux-armhf-raring" JITBACKENDONLYLINUXARMEL = "jitbackendonly-own-linux-armel" JITBACKENDONLYLINUXARMHF = "jitbackendonly-own-linux-armhf" @@ -155,6 +166,10 @@ JITLINUXARMHF_RASPBIAN, # triggered by BUILDJITLINUXARMHF_RASPBIAN JITLINUXARMHF_v7, # triggered by BUILDJITLINUXARMHF_RASPBIAN, on cubieboard-bob ]), + + Triggerable("JITLINUXARMHF_RARING_scheduler", [ + JITLINUXARMHF_RARING, # triggered by BUILDJITLINUXARMHF_RARING + ]) ] builders = [ @@ -231,6 +246,12 @@ 'category': 'linux-armhf', "locks": [ARMBoardLock.access('counting')], }, + {"name": JITLINUXARMHF_RARING, + "slavenames": ["greenbox3-node0"], + 'builddir': JITLINUXARMHF_RARING, + 'factory': pypyARMHF_RARING_JITTranslatedTestFactory, + 'category': 'linux-armhf', + }, # Translation Builders for ARM {"name": BUILDLINUXARM, "slavenames": ['hhu-cross-armel'], diff --git a/bot2/pypybuildbot/summary.py b/bot2/pypybuildbot/summary.py --- a/bot2/pypybuildbot/summary.py +++ b/bot2/pypybuildbot/summary.py @@ -179,14 +179,14 @@ run_info = {'URL': run_url, 'elapsed': pytest_elapsed or None, 'times': build.getTimes()} outcome_set = RevisionOutcomeSet(rev, key, run_info) - someresult = False - # "*-run" categories mean the build is not a test build! - if builderStatus.category: - someresult = builderStatus.category.endswith("-run") + #someresult = False + ## "*-run" categories mean the build is not a test build! 
+ #if builderStatus.category: + # someresult = builderStatus.category.endswith("-run") if pytest_logs: for stepName, resultLog in pytest_logs: if resultLog.hasContents(): - someresult = True + #someresult = True outcome_set.populate(resultLog) failedtests = not not outcome_set.failed @@ -201,11 +201,12 @@ failure = text break - if not someresult or failure is not None: - if failure: - name = '"%s"' % failure # quote - else: - name = '' + #if not someresult or failure is not None: + if failure is not None: + #if failure: + name = '"%s"' % failure # quote + #else: + # name = '' outcome_set.populate_one(name, '!') return outcome_set From noreply at buildbot.pypy.org Thu Aug 29 13:34:53 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Aug 2013 13:34:53 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: actually remove the button from the template Message-ID: <20130829113453.3E2001C01F5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r833:772861aab8e7 Date: 2013-08-28 13:41 +0200 http://bitbucket.org/pypy/buildbot/changeset/772861aab8e7/ Log: actually remove the button from the template diff --git a/master/templates/build.html b/master/templates/build.html --- a/master/templates/build.html +++ b/master/templates/build.html @@ -215,12 +215,7 @@
Elapsed{{ elapsed }}
- + From noreply at buildbot.pypy.org Thu Aug 29 13:34:54 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Aug 2013 13:34:54 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: update templates Message-ID: <20130829113454.5AD7A1C01F5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r834:fccaa5a3d14f Date: 2013-08-28 13:51 +0200 http://bitbucket.org/pypy/buildbot/changeset/fccaa5a3d14f/ Log: update templates diff --git a/master/templates/builder.html b/master/templates/builder.html --- a/master/templates/builder.html +++ b/master/templates/builder.html @@ -12,6 +12,10 @@ (view in summary)

+{% if description %} +
{{ description }}
+{% endif %} +
{% if current %} @@ -74,6 +78,8 @@ {{ build_table(recent) }} +Show more +
@@ -90,12 +96,15 @@ {{ s.name|e }} {% if s.connected %} - connected - {{ s.admin|email if s.admin else ""}} + {% if s.paused %} + paused + {% else %} + connected + {% endif %} {% else %} - offline - + offline {% endif %} + {{ s.admin|email if s.admin else ""}} {% else %} no slaves attached diff --git a/master/templates/layout.html b/master/templates/layout.html --- a/master/templates/layout.html +++ b/master/templates/layout.html @@ -15,6 +15,7 @@ {{ pageTitle|e }} + {% block morehead %}{% endblock %} {% endblock %} @@ -45,10 +46,10 @@ {%- block barecontent -%}
- - {% if alert_msg != '' %} -
- {{alert_msg}} + + {% if alert_msg != "" %} +
+ {{ alert_msg }}
{% endif %} From noreply at buildbot.pypy.org Thu Aug 29 13:34:55 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Aug 2013 13:34:55 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: update css Message-ID: <20130829113455.7E5F71C01F5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r835:d8589224fc11 Date: 2013-08-28 13:52 +0200 http://bitbucket.org/pypy/buildbot/changeset/d8589224fc11/ Log: update css diff --git a/master/public_html/default.css b/master/public_html/default.css --- a/master/public_html/default.css +++ b/master/public_html/default.css @@ -368,6 +368,12 @@ border-color: #A77272; } +.failure-again { + color: #000; + background-color: #eA9; + border-color: #A77272; +} + .warnings { color: #FFFFFF; background-color: #fa3; @@ -398,6 +404,12 @@ border-color: #C5C56D; } +.paused { + color: #FFFFFF; + background-color: #8080FF; + border-color: #dddddd; +} + .offline,td.offline { color: #FFFFFF; background-color: #777777; @@ -553,6 +565,10 @@ display: none; } +pre { + white-space: pre-wrap; +} + /* change comments (use regular colors here) */ pre.comments>a:link,pre.comments>a:visited { color: blue; From noreply at buildbot.pypy.org Thu Aug 29 13:34:56 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Aug 2013 13:34:56 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: update requirements.txt Message-ID: <20130829113456.B447F1C01F5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r836:ed1d0f31a591 Date: 2013-08-28 14:19 +0200 http://bitbucket.org/pypy/buildbot/changeset/ed1d0f31a591/ Log: update requirements.txt diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,15 @@ Flask==0.9 -Jinja2==2.6 -SQLAlchemy==0.8.0 +Jinja2==2.7.1 +MarkupSafe==0.18 +SQLAlchemy==0.7.9 Tempita==0.5.1 -Twisted==13.0.0 +Twisted==13.1.0 Werkzeug==0.8.3 argparse==1.2.1 -buildbot==0.8.7p1 +buildbot==0.8.8 buildbot-slave==0.8.6p1 decorator==3.4.0 +mock==1.0.1 py==1.4.9 pytest==2.2.4 python-dateutil==1.5 From noreply at buildbot.pypy.org Thu Aug 29 13:34:57 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Aug 2013 13:34:57 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: update summary and fix tests Message-ID: <20130829113457.CFBA01C01F5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r837:2bb9286ac8b1 Date: 2013-08-28 21:04 +0200 http://bitbucket.org/pypy/buildbot/changeset/2bb9286ac8b1/ Log: update summary and fix tests diff --git a/bot2/pypybuildbot/summary.py b/bot2/pypybuildbot/summary.py --- a/bot2/pypybuildbot/summary.py +++ b/bot2/pypybuildbot/summary.py @@ -661,8 +661,7 @@ def getTitle(self, request): status = self.getStatus(request) - return "%s: summaries of last %d revisions" % (status.getProjectName(), - N) + return "%s: summaries of last %d revisions" % (status.getTitle(), N) @staticmethod def _prune_runs(runs, cutnum): @@ -686,8 +685,10 @@ except KeyError: pass builder = status.botmaster.builders[builderName] + factory = builder.config.factory branch = None - for _, kw in builder.buildFactory.steps: + for step in factory.steps: + kw = step.kwargs if 'defaultBranch' in kw: if kw.get('explicitBranch'): branch = kw['defaultBranch'] @@ -722,7 +723,6 @@ only_builder or only_branches) cat_branches = {} - for builderName in status.getBuilderNames(only_categories): if not test_builder(builderName): continue diff --git a/bot2/pypybuildbot/test/test_summary.py 
b/bot2/pypybuildbot/test/test_summary.py --- a/bot2/pypybuildbot/test/test_summary.py +++ b/bot2/pypybuildbot/test/test_summary.py @@ -27,7 +27,7 @@ s a/b.py:test_three S a/c.py:test_four """) - + rev_outcome_set.populate(log) assert rev_outcome_set.skipped == set([("a.b","test_three"), @@ -67,7 +67,7 @@ x a/c.py:test_nine x a/c.py:test_ten """) - + rev_outcome_set.populate(log) sum = rev_outcome_set.get_summary() assert sum.p == 1 @@ -80,7 +80,7 @@ rev_outcome_set = summary.RevisionOutcomeSet('0') log = StringIO("") rev_outcome_set.populate(log) - + def test_populate_longrepr(self): rev_outcome_set = summary.RevisionOutcomeSet('50000') log = StringIO("""F a/b.py:test_one @@ -90,7 +90,7 @@ s a/b.py:test_three some skip """) - + rev_outcome_set.populate(log) assert len(rev_outcome_set.skipped) == 1 @@ -115,7 +115,7 @@ F a/b.py:test_two \xc3\xa5 bar """) - + rev_outcome_set.populate(log) assert len(rev_outcome_set.failed) == 2 @@ -133,7 +133,7 @@ ! ! /a/b/c.py:92 """) - + rev_outcome_set.populate(log) assert rev_outcome_set.failed == set([ @@ -151,12 +151,12 @@ log = StringIO("""x a/b.py EXC """) - + rev_outcome_set.populate(log) assert rev_outcome_set.numxfailed == 1 - - + + def test_absent_outcome(self): rev_outcome_set = summary.RevisionOutcomeSet('50000') @@ -169,7 +169,7 @@ def load(x, y): calls.append(y) return y - + cache._load_outcome_set = load res = cache.get('status', 'a') @@ -183,14 +183,14 @@ cache.get('status', 'b') res = cache.get('status', 'c') assert res == 'c' - + assert calls == ['a', 'b', 'c'] calls = [] res = cache.get('status', 'd') assert res == 'd' assert cache.get('status', 'c') == 'c' - assert cache.get('status', 'b') == 'b' + assert cache.get('status', 'b') == 'b' assert calls == ['d'] res = cache.get('status', 'a') @@ -208,18 +208,18 @@ s a/b.py:test_three x a/b.py:test_four """) - + rev_outcome_set_foo.populate(log) - key_bar = ('bar', 7) + key_bar = ('bar', 7) rev_outcome_set_bar = summary.RevisionOutcomeSet('50000', key_bar) log = StringIO(""". a/b.py:test_one . 
a/b.py:test_two s a/b.py:test_three """) - + rev_outcome_set_bar.populate(log) d = {'foo': rev_outcome_set_foo, @@ -228,7 +228,7 @@ goutcome = summary.GatherOutcomeSet(d) assert goutcome.revision == '50000' - + assert goutcome.failed == set([('foo', 'a.b', 'test_one')]) assert goutcome.skipped == set([('foo', 'a.b', 'test_three'), @@ -273,14 +273,14 @@ assert res == ' ' res = goutcome_top.get_longrepr(('what', 'foo', 'a.b', 'test_one')) - assert res == '' + assert res == '' def test_colsizes(): failed = [('a', 'abc', 'd'), ('ab', 'c', 'xy'), ('ab', '', 'cd')] - + res = summary.colsizes(failed) - + assert res == [2,3,2] def test__prune_runs(): @@ -330,15 +330,15 @@ res = summary.show_elapsed(0.25) assert res == "0.25s" res = summary.show_elapsed(1.0) - assert res == "1.00s" + assert res == "1.00s" res = summary.show_elapsed(1.25) - assert res == "1.25s" + assert res == "1.25s" res = summary.show_elapsed(4.5) assert res == "4.50s" res = summary.show_elapsed(5.25) assert res == "5s" res = summary.show_elapsed(5.5) - assert res == "6s" + assert res == "6s" res = summary.show_elapsed(2*60+30) assert res == "2m30" res = summary.show_elapsed(4*60+30) @@ -348,22 +348,33 @@ res = summary.show_elapsed(61*60) assert res == "1h1" res = summary.show_elapsed(90*60) - assert res == "1h30" + assert res == "1h30" -def _BuilderToStatus(status): - setup = {'name': 'builder', 'builddir': 'BUILDDIR', - 'slavebuilddir': 'SLAVEBUILDDIR', - 'factory': process_factory.BuildFactory() } - return process_builder.Builder(setup, status) +class FakeMasterConfig(object): + buildbotURL = "http://buildbot/" + logCompressionLimit = 0 + def __init__(self, builders=None): + self.builders = builders + + +class FakeBuilderconfig(object): + validNames = 'name factory slavenames builddir slavebuilddir category ' \ + 'nextSlave nextBuild canStartBuild locks env properties ' \ + 'mergeRequests description'.split() + + def __init__(self, **kwargs): + for kw, item in kwargs.iteritems(): + assert kw in self.validNames + setattr(self, kw, item) class FakeMaster(object): basedir = None - buildbotURL = "http://buildbot/" def __init__(self, builders): self.botmaster = FakeBotMaster(builders) + self.config = FakeMasterConfig() def subscribeToBuildsetCompletions(self, callback): pass @@ -374,6 +385,7 @@ def subscribeToBuildRequests(self, callback): pass + class FakeBotMaster(object): def __init__(self, builders): @@ -384,28 +396,39 @@ self.builderNames.append(name) self.builders[name] = _BuilderToStatus(builder) + class FakeSite(object): def __init__(self, status): self.buildbot_service = FakeService(status) + class FakeService(object): - + def __init__(self, status): self.status = status def getStatus(self): return self.status + class FakeRequest(object): def __init__(self, builders, args={}): master = FakeMaster(builders) - status = status_builder.Status(master, builders) + status = status_builder.Status(master) self.args = args self.site = FakeSite(status) +def _BuilderToStatus(status): + builder = process_builder.Builder(status.name) + builder.builder_status = status + builder.builder_status.basedir = 'BASEDIR' + builder.config = FakeBuilderconfig(factory=process_factory.BuildFactory()) + return builder + + def witness_cat_branch(summary): ref = [None] recentRuns = summary.recentRuns @@ -414,7 +437,6 @@ ref[0] = cat_branch return cat_branch summary.recentRuns = witness - return lambda: ref[0] class FakeLog(object): @@ -424,7 +446,7 @@ self.step = step self.name = name self.cont = cont - + def getStep(self): return self.step @@ -453,11 
+475,12 @@ step.started = t step.finished = t + (n+1)*60 t = step.finished + 30 + builder.buildCache.cache[build.number] = build + builder.buildStarted(build) build.buildFinished() - builder.touchBuildCache(build) n += 1 builder.nextBuildNumber = n - + class TestSummary(object): @@ -475,15 +498,14 @@ assert cat_branch == {} def test_one_build_no_rev(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) + builder = status_builder.BuilderStatus('builder0', None, self.master, '') build = status_builder.BuildStatus(builder, self.master, 0) - builder.buildStarted(build) + build.buildStarted(builder) build.buildFinished() - builder.touchBuildCache(build) - builder.nextBuildNumber = len(builder.buildCache) + builder.nextBuildNumber = len(builder.buildCache.cache) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() @@ -491,58 +513,58 @@ assert cat_branch == {(None, None): ({}, [build])} def test_one_build_no_logs(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) - build = status_builder.BuildStatus(None, builder, 0) - build.started = time.time() + builder = status_builder.BuilderStatus('builder0', None, self.master, '') + build = status_builder.BuildStatus(builder, self.master, 0) + build.started = time.time() build.setProperty('got_revision', '50000', None) build.buildFinished() - builder.touchBuildCache(build) - builder.nextBuildNumber = len(builder.buildCache) + builder.buildCache.cache[build.number] = build + builder.nextBuildNumber = len(builder.buildCache.cache) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() - + revs = cat_branch[(None, None)][0] assert revs.keys() == ['50000'] - assert '<run>' in out + assert 'success' in out def test_one_build_no_logs_failure(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) - build = status_builder.BuildStatus(builder, 0) - build.started = time.time() + builder = status_builder.BuilderStatus('builder0', None, self.master, '') + build = status_builder.BuildStatus(builder, self.master, 0) + build.started = time.time() build.setProperty('got_revision', '50000', None) step = build.addStepWithName('step') step.setText(['step', 'borken']) step.stepFinished(summary.FAILURE) step1 = build.addStepWithName('other') step1.setText(['other', 'borken']) - step1.stepFinished(summary.FAILURE) + step1.stepFinished(summary.FAILURE) build.buildFinished() - builder.touchBuildCache(build) - builder.nextBuildNumber = len(builder.buildCache) + builder.buildCache.cache[build.number] = build + builder.nextBuildNumber = len(builder.buildCache.cache) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() - + revs = cat_branch[(None, None)][0] assert revs.keys() == ['50000'] assert 'step borken' in out - assert 'other borken' not in out - + assert 'other borken' not in out + def test_one_build(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [(60000, "F TEST1\n. 
b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() @@ -556,12 +578,12 @@ assert 'TEST1' in out def test_two_builds(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), ('60001', ". TEST1\n. b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() @@ -583,15 +605,16 @@ assert 'TEST1' in out assert ':-)' in out - assert '\n - + success' in out - + assert re.search(r'\n - ' + r'\+ success', out) is not None def test_two_builds_samerev(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), - ('60000', "F TEST1\n. b")]) + ('60000', "F TEST1\n. b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) out = s.body(req) cat_branch = res() @@ -605,12 +628,12 @@ assert 'TEST1' in out def test_two_builds_recentrev(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), ('60001', "F TEST1\n. b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) req.args = {'recentrev': ['60000']} out = s.body(req) @@ -625,13 +648,13 @@ assert 'TEST1' in out def test_many_builds_query_builder(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), ('60000', ". a\n. b"), - ('60001', "F TEST1\n. b")]) + ('60001', "F TEST1\n. b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) req.args={'builder': ['builder0']} out = s.body(req) @@ -661,13 +684,13 @@ def test_many_builds_query_builder_builds(self): - builder = status_builder.BuilderStatus('builder0', '', self.master) + builder = status_builder.BuilderStatus('builder0', None, self.master, '') add_builds(builder, [('60000', "F TEST1\n. b"), ('60000', ". a\n. b"), - ('60001', "F TEST1\n. b")]) + ('60001', "F TEST1\n. b")]) s = summary.Summary() - res = witness_cat_branch(s) + res = witness_cat_branch(s) req = FakeRequest([builder]) req.args={'builder': ['builder0'], 'builds': ['0','2-2', '7']} @@ -693,21 +716,21 @@ assert 'TEST1' in out def test_many_pytestLogs(self): - builder = status_builder.BuilderStatus('builder1', '', self.master) - build = status_builder.BuildStatus(builder, 0) + builder = status_builder.BuilderStatus('builder1', '', self.master, '') + build = status_builder.BuildStatus(builder, self.master, 0) build.started = time.time() build.setProperty('got_revision', '70000', None) step = build.addStepWithName('pytest') step.logs.extend([FakeLog(step, 'pytestLog', "F TEST1")]) step.setText(["pytest", "failed"]) - step.stepFinished(summary.FAILURE) + step.stepFinished(summary.FAILURE) step2 = build.addStepWithName('pytest2') step2.logs.extend([FakeLog(step, 'pytestLog', ". 
         step2.setText(["pytest2", "aborted"])
         step2.stepFinished(summary.EXCEPTION)
         build.buildFinished()
-        builder.touchBuildCache(build)
-        builder.nextBuildNumber = 1
+        builder.buildCache.cache[build.number] = build
+        builder.nextBuildNumber = len(builder.buildCache.cache)

         s = summary.Summary()
         req = FakeRequest([builder])
@@ -720,23 +743,23 @@
         assert 'pytest2 aborted' in out

     def test_subtle_failures(self):
-        builder = status_builder.BuilderStatus('builder1', '', self.master)
-        build = status_builder.BuildStatus(builder, 0)
+        builder = status_builder.BuilderStatus('builder1', '', self.master, '')
+        build = status_builder.BuildStatus(builder, self.master, 0)
         build.started = time.time()
         build.setProperty('got_revision', '70000', None)
-        step = build.addStepWithName('pytest')
+        step = build.addStepWithName('pytest')
         step.logs.extend([FakeLog(step, 'pytestLog', ". TEST1")])
         step.setText(["pytest", "failed slave lost"])
-        step.stepFinished(summary.FAILURE)
+        step.stepFinished(summary.FAILURE)
         build.buildFinished()
-        builder.touchBuildCache(build)
-        builder.nextBuildNumber = 1
+        builder.buildCache.cache[build.number] = build
+        builder.nextBuildNumber = len(builder.buildCache.cache)

         s = summary.Summary()
         req = FakeRequest([builder])
         out = s.body(req)

-        assert 'pytest failed slave lost' in out
+        assert 'pytest failed slave lost' in out

     def test_category_branch_sorting_key(self):
@@ -765,16 +788,16 @@
         assert res == (2, '', 2, 'release/1')

         res = s._cat_branch_key(('', 'what'))
-        assert res == (2, '', 4, 'what')
+        assert res == (2, '', 4, 'what')

     def test_builders_with_categories(self):
-        builder1 = status_builder.BuilderStatus('builder_foo', 'foo', self.master)
-        builder2 = status_builder.BuilderStatus('builder_bar', 'bar', self.master)
-        builder3 = status_builder.BuilderStatus('builder_', '', self.master)
+        builder1 = status_builder.BuilderStatus('builder_foo', 'foo', self.master, '')
+        builder2 = status_builder.BuilderStatus('builder_bar', 'bar', self.master, '')
+        builder3 = status_builder.BuilderStatus('builder_', '', self.master, '')

         add_builds(builder1, [('60000', "F TEST1\n")])
         add_builds(builder2, [('60000', "F TEST2\n")])
-        add_builds(builder3, [('60000', "F TEST3\n")])
+        add_builds(builder3, [('60000', "F TEST3\n")])

         s = summary.Summary(['foo', 'bar'])
         req = FakeRequest([builder1, builder2, builder3])
@@ -790,7 +813,7 @@
         assert "{bar}" in out

     def test_two_builds_different_rev_digits(self):
-        builder = status_builder.BuilderStatus('builder0', '', self.master)
+        builder = status_builder.BuilderStatus('builder0', '', self.master, '')
         add_builds(builder, [(999, "F TEST1\n. b"),
                              (1000, "F TEST1\n. b")])
@@ -804,16 +827,16 @@
         assert p999builder0-p999 == p1000builder0-p1000+1

     def test_build_times_and_filtering(self):
-        builder1 = status_builder.BuilderStatus('builder1', '', self.master)
-        builder2 = status_builder.BuilderStatus('builder2', '', self.master)
-        
+        builder1 = status_builder.BuilderStatus('builder1', '', self.master, '')
+        builder2 = status_builder.BuilderStatus('builder2', '', self.master, '')
+
         add_builds(builder1, [('60000', "F TEST1\n")])
-
         add_builds(builder2, [('50000', ". TEST2\n")])
         add_builds(builder2, [('60000', "F TEST2\n")])

         builder1.getBuild(0).started = 1228258800 # 3 Dec 2008
         builder1.getBuild(0).finished = 1228258800 # 3 Dec 2008
-        builder2.getBuild(1).started = 1228431600 # 5 Dec 2008
+        builder2.getBuild(1).started = 1228431600 # 5 Dec 2008
         builder2.getBuild(1).finished = 1228431600 # 5 Dec 2008

         builder2.getBuild(0).started = 1227913200 # 29 Nov 2008
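The show_elapsed assertions earlier in this patch pin down the formatting the summary page expects: hundredths of a second below five seconds, rounded whole seconds up to a minute, then compact "2m30" / "1h30" style strings. The following is a minimal sketch that satisfies exactly those assertions; it is an illustrative reconstruction, not the code from pypybuildbot/summary.py:

    def show_elapsed(secs):
        # Illustrative only: short durations keep two decimals, longer ones
        # are rounded to whole seconds, then collapsed to minutes and hours.
        if secs < 5:
            return "%.2fs" % secs
        secs = int(round(secs))
        if secs < 60:
            return "%ds" % secs
        mins, secs = divmod(secs, 60)
        if mins < 60:
            return "%dm%d" % (mins, secs)      # e.g. 150 -> "2m30"
        hours, mins = divmod(mins, 60)
        return "%dh%d" % (hours, mins)         # e.g. 5400 -> "1h30"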
TEST2\n")]) add_builds(builder2, [('60000', "F TEST2\n")]) builder1.getBuild(0).started = 1228258800 # 3 Dec 2008 builder1.getBuild(0).finished = 1228258800 # 3 Dec 2008 - builder2.getBuild(1).started = 1228431600 # 5 Dec 2008 + builder2.getBuild(1).started = 1228431600 # 5 Dec 2008 builder2.getBuild(1).finished = 1228431600 # 5 Dec 2008 builder2.getBuild(0).started = 1227913200 # 29 Nov 2008 From noreply at buildbot.pypy.org Thu Aug 29 13:34:58 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Aug 2013 13:34:58 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: remove hack Message-ID: <20130829113458.DB6011C01F5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r838:b66b878f2ec1 Date: 2013-08-28 21:06 +0200 http://bitbucket.org/pypy/buildbot/changeset/b66b878f2ec1/ Log: remove hack diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -29,10 +29,6 @@ ARMBoardLock = locks.SlaveLock('arm_boards', maxCount=1) -# XXX monkey patch Trigger class, there are to issues with the list of renderables -# original: Trigger.renderables = [ 'set_propetries', 'scheduler', 'sourceStamp' ] -Trigger.renderables = [ 'set_properties', 'schedulerNames', 'sourceStamp' ] - class ShellCmd(shell.ShellCommand): # our own version that can distinguish abort cases (rc == -1) From noreply at buildbot.pypy.org Thu Aug 29 13:34:59 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 29 Aug 2013 13:34:59 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-update: hide additional force-build properties. Message-ID: <20130829113459.EAA6B1C01F5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-update Changeset: r839:52a28d9daa48 Date: 2013-08-29 09:42 +0200 http://bitbucket.org/pypy/buildbot/changeset/52a28d9daa48/ Log: hide additional force-build properties. 
diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py
--- a/bot2/pypybuildbot/master.py
+++ b/bot2/pypybuildbot/master.py
@@ -257,7 +257,7 @@
                             JITONLYLINUXPPC64,
                             JITBENCH,
                             JITBENCH64,
-                            ] + ARM.builderNames),
+                            ] + ARM.builderNames, properties=[]),
                    ] + ARM.schedulers,

    'status': [status, ircbot],

From noreply at buildbot.pypy.org Thu Aug 29 13:35:01 2013
From: noreply at buildbot.pypy.org (bivab)
Date: Thu, 29 Aug 2013 13:35:01 +0200 (CEST)
Subject: [pypy-commit] buildbot buildbot-update: explicitly set branch to 'default' for nightly builds
Message-ID: <20130829113501.3F58C1C01F5@cobra.cs.uni-duesseldorf.de>

Author: David Schneider
Branch: buildbot-update
Changeset: r840:3206ed6cefa9
Date: 2013-08-29 09:51 +0200
http://bitbucket.org/pypy/buildbot/changeset/3206ed6cefa9/

Log: explicitly set branch to 'default' for nightly builds

diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py
--- a/bot2/pypybuildbot/master.py
+++ b/bot2/pypybuildbot/master.py
@@ -213,12 +213,12 @@
                    JITFREEBSD864,          # on ananke
                    JITFREEBSD964,          # on exarkun's freebsd
                    JITMACOSX64,            # on xerxes
-                   ], branch=None, hour=0, minute=0),
+                   ], branch='default', hour=0, minute=0),

         Nightly("nightly-2-00", [
                    JITBENCH,               # on tannit32, uses 1 core (in part exclusively)
                    JITBENCH64,             # on tannit64, uses 1 core (in part exclusively)
-                   ], branch=None, hour=2, minute=0),
+                   ], branch='default', hour=2, minute=0),

         Nightly("nightly-2-00-py3k", [
                    LINUX64,                # on allegro64, uses all cores
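The hunk above switches the nightly schedulers from branch=None to an explicit branch='default', so the scheduled builds name the Mercurial default branch instead of relying on the unspecified-branch behaviour. A trimmed-down sketch of the same idea, assuming buildbot 0.8.x where Nightly lives in buildbot.schedulers.timed, and using placeholder builder names rather than the real constants from master.py:

    from buildbot.schedulers.timed import Nightly

    # Placeholder builder names; pypy's master.py passes constants such as
    # LINUX64 or JITBENCH64 here.
    nightly_scheduler = Nightly("nightly-0-00",
                                ["own-linux-x86-64", "jit-linux-x86-64"],
                                branch='default',   # build the 'default' branch explicitly
                                hour=0, minute=0)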
From noreply at buildbot.pypy.org Thu Aug 29 13:35:02 2013
From: noreply at buildbot.pypy.org (bivab)
Date: Thu, 29 Aug 2013 13:35:02 +0200 (CEST)
Subject: [pypy-commit] buildbot buildbot-0.8.7: close outdated branch -> buildbot-update
Message-ID: <20130829113502.679961C01F5@cobra.cs.uni-duesseldorf.de>

Author: David Schneider
Branch: buildbot-0.8.7
Changeset: r841:43852fd74e7a
Date: 2013-08-29 09:54 +0200
http://bitbucket.org/pypy/buildbot/changeset/43852fd74e7a/

Log: close outdated branch -> buildbot-update

From noreply at buildbot.pypy.org Thu Aug 29 13:35:03 2013
From: noreply at buildbot.pypy.org (bivab)
Date: Thu, 29 Aug 2013 13:35:03 +0200 (CEST)
Subject: [pypy-commit] buildbot buildbot-update: show builds without a branch and for the default branch as <trunk>, replacing
Message-ID: <20130829113503.9FF071C01F5@cobra.cs.uni-duesseldorf.de>

Author: David Schneider
Branch: buildbot-update
Changeset: r842:abef23b33519
Date: 2013-08-29 13:33 +0200
http://bitbucket.org/pypy/buildbot/changeset/abef23b33519/

Log: show builds without a branch and for the default branch as <trunk>, replacing

diff --git a/bot2/pypybuildbot/summary.py b/bot2/pypybuildbot/summary.py
--- a/bot2/pypybuildbot/summary.py
+++ b/bot2/pypybuildbot/summary.py
@@ -374,7 +374,7 @@
     def _start_cat_branch(self, cat_branch, fine=False):
         category, branch = cat_branch
-        branch = trunk_name(branch)
+        branch = default_name(branch)
         category = category_name(category)

         self.cur_cat_branch = (category, branch)
@@ -615,14 +615,16 @@
     return lambda v: v in membs

 def make_subst(v1, v2):
+    if not isinstance(v1, list):
+        v1 = [v1]
     def subst(v):
-        if v == v1:
+        if v in v1:
             return v2
         return v
     return subst

-trunk_name = make_subst(None, "<trunk>")
-trunk_value = make_subst("<trunk>", None)
+default_name = make_subst(['default', None], '<trunk>')
+default_value = make_subst(['default', '<trunk>'], ['default', None])

 category_name = make_subst(None, '-')
 nocat_value = make_subst("-", None)
@@ -825,7 +827,13 @@
         only_branches = request.args.get('branch', None)
         only_recentrevs = request.args.get('recentrev', None)
         if only_branches is not None:
-            only_branches = map(trunk_value, only_branches)
+            branches = []
+            for x in map(default_value, only_branches):
+                if isinstance(x, str):
+                    branches.append(x)
+                else:
+                    branches.extend(x)
+            only_branches = branches
         only_builder = request.args.get('builder', None)
         only_builds = None
         if only_builder is not None:
@@ -861,16 +869,16 @@
                                  outcome_set_cache.stats()))

         if request.args:
-            trunk_vs_any_text = "filter nothing"
-            trunk_vs_any_query = ""
+            default_vs_any_text = "filter nothing"
+            default_vs_any_query = ""
         else:
-            trunk_vs_any_text = "all <trunk>"
-            trunk_vs_any_query = "?branch=<trunk>"
+            default_vs_any_text = "all <trunk>"
+            default_vs_any_query = "?branch=<trunk>"

-        trunk_vs_any_anchor = html.a(trunk_vs_any_text,
+        default_vs_any_anchor = html.a(default_vs_any_text,
                                        href="/summary%s" %
-                                       trunk_vs_any_query,
+                                       default_vs_any_query,
                                        class_="failSummary trunkVsAny")
-        trunk_vs_any = html.div(trunk_vs_any_anchor,
+        default_vs_any = html.div(default_vs_any_anchor,
                                  style="position: absolute; right: 5%;")

-        return trunk_vs_any.unicode() + page.render()
+        return default_vs_any.unicode() + page.render()

diff --git a/master/public_html/index.html b/master/public_html/index.html
--- a/master/public_html/index.html
+++ b/master/public_html/index.html
@@ -10,10 +10,10 @@
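The reworked make_subst above accepts either a single value or a list of values that should all map to the same replacement, which is how 'default' and None (no branch at all) now collapse into one display name on the summary page. A small usage sketch, copying the helper from the patch and assuming '<trunk>' is the display string the log message refers to:

    def make_subst(v1, v2):
        # As introduced in this changeset: v1 may be one value or a list of values.
        if not isinstance(v1, list):
            v1 = [v1]
        def subst(v):
            if v in v1:
                return v2
            return v
        return subst

    default_name = make_subst(['default', None], '<trunk>')

    assert default_name('default') == '<trunk>'   # explicit default branch
    assert default_name(None) == '<trunk>'        # builds without a branch
    assert default_name('py3k') == 'py3k'         # other branches pass through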